// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 */

#include <linux/bio.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include "misc.h"
#include "compression.h"
#include "ctree.h"

#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
#define ZSTD_BTRFS_MAX_LEVEL 15
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
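
/*
 * Capping windowLog at 17 limits the maximum compressed input to 128K
 * (ZSTD_BTRFS_MAX_INPUT), which matches the 128K chunks btrfs compresses at a
 * time and bounds the per-workspace memory requirement.
 */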

static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
						 size_t src_len)
{
	zstd_parameters params = zstd_get_params(level, src_len);

	if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
		params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
	WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
	return params;
}

struct workspace {
	void *mem;
	size_t size;
	char *buf;
	unsigned int level;
	unsigned int req_level;
	unsigned long last_used; /* jiffies */
	struct list_head list;
	struct list_head lru_list;
	zstd_in_buffer in_buf;
	zstd_out_buffer out_buf;
};

/*
 * Zstd Workspace Management
 *
 * Zstd workspaces have different memory requirements depending on the level.
 * The zstd workspaces are managed by having individual lists for each level
 * and a global lru. Forward progress is maintained by protecting a max level
 * workspace.
 *
 * Getting a workspace is done by using the bitmap to identify the levels that
 * have available workspaces and scanning up from the requested level. This
 * lets us recycle higher level workspaces because of the monotonic memory
 * guarantee. A workspace's last_used is only updated if it is being used by
 * the corresponding memory level. Putting a workspace involves adding it back
 * to the appropriate places and adding it back to the lru if necessary.
 *
 * A timer is used to reclaim workspaces if they have not been used for
 * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around.
 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 */
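
/*
 * Typical call sequence (simplified illustration; the real callers live in
 * the generic btrfs compression code):
 *
 *	ws = zstd_get_workspace(level);
 *	ret = zstd_compress_pages(ws, mapping, start, pages,
 *				  &nr_pages, &total_in, &total_out);
 *	zstd_put_workspace(ws);
 */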

struct zstd_workspace_manager {
	const struct btrfs_compress_op *ops;
	spinlock_t lock;
	struct list_head lru_list;
	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
	unsigned long active_map;
	wait_queue_head_t wait;
	struct timer_list timer;
};
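
/*
 * Bit i of active_map tracks whether idle_ws[i] (compression level i + 1) has
 * idle workspaces available. The lists and the bitmap are only manipulated
 * under the manager's spinlock.
 */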

static struct zstd_workspace_manager wsm;

static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];

static inline struct workspace *list_to_workspace(struct list_head *list)
{
	return container_of(list, struct workspace, list);
}

void zstd_free_workspace(struct list_head *ws);
struct list_head *zstd_alloc_workspace(unsigned int level);

/*
 * Timer callback to free unused workspaces.
 *
 * @timer: timer
 *
 * This scans the lru_list and attempts to reclaim any workspace that hasn't
 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
 *
 * The context is softirq and does not need the _bh locking primitives.
 */
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
	struct list_head *pos, *next;

	spin_lock(&wsm.lock);

	if (list_empty(&wsm.lru_list)) {
		spin_unlock(&wsm.lock);
		return;
	}
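
	/*
	 * The lru_list keeps the most recently used workspace at the head, so
	 * walk it from the tail and stop at the first workspace that was used
	 * more recently than the reclaim threshold.
	 */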
	list_for_each_prev_safe(pos, next, &wsm.lru_list) {
		struct workspace *victim = container_of(pos, struct workspace,
							lru_list);
		unsigned int level;

		if (time_after(victim->last_used, reclaim_threshold))
			break;

		/* workspace is in use */
		if (victim->req_level)
			continue;

		level = victim->level;
		list_del(&victim->lru_list);
		list_del(&victim->list);
		zstd_free_workspace(&victim->list);

		if (list_empty(&wsm.idle_ws[level - 1]))
			clear_bit(level - 1, &wsm.active_map);
	}

	if (!list_empty(&wsm.lru_list))
		mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);

	spin_unlock(&wsm.lock);
}

/*
 * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
 *
 * It is possible based on the level configurations that a higher level
 * workspace uses less memory than a lower level workspace. In order to reuse
 * workspaces, this must be made a monotonic relationship. This precomputes
 * the required memory for each level and enforces the monotonicity between
 * level and memory required.
 */
static void zstd_calc_ws_mem_sizes(void)
{
	size_t max_size = 0;
	unsigned int level;
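
	/*
	 * The same workspace memory backs both a cstream at the given level
	 * and a dstream for inputs up to ZSTD_BTRFS_MAX_INPUT, so each entry
	 * is the larger of the two bounds, made monotonic with a running max.
	 */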
	for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
		zstd_parameters params =
			zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
		size_t level_size =
			max_t(size_t,
			      zstd_cstream_workspace_bound(&params.cParams),
			      zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));

		max_size = max_t(size_t, max_size, level_size);
		zstd_ws_mem_sizes[level - 1] = max_size;
	}
}

void zstd_init_workspace_manager(void)
{
	struct list_head *ws;
	int i;

	zstd_calc_ws_mem_sizes();

	wsm.ops = &btrfs_zstd_compress;
	spin_lock_init(&wsm.lock);
	init_waitqueue_head(&wsm.wait);
	timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);

	INIT_LIST_HEAD(&wsm.lru_list);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&wsm.idle_ws[i]);
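
	/*
	 * Preallocate one max level workspace; this is the workspace that
	 * guarantees forward progress when later allocations fail and
	 * zstd_get_workspace() has to sleep.
	 */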
	ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
	if (IS_ERR(ws)) {
		pr_warn("BTRFS: cannot preallocate zstd compression workspace\n");
	} else {
		set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
		list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
	}
}

void zstd_cleanup_workspace_manager(void)
{
	struct workspace *workspace;
	int i;

	spin_lock_bh(&wsm.lock);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
		while (!list_empty(&wsm.idle_ws[i])) {
			workspace = container_of(wsm.idle_ws[i].next,
						 struct workspace, list);
			list_del(&workspace->list);
			list_del(&workspace->lru_list);
			zstd_free_workspace(&workspace->list);
		}
	}
	spin_unlock_bh(&wsm.lock);

	del_timer_sync(&wsm.timer);
}

/*
 * zstd_find_workspace - find workspace
 * @level: compression level
 *
 * This iterates over the set bits in the active_map beginning at the requested
 * compression level. This lets us utilize already allocated workspaces before
 * allocating a new one. If the workspace is of a larger size, it is used, but
 * the place in the lru_list and last_used times are not updated. This is to
 * offer the opportunity to reclaim the workspace in favor of allocating an
 * appropriately sized one in the future.
 */
static struct list_head *zstd_find_workspace(unsigned int level)
{
	struct list_head *ws;
	struct workspace *workspace;
	int i = level - 1;

	spin_lock_bh(&wsm.lock);
	for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
		if (!list_empty(&wsm.idle_ws[i])) {
			ws = wsm.idle_ws[i].next;
			workspace = list_to_workspace(ws);
			list_del_init(ws);
			/* keep its place if it's a lower level using this */
			workspace->req_level = level;
			if (level == workspace->level)
				list_del(&workspace->lru_list);
			if (list_empty(&wsm.idle_ws[i]))
				clear_bit(i, &wsm.active_map);
			spin_unlock_bh(&wsm.lock);
			return ws;
		}
	}
	spin_unlock_bh(&wsm.lock);

	return NULL;
}

/*
 * zstd_get_workspace - zstd's get_workspace
 * @level: compression level
 *
 * If @level is 0, then any compression level can be used. Therefore, we begin
 * scanning from 1. We first scan through the available workspaces and only
 * then attempt to allocate a new one. If we fail to allocate one due to
 * memory pressure, go to sleep waiting for the max level workspace to free up.
 */
struct list_head *zstd_get_workspace(unsigned int level)
{
	struct list_head *ws;
	unsigned int nofs_flag;

	/* level == 0 means we can use any workspace */
	if (!level)
		level = 1;

again:
	ws = zstd_find_workspace(level);
	if (ws)
		return ws;

	nofs_flag = memalloc_nofs_save();
	ws = zstd_alloc_workspace(level);
	memalloc_nofs_restore(nofs_flag);
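
	/*
	 * If allocation failed under memory pressure, sleep until the
	 * protected max level workspace is put back; that is the only put
	 * that wakes this waitqueue. Then retry from the top.
	 */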
	if (IS_ERR(ws)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule();
		finish_wait(&wsm.wait, &wait);

		goto again;
	}

	return ws;
}

/*
 * zstd_put_workspace - zstd put_workspace
 * @ws: list_head for the workspace
 *
 * When putting back a workspace, we only need to update the LRU if we are of
 * the requested compression level. Here is where we continue to protect the
 * max level workspace or update last_used accordingly. If the reclaim timer
 * isn't set, it is also set here. Only putting back the max level workspace
 * wakes up waiters.
 */
void zstd_put_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_to_workspace(ws);

	spin_lock_bh(&wsm.lock);

	/* A node is only taken off the lru if we are the corresponding level */
	if (workspace->req_level == workspace->level) {
		/* Hide a max level workspace from reclaim */
		if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
			INIT_LIST_HEAD(&workspace->lru_list);
		} else {
			workspace->last_used = jiffies;
			list_add(&workspace->lru_list, &wsm.lru_list);
			if (!timer_pending(&wsm.timer))
				mod_timer(&wsm.timer,
					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
		}
	}

	set_bit(workspace->level - 1, &wsm.active_map);
	list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
	workspace->req_level = 0;

	spin_unlock_bh(&wsm.lock);
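
	/*
	 * zstd_get_workspace() sleeps waiting for a max level workspace, so
	 * only a max level put needs to wake the waitqueue.
	 */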
	if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
		cond_wake_up(&wsm.wait);
}

void zstd_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->mem);
	kfree(workspace->buf);
	kfree(workspace);
}

struct list_head *zstd_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->size = zstd_ws_mem_sizes[level - 1];
	workspace->level = level;
	workspace->req_level = level;
	workspace->last_used = jiffies;
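
	/*
	 * mem backs the zstd c/dstream state (sized by the precomputed
	 * monotonic bound for this level), buf is a one page bounce buffer
	 * used when decompressing.
	 */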
	workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN);
	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);
	INIT_LIST_HEAD(&workspace->lru_list);

	return &workspace->list;
fail:
	zstd_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	zstd_cstream *stream;
	int ret = 0;
	int nr_pages = 0;
	struct page *in_page = NULL;  /* The current page to read */
	struct page *out_page = NULL; /* The current page to write to */
	unsigned long tot_in = 0;
	unsigned long tot_out = 0;
	unsigned long len = *total_out;
	const unsigned long nr_dest_pages = *out_pages;
	unsigned long max_out = nr_dest_pages * PAGE_SIZE;
	zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
							   len);
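
	/*
	 * The compression level used is workspace->req_level, the level the
	 * caller asked for; the workspace itself may have been sized for a
	 * higher level and merely borrowed for this request.
	 */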

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/* Initialize the stream */
	stream = zstd_init_cstream(&params, len, workspace->mem,
			workspace->size);
	if (!stream) {
		pr_warn("BTRFS: zstd_init_cstream failed\n");
		ret = -EIO;
		goto out;
	}

	/* map in the first page of input data */
	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	workspace->in_buf.src = kmap_local_page(in_page);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);

	/* Allocate and map in the output buffer */
	out_page = alloc_page(GFP_NOFS);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pages[nr_pages++] = out_page;
	workspace->out_buf.dst = page_address(out_page);
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

	while (1) {
		size_t ret2;

		ret2 = zstd_compress_stream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_compress_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto out;
		}

		/* Check to see if we are making it bigger */
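		/*
		 * The first 8K of input is given the benefit of the doubt;
		 * beyond that, if the output so far is still larger than the
		 * input consumed, give up so the caller can store the data
		 * uncompressed instead.
		 */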
		if (tot_in + workspace->in_buf.pos > 8192 &&
				tot_in + workspace->in_buf.pos <
				tot_out + workspace->out_buf.pos) {
			ret = -E2BIG;
			goto out;
		}

		/* We've reached the end of our output range */
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		/* Check if we need more output space */
		if (workspace->out_buf.pos == workspace->out_buf.size) {
			tot_out += PAGE_SIZE;
			max_out -= PAGE_SIZE;
			if (nr_pages == nr_dest_pages) {
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			pages[nr_pages++] = out_page;
			workspace->out_buf.dst = page_address(out_page);
			workspace->out_buf.pos = 0;
			workspace->out_buf.size = min_t(size_t, max_out,
							PAGE_SIZE);
		}

		/* We've reached the end of the input */
		if (workspace->in_buf.pos >= len) {
			tot_in += workspace->in_buf.pos;
			break;
		}

		/* Check if we need more input */
		if (workspace->in_buf.pos == workspace->in_buf.size) {
			tot_in += PAGE_SIZE;
			kunmap_local(workspace->in_buf.src);
			put_page(in_page);

			start += PAGE_SIZE;
			len -= PAGE_SIZE;
			in_page = find_get_page(mapping, start >> PAGE_SHIFT);
			workspace->in_buf.src = kmap_local_page(in_page);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
		}
	}
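
	/*
	 * All input has been consumed; keep calling zstd_end_stream() to
	 * flush the frame epilogue, adding output pages as needed, until it
	 * reports the frame is complete (returns 0).
	 */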
	while (1) {
		size_t ret2;

		ret2 = zstd_end_stream(stream, &workspace->out_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_end_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto out;
		}
		if (ret2 == 0) {
			tot_out += workspace->out_buf.pos;
			break;
		}
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		tot_out += PAGE_SIZE;
		max_out -= PAGE_SIZE;
		if (nr_pages == nr_dest_pages) {
			ret = -E2BIG;
			goto out;
		}
		out_page = alloc_page(GFP_NOFS);
		if (out_page == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		pages[nr_pages++] = out_page;
		workspace->out_buf.dst = page_address(out_page);
		workspace->out_buf.pos = 0;
		workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
	}

	if (tot_out >= tot_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_in = tot_in;
	*total_out = tot_out;
out:
	*out_pages = nr_pages;
	if (workspace->in_buf.src) {
		kunmap_local(workspace->in_buf.src);
		put_page(in_page);
	}
	return ret;
}

int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct page **pages_in = cb->compressed_pages;
	size_t srclen = cb->compressed_len;
	zstd_dstream *stream;
	int ret = 0;
	unsigned long page_in_index = 0;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	unsigned long total_out = 0;

	stream = zstd_init_dstream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (!stream) {
		pr_debug("BTRFS: zstd_init_dstream failed\n");
		ret = -EIO;
		goto done;
	}

	workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;
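
	/*
	 * Decompress into the page sized workspace->buf and let
	 * btrfs_decompress_buf2page() copy the data into the bio's pages; it
	 * returns 0 once the requested range has been fully filled.
	 */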
	while (1) {
		size_t ret2;

		ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto done;
		}
		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
				total_out - buf_start, cb, buf_start);
		if (ret == 0)
			break;

		if (workspace->in_buf.pos >= srclen)
			break;

		/* Check if we've hit the end of a frame */
		if (ret2 == 0)
			break;

		if (workspace->in_buf.pos == workspace->in_buf.size) {
			kunmap_local(workspace->in_buf.src);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				workspace->in_buf.src = NULL;
				ret = -EIO;
				goto done;
			}
			srclen -= PAGE_SIZE;
			workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
		}
	}
	ret = 0;
done:
	if (workspace->in_buf.src)
		kunmap_local(workspace->in_buf.src);
	return ret;
}

int zstd_decompress(struct list_head *ws, const u8 *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	zstd_dstream *stream;
	int ret = 0;
	size_t ret2;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;

	stream = zstd_init_dstream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (!stream) {
		pr_warn("BTRFS: zstd_init_dstream failed\n");
		ret = -EIO;
		goto finish;
	}

	destlen = min_t(size_t, destlen, PAGE_SIZE);

	workspace->in_buf.src = data_in;
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = srclen;

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;
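
	/*
	 * Decompress from the start of the stream, throwing away output until
	 * start_byte is reached, then copy at most destlen bytes into
	 * dest_page.
	 */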
	ret2 = 1;
	while (pg_offset < destlen
	       && workspace->in_buf.pos < workspace->in_buf.size) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		/* Check if the frame is over and we still need more input */
		if (ret2 == 0) {
			pr_debug("BTRFS: zstd_decompress_stream ended early\n");
			ret = -EIO;
			goto finish;
		}
		ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto finish;
		}

		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		if (total_out <= start_byte)
			continue;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min_t(unsigned long, destlen - pg_offset,
				workspace->out_buf.size - buf_offset);

		memcpy_to_page(dest_page, pg_offset,
			       workspace->out_buf.dst + buf_offset, bytes);

		pg_offset += bytes;
	}
	ret = 0;
finish:
	if (pg_offset < destlen) {
		memzero_page(dest_page, pg_offset, destlen - pg_offset);
	}
	return ret;
}

const struct btrfs_compress_op btrfs_zstd_compress = {
	/* ZSTD uses own workspace manager */
	.workspace_manager = NULL,
	.max_level	= ZSTD_BTRFS_MAX_LEVEL,
	.default_level	= ZSTD_BTRFS_DEFAULT_LEVEL,
};