// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

#include "swap.h"	/* for __swap_writepage() */
#include "internal.h"	/* for MAX_RECLAIM_RETRIES */
/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */
/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;
/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
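
/*
 * Usage note: zswap can be enabled at boot with "zswap.enabled=1" on the
 * kernel command line, or toggled at runtime by writing Y/N to
 * /sys/module/zswap/parameters/enabled (see
 * Documentation/admin-guide/mm/zswap.rst).
 */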
/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
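
/*
 * Usage note: both the compressor and the zpool can be changed while zswap
 * is running, e.g. "echo lz4 > /sys/module/zswap/parameters/compressor".
 * A new pool is created for the new setting and becomes current; the old
 * pool persists until its last entry is freed (see __zswap_param_set()
 * below).
 */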
/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
		   bool, 0644);

static bool zswap_exclusive_loads_enabled = IS_ENABLED(
		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
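
/*
 * With exclusive loads, a successful zswap_frontswap_load() invalidates
 * the zswap entry, so a page is never kept compressed in zswap and
 * uncompressed in the swapcache at the same time (see the load path
 * below).
 */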
/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *dstmem;
	struct mutex *mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
	struct list_head lru;
	spinlock_t lru_lock;
};
/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * swpentry - the swap entry for this page; its offset indexes the red-black
 *            tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression. For a same value filled page length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	struct rb_node rbnode;
	swp_entry_t swpentry;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};
/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;
/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);
static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
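
/*
 * Worked example (illustrative numbers): on a 4 GiB machine
 * (totalram_pages() == 1048576 with 4 KiB pages) and the default
 * max_pool_percent of 20, zswap_is_full() triggers once the compressed
 * pool exceeds 209715 pages (~0.8 GiB).  With the default
 * accept_threshold_percent of 90, stores are then rejected until the
 * pool drops back below 188743 pages (90% of the limit).
 */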
static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}
/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}
/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;
	pgoff_t entry_offset;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		entry_offset = swp_offset(entry->swpentry);
		if (entry_offset > offset)
			node = node->rb_left;
		else if (entry_offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;
	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		myentry_offset = swp_offset(myentry->swpentry);
		if (myentry_offset > entry_offset)
			link = &(*link)->rb_left;
		else if (myentry_offset < entry_offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
		return true;
	}
	return false;
}
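
/*
 * Note: the rbtree is keyed by swap offset alone; there is one tree per
 * swap type (see zswap_trees[]), so (type, offset) uniquely identifies
 * an entry.
 */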
/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		spin_lock(&entry->pool->lru_lock);
		list_del(&entry->lru);
		spin_unlock(&entry->pool->lru_lock);
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}
/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
* remove from the tree and free it, if nobody references the entry
*/
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}
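
/*
 * Reference protocol: an entry is created with refcount 1 (the tree's
 * reference).  Lookups take an extra reference under the tree lock via
 * zswap_entry_find_get(); dropping the last reference in
 * zswap_entry_put() erases the entry from the tree and frees it.
 */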
/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
/*
 * If users dynamically change the zpool type and compressor at runtime, i.e.
 * zswap is running, zswap can have more than one zpool on one cpu, but they
 * are sharing dstmem. So we need this mutex to be per-cpu.
 */
static DEFINE_PER_CPU(struct mutex *, zswap_mutex);
static int zswap_dstmem_prepare(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
	if (!mutex) {
		kfree(dst);
		return -ENOMEM;
	}

	mutex_init(mutex);
	per_cpu(zswap_dstmem, cpu) = dst;
	per_cpu(zswap_mutex, cpu) = mutex;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	struct mutex *mutex;
	u8 *dst;

	mutex = per_cpu(zswap_mutex, cpu);
	kfree(mutex);
	per_cpu(zswap_mutex, cpu) = NULL;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		return PTR_ERR(acomp);
	}
	acomp_ctx->acomp = acomp;

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		crypto_free_acomp(acomp_ctx->acomp);
		return -ENOMEM;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
	acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);

	return 0;
}
static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
	}

	return 0;
}
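
/*
 * The two callbacks above are registered per pool instance through
 * cpuhp_state_add_instance() in zswap_pool_create(), so each pool gets a
 * per-CPU acomp transform and request that follow CPU hotplug events.
 */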
/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}
/*
 * If the entry is still valid in the tree, drop the initial ref and remove it
 * from the tree. This function must be called with an additional ref held,
 * otherwise it may race with another invalidation freeing the entry.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
				   struct zswap_entry *entry)
{
	if (zswap_rb_erase(&tree->rbroot, entry))
		zswap_entry_put(tree, entry);
}
static int zswap_reclaim_entry(struct zswap_pool *pool)
{
	struct zswap_entry *entry;
	struct zswap_tree *tree;
	pgoff_t swpoffset;
	int ret;

	/* Get an entry off the LRU */
	spin_lock(&pool->lru_lock);
	if (list_empty(&pool->lru)) {
		spin_unlock(&pool->lru_lock);
		return -EINVAL;
	}
	entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
	list_del_init(&entry->lru);
	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpoffset is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpoffset = swp_offset(entry->swpentry);
	tree = zswap_trees[swp_type(entry->swpentry)];
	spin_unlock(&pool->lru_lock);

	/* Check for invalidate() race */
	spin_lock(&tree->lock);
	if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
		ret = -EAGAIN;
		goto unlock;
	}
	/* Hold a reference to prevent a free during writeback */
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	ret = zswap_writeback_entry(entry, tree);

	spin_lock(&tree->lock);
	if (ret) {
		/* Writeback failed, put entry back on LRU */
		spin_lock(&pool->lru_lock);
		list_move(&entry->lru, &pool->lru);
		spin_unlock(&pool->lru_lock);
		goto put_unlock;
	}

	/*
	 * Writeback started successfully, the page now belongs to the
	 * swapcache. Drop the entry from zswap - unless invalidate already
	 * took it out while we had the tree->lock released for IO.
	 */
	zswap_invalidate_entry(tree, entry);

put_unlock:
	/* Drop local reference */
	zswap_entry_put(tree, entry);
unlock:
	spin_unlock(&tree->lock);
	return ret ? -EAGAIN : 0;
}
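
/*
 * Return contract: 0 means one entry was written back and removed from
 * zswap; -EINVAL means the LRU was empty; -EAGAIN covers transient
 * failures (races or writeback errors) and tells the caller it may retry.
 */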
static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);
	int ret, failures = 0;

	do {
		ret = zswap_reclaim_entry(pool);
		if (ret) {
			zswap_reject_reclaim_fail++;
			if (ret != -EAGAIN)
				break;
			if (++failures == MAX_RECLAIM_RETRIES)
				break;
		}
		cond_resched();
	} while (!zswap_can_accept());
	zswap_pool_put(pool);
}
static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	INIT_LIST_HEAD(&pool->lru);
	spin_lock_init(&pool->lru_lock);
	INIT_WORK(&pool->shrink_work, shrink_worker);

	zswap_pool_debug("created", pool);

	return pool;

error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}
static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}
static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}
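
/*
 * Pool lifetime: being the current pool holds the reference taken in
 * zswap_pool_create().  When the last reference is dropped,
 * __zswap_pool_empty() unlinks the pool and defers the actual destruction
 * to a workqueue so that the zpool and percpu resources are freed only
 * after an RCU grace period (see synchronize_rcu() above).
 */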
/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}
/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}
static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}
static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}
/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};
/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree)
{
	swp_entry_t swpentry = entry->swpentry;
	struct page *page;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	struct zpool *pool = entry->pool->zpool;

	u8 *src, *tmp = NULL;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	if (!zpool_can_sleep_mapped(pool)) {
		tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
	}

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/*
		 * Having a local reference to the zswap entry doesn't exclude
		 * swapping from invalidating and recycling the swap slot. Once
		 * the swapcache is secured against concurrent swapping to and
		 * from the slot, recheck that the entry is still current before
		 * writing.
		 */
		spin_lock(&tree->lock);
		if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
			spin_unlock(&tree->lock);
			delete_from_swap_cache(page_folio(page));
			ret = -ENOMEM;
			goto fail;
		}
		spin_unlock(&tree->lock);

		/* decompress */
		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
		dlen = PAGE_SIZE;

		src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
		if (!zpool_can_sleep_mapped(pool)) {
			memcpy(tmp, src, entry->length);
			src = tmp;
			zpool_unmap_handle(pool, entry->handle);
		}

		mutex_lock(acomp_ctx->mutex);
		sg_init_one(&input, src, entry->length);
		sg_init_table(&output, 1);
		sg_set_page(&output, page, PAGE_SIZE, 0);
		acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
		ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
		dlen = acomp_ctx->req->dlen;
		mutex_unlock(acomp_ctx->mutex);

		if (!zpool_can_sleep_mapped(pool))
			kfree(tmp);
		else
			zpool_unmap_handle(pool, entry->handle);

		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc);
	put_page(page);
	zswap_written_back_pages++;

	return ret;

fail:
	if (!zpool_can_sleep_mapped(pool))
		kfree(tmp);

	/*
	 * if we get here due to ZSWAP_SWAPCACHE_EXIST
	 * a load may be happening concurrently.
	 * it is safe and okay to not free the entry.
	 * it is also okay to return !0
	 */
	return ret;
}
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}
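
/*
 * Example: a page of all zeroes yields value == 0 and is stored with
 * length == 0 and no zpool allocation; only the zswap_entry metadata is
 * kept, and zswap_fill_page() regenerates the contents with memset_l()
 * on load.  The same applies to any page that repeats one machine word.
 */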
/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	struct obj_cgroup *objcg = NULL;
	struct zswap_pool *pool;
	int ret;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	gfp_t gfp;
	/* THP isn't supported */
	if (PageTransHuge(page)) {
		ret = -EINVAL;
		goto reject;
	}

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/*
	 * XXX: zswap reclaim does not work with cgroups yet. Without a
	 * cgroup-aware entry LRU, we will push out entries system-wide based on
	 * local cgroup limits.
	 */
	objcg = get_obj_cgroup_from_page(page);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		ret = -ENOMEM;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
		goto shrink;
	}

	if (zswap_pool_reached_full) {
		if (!zswap_can_accept()) {
			ret = -ENOMEM;
			goto shrink;
		} else
			zswap_pool_reached_full = false;
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_atomic(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_atomic(src);
			entry->swpentry = swp_entry(type, offset);
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_atomic(src);
	}

	if (!zswap_non_same_filled_pages_enabled) {
		ret = -EINVAL;
		goto freepage;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}
	/* compress */
	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(acomp_ctx->mutex);

	dst = acomp_ctx->dstmem;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	/* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
	/*
	 * It may look a little silly to send an asynchronous request and
	 * then wait for its completion synchronously; in effect the
	 * operation is synchronous.
	 * In theory, acomp lets users submit multiple requests to one acomp
	 * instance and have them completed simultaneously. But frontswap
	 * stores and loads page by page: there is no way for one thread
	 * doing frontswap to send a second page before the first is done.
	 * Different threads on different CPUs do get different acomp
	 * instances, so multiple threads can still (de)compress in parallel.
	 */
	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(entry->pool->zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	ret = zpool_malloc(entry->pool->zpool, dlen, gfp, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	mutex_unlock(acomp_ctx->mutex);
	/* populate entry */
	entry->swpentry = swp_entry(type, offset);
	entry->handle = handle;
	entry->length = dlen;

insert_entry:
	entry->objcg = objcg;
	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		/* Account before objcg ref is moved to tree */
		count_objcg_event(objcg, ZSWPOUT);
	}

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	if (entry->length) {
		spin_lock(&entry->pool->lru_lock);
		list_add(&entry->lru, &entry->pool->lru);
		spin_unlock(&entry->pool->lru_lock);
	}
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();
	count_vm_event(ZSWPOUT);

	return 0;

put_dstmem:
	mutex_unlock(acomp_ctx->mutex);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	if (objcg)
		obj_cgroup_put(objcg);
	return ret;

shrink:
	pool = zswap_pool_last_get();
	if (pool)
		queue_work(shrink_wq, &pool->shrink_work);
	ret = -ENOMEM;
	goto reject;
}
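
/*
 * Note: on any nonzero return from the store hook, frontswap falls back
 * to writing the page to the backing swap device, so a failed store
 * degrades performance but not correctness.
 */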
/*
 * returns 0 if the page was successfully decompressed
 * return -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page, bool *exclusive)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src, *dst, *tmp;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	if (!entry->length) {
		dst = kmap_atomic(page);
		zswap_fill_page(dst, entry->value);
		kunmap_atomic(dst);
		ret = 0;
		goto stats;
	}

	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
		tmp = kmalloc(entry->length, GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto freeentry;
		}
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);

	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
		memcpy(tmp, src, entry->length);
		src = tmp;
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
	}

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(acomp_ctx->mutex);
	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
	mutex_unlock(acomp_ctx->mutex);

	if (zpool_can_sleep_mapped(entry->pool->zpool))
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
	else
		kfree(tmp);

	BUG_ON(ret);
stats:
	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);
freeentry:
	spin_lock(&tree->lock);
	if (!ret && zswap_exclusive_loads_enabled) {
		zswap_invalidate_entry(tree, entry);
		*exclusive = true;
	} else if (entry->length) {
		spin_lock(&entry->pool->lru_lock);
		list_move(&entry->lru, &entry->pool->lru);
		spin_unlock(&entry->pool->lru_lock);
	}
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return ret;
}
/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}
	zswap_invalidate_entry(tree, entry);
	spin_unlock(&tree->lock);
}
/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}
static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static const struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};
/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", 0444,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
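
/*
 * Usage note: with CONFIG_DEBUG_FS, these counters appear under
 * /sys/kernel/debug/zswap/, e.g.
 * "cat /sys/kernel/debug/zswap/pool_total_size".
 */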
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif
/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	shrink_wq = create_workqueue("zswap-shrink");
	if (!shrink_wq)
		goto fallback_fail;

	ret = frontswap_register_ops(&zswap_frontswap_ops);
	if (ret)
		goto destroy_wq;
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

destroy_wq:
	destroy_workqueue(shrink_wq);
fallback_fail:
	if (pool)
		zswap_pool_destroy(pool);
hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}
static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");