/*
 * zcache.c
 *
 * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp.
 * Copyright (c) 2010,2011, Nitin Gupta
 *
 * Zcache provides an in-kernel "host implementation" for transcendent memory
 * and, thus indirectly, for cleancache and frontswap.  Zcache includes two
 * page-accessible memory [1] interfaces, both utilizing the crypto compression
 * API:
 * 1) "compression buddies" ("zbud") is used for ephemeral pages
 * 2) zsmalloc is used for persistent pages.
 * Zsmalloc (a slab-based allocator) has very low fragmentation
 * so maximizes space efficiency, while zbud allows pairs (and potentially,
 * in the future, more than a pair of) compressed pages to be closely linked
 * so that reclaiming can be done via the kernel's physical-page-oriented
 * "shrinker" interface.
 *
 * [1] For a definition of page-accessible memory (aka PAM), see:
 *   http://marc.info/?l=linux-mm&m=127811271605009
 */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/math64.h>
#include <linux/crypto.h>
#include <linux/string.h>
#include "tmem.h"

#include "../zsmalloc/zsmalloc.h"
#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
#endif
#ifdef CONFIG_CLEANCACHE
#include <linux/cleancache.h>
#endif
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
#endif

#if 0
/* this is more aggressive but may cause other problems? */
#define ZCACHE_GFP_MASK	(GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
#else
#define ZCACHE_GFP_MASK \
	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
#endif
#define MAX_POOLS_PER_CLIENT 16

#define MAX_CLIENTS 16
#define LOCAL_CLIENT ((uint16_t)-1)

MODULE_LICENSE("GPL");
struct zcache_client {
	struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
	struct zs_pool *zspool;
	bool allocated;
	atomic_t refcount;
};

static struct zcache_client zcache_host;
static struct zcache_client zcache_clients[MAX_CLIENTS];
static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
{
	BUG_ON(cli == NULL);
	if (cli == &zcache_host)
		return LOCAL_CLIENT;
	return cli - &zcache_clients[0];
}

static inline bool is_local_client(struct zcache_client *cli)
{
	return cli == &zcache_host;
}
/* crypto API for zcache */
#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;

enum comp_op {
	ZCACHE_COMPOP_COMPRESS,
	ZCACHE_COMPOP_DECOMPRESS
};
static inline int zcache_comp_op(enum comp_op op,
				const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	BUG_ON(!zcache_comp_pcpu_tfms);
	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	BUG_ON(!tfm);
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	default:
		ret = -EINVAL;
	}
	put_cpu();
	return ret;
}
/*
 * Compression buddies ("zbud") provides for packing two (or, possibly
 * in the future, more) compressed ephemeral pages into a single "raw"
 * (physical) page and tracking them with data structures so that
 * the raw pages can be easily reclaimed.
 *
 * A zbud page ("zbpg") is an aligned page containing a list_head,
 * a lock, and two "zbud headers".  The remainder of the physical
 * page is divided up into aligned 64-byte "chunks" which contain
 * the compressed data for zero, one, or two zbuds.  Each zbpg
 * resides on: (1) an "unused list" if it has no zbuds; (2) a
 * "buddied" list if it is fully populated with two zbuds; or
 * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
 * the one unbuddied zbud uses.  The data inside a zbpg cannot be
 * read or written unless the zbpg's lock is held.
 */
#define ZBH_SENTINEL  0x43214321
#define ZBPG_SENTINEL  0xdeadbeef

#define ZBUD_MAX_BUDS 2

struct zbud_hdr {
	uint16_t client_id;
	uint16_t pool_id;
	struct tmem_oid oid;
	uint32_t index;
	uint16_t size; /* compressed size in bytes, zero means unused */
	DECL_SENTINEL
};

struct zbud_page {
	struct list_head bud_list;
	spinlock_t lock;
	struct zbud_hdr buddy[ZBUD_MAX_BUDS];
	DECL_SENTINEL
	/* followed by NCHUNKS aligned CHUNK_SIZE-byte chunks */
};

#define CHUNK_SHIFT	6
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define CHUNK_MASK	(~(CHUNK_SIZE-1))
#define NCHUNKS		(((PAGE_SIZE - sizeof(struct zbud_page)) & \
				CHUNK_MASK) >> CHUNK_SHIFT)
#define MAX_CHUNK	(NCHUNKS-1)
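/*
 * Worked example (a sketch, assuming PAGE_SIZE is 4096 and the
 * struct zbud_page header fits within the first 64-byte chunk):
 * NCHUNKS = floor((4096 - sizeof(struct zbud_page)) / 64) = 63,
 * so MAX_CHUNK = 62 and a single buddy can hold at most
 * 62 * 64 = 3968 bytes of compressed data (see zbud_max_buddy_size()).
 */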
static struct {
	struct list_head list;
	unsigned count;
} zbud_unbuddied[NCHUNKS];
/* list N contains pages with N chunks USED and NCHUNKS-N unused */
/* element 0 is never used but optimizing that isn't worth it */
static unsigned long zbud_cumul_chunk_counts[NCHUNKS];

struct list_head zbud_buddied_list;
static unsigned long zcache_zbud_buddied_count;
/* protects the buddied list and all unbuddied lists */
static DEFINE_SPINLOCK(zbud_budlists_spinlock);

static LIST_HEAD(zbpg_unused_list);
static unsigned long zcache_zbpg_unused_list_count;

/* protects the unused page list */
static DEFINE_SPINLOCK(zbpg_unused_list_spinlock);

static atomic_t zcache_zbud_curr_raw_pages;
static atomic_t zcache_zbud_curr_zpages;
static unsigned long zcache_zbud_curr_zbytes;
static unsigned long zcache_zbud_cumul_zpages;
static unsigned long zcache_zbud_cumul_zbytes;
static unsigned long zcache_compress_poor;
static unsigned long zcache_mean_compress_poor;

/* forward references */
static void *zcache_get_free_page(void);
static void zcache_free_page(void *p);
/*
 * zbud helper functions
 */

static inline unsigned zbud_max_buddy_size(void)
{
	return MAX_CHUNK << CHUNK_SHIFT;
}

static inline unsigned zbud_size_to_chunks(unsigned size)
{
	BUG_ON(size == 0 || size > zbud_max_buddy_size());
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
static inline int zbud_budnum(struct zbud_hdr *zh)
{
	unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
	struct zbud_page *zbpg = NULL;
	unsigned budnum = -1U;
	int i;

	for (i = 0; i < ZBUD_MAX_BUDS; i++)
		if (offset == offsetof(typeof(*zbpg), buddy[i])) {
			budnum = i;
			break;
		}
	BUG_ON(budnum == -1U);
	return budnum;
}
static char *zbud_data(struct zbud_hdr *zh, unsigned size)
{
	struct zbud_page *zbpg;
	char *p;
	unsigned budnum;

	ASSERT_SENTINEL(zh, ZBH);
	budnum = zbud_budnum(zh);
	BUG_ON(size == 0 || size > zbud_max_buddy_size());
	zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
	ASSERT_SPINLOCK(&zbpg->lock);
	p = (char *)zbpg;
	if (budnum == 0)
		p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
							CHUNK_MASK);
	else if (budnum == 1)
		p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
	return p;
}
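/*
 * Placement sketch (derived from the arithmetic above): buddy 0's data
 * starts at the first chunk boundary past the struct zbud_page header
 * and grows upward, while buddy 1's data is placed against the end of
 * the page, rounded up to a whole number of chunks.  For example, a
 * 1000-byte buddy 1 occupies the final 1024 bytes of the page, leaving
 * the chunks in between free for buddy 0.
 */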
/*
 * zbud raw page management
 */

static struct zbud_page *zbud_alloc_raw_page(void)
{
	struct zbud_page *zbpg = NULL;
	struct zbud_hdr *zh0, *zh1;
	bool recycled = 0;

	/* if any pages on the zbpg list, use one */
	spin_lock(&zbpg_unused_list_spinlock);
	if (!list_empty(&zbpg_unused_list)) {
		zbpg = list_first_entry(&zbpg_unused_list,
				struct zbud_page, bud_list);
		list_del_init(&zbpg->bud_list);
		zcache_zbpg_unused_list_count--;
		recycled = 1;
	}
	spin_unlock(&zbpg_unused_list_spinlock);
	if (zbpg == NULL)
		/* none on zbpg list, try to get a kernel page */
		zbpg = zcache_get_free_page();
	if (likely(zbpg != NULL)) {
		INIT_LIST_HEAD(&zbpg->bud_list);
		zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
		spin_lock_init(&zbpg->lock);
		if (recycled) {
			ASSERT_INVERTED_SENTINEL(zbpg, ZBPG);
			SET_SENTINEL(zbpg, ZBPG);
			BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
			BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
		} else {
			atomic_inc(&zcache_zbud_curr_raw_pages);
			INIT_LIST_HEAD(&zbpg->bud_list);
			SET_SENTINEL(zbpg, ZBPG);
			zh0->size = 0; zh1->size = 0;
			tmem_oid_set_invalid(&zh0->oid);
			tmem_oid_set_invalid(&zh1->oid);
		}
	}
	return zbpg;
}
static void zbud_free_raw_page(struct zbud_page *zbpg)
{
	struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];

	ASSERT_SENTINEL(zbpg, ZBPG);
	BUG_ON(!list_empty(&zbpg->bud_list));
	ASSERT_SPINLOCK(&zbpg->lock);
	BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
	BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
	INVERT_SENTINEL(zbpg, ZBPG);
	spin_unlock(&zbpg->lock);
	spin_lock(&zbpg_unused_list_spinlock);
	list_add(&zbpg->bud_list, &zbpg_unused_list);
	zcache_zbpg_unused_list_count++;
	spin_unlock(&zbpg_unused_list_spinlock);
}
/*
 * core zbud handling routines
 */

static unsigned zbud_free(struct zbud_hdr *zh)
{
	unsigned size = zh->size;

	ASSERT_SENTINEL(zh, ZBH);
	BUG_ON(!tmem_oid_valid(&zh->oid));
	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
	zh->size = 0;
	tmem_oid_set_invalid(&zh->oid);
	INVERT_SENTINEL(zh, ZBH);
	zcache_zbud_curr_zbytes -= size;
	atomic_dec(&zcache_zbud_curr_zpages);
	return size;
}
static void zbud_free_and_delist(struct zbud_hdr *zh)
{
	unsigned chunks;
	struct zbud_hdr *zh_other;
	unsigned budnum = zbud_budnum(zh), size;
	struct zbud_page *zbpg =
		container_of(zh, struct zbud_page, buddy[budnum]);

	spin_lock(&zbud_budlists_spinlock);
	spin_lock(&zbpg->lock);
	if (list_empty(&zbpg->bud_list)) {
		/* ignore zombie page... see zbud_evict_pages() */
		spin_unlock(&zbpg->lock);
		spin_unlock(&zbud_budlists_spinlock);
		return;
	}
	size = zbud_free(zh);
	ASSERT_SPINLOCK(&zbpg->lock);
	zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
	if (zh_other->size == 0) { /* was unbuddied: unlist and free */
		chunks = zbud_size_to_chunks(size);
		BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
		list_del_init(&zbpg->bud_list);
		zbud_unbuddied[chunks].count--;
		spin_unlock(&zbud_budlists_spinlock);
		zbud_free_raw_page(zbpg);
	} else { /* was buddied: move remaining buddy to unbuddied list */
		chunks = zbud_size_to_chunks(zh_other->size);
		list_del_init(&zbpg->bud_list);
		zcache_zbud_buddied_count--;
		list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
		zbud_unbuddied[chunks].count++;
		spin_unlock(&zbud_budlists_spinlock);
		spin_unlock(&zbpg->lock);
	}
}
static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
					struct tmem_oid *oid,
					uint32_t index, struct page *page,
					void *cdata, unsigned size)
{
	struct zbud_hdr *zh0, *zh1, *zh = NULL;
	struct zbud_page *zbpg = NULL, *ztmp;
	unsigned nchunks;
	char *to;
	int i, found_good_buddy = 0;

	nchunks = zbud_size_to_chunks(size);
	for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
		spin_lock(&zbud_budlists_spinlock);
		if (!list_empty(&zbud_unbuddied[i].list)) {
			list_for_each_entry_safe(zbpg, ztmp,
				    &zbud_unbuddied[i].list, bud_list) {
				if (spin_trylock(&zbpg->lock)) {
					found_good_buddy = i;
					goto found_unbuddied;
				}
			}
		}
		spin_unlock(&zbud_budlists_spinlock);
	}
	/* didn't find a good buddy, try allocating a new page */
	zbpg = zbud_alloc_raw_page();
	if (unlikely(zbpg == NULL))
		goto out;
	/* ok, have a page; the data was already compressed by the caller,
	 * so just take the locks and link the new zbud in */
	spin_lock(&zbud_budlists_spinlock);
	spin_lock(&zbpg->lock);
	list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
	zbud_unbuddied[nchunks].count++;
	zh = &zbpg->buddy[0];
	goto init_zh;

found_unbuddied:
	ASSERT_SPINLOCK(&zbpg->lock);
	zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
	BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
	if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
		ASSERT_SENTINEL(zh0, ZBH);
		zh = zh1;
	} else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
		ASSERT_SENTINEL(zh1, ZBH);
		zh = zh0;
	} else
		BUG();
	list_del_init(&zbpg->bud_list);
	zbud_unbuddied[found_good_buddy].count--;
	list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
	zcache_zbud_buddied_count++;

init_zh:
	SET_SENTINEL(zh, ZBH);
	zh->size = size;
	zh->index = index;
	zh->oid = *oid;
	zh->pool_id = pool_id;
	zh->client_id = client_id;
	to = zbud_data(zh, size);
	memcpy(to, cdata, size);
	spin_unlock(&zbpg->lock);
	spin_unlock(&zbud_budlists_spinlock);

	zbud_cumul_chunk_counts[nchunks]++;
	atomic_inc(&zcache_zbud_curr_zpages);
	zcache_zbud_cumul_zpages++;
	zcache_zbud_curr_zbytes += size;
	zcache_zbud_cumul_zbytes += size;
out:
	return zh;
}
static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
{
	struct zbud_page *zbpg;
	unsigned budnum = zbud_budnum(zh);
	unsigned int out_len = PAGE_SIZE;
	char *to_va, *from_va;
	unsigned size;
	int ret = 0;

	zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
	spin_lock(&zbpg->lock);
	if (list_empty(&zbpg->bud_list)) {
		/* ignore zombie page... see zbud_evict_pages() */
		ret = -EINVAL;
		goto out;
	}
	ASSERT_SENTINEL(zh, ZBH);
	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
	to_va = kmap_atomic(page);
	size = zh->size;
	from_va = zbud_data(zh, size);
	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
				to_va, &out_len);
	BUG_ON(ret);
	BUG_ON(out_len != PAGE_SIZE);
	kunmap_atomic(to_va);
out:
	spin_unlock(&zbpg->lock);
	return ret;
}
/*
 * The following routines handle shrinking of ephemeral pages by evicting
 * pages "least valuable" first.
 */

static unsigned long zcache_evicted_raw_pages;
static unsigned long zcache_evicted_buddied_pages;
static unsigned long zcache_evicted_unbuddied_pages;

static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
						uint16_t poolid);
static void zcache_put_pool(struct tmem_pool *pool);
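/*
 * "Least valuable" concretely means eviction proceeds in three passes
 * (see zbud_evict_pages() below): completely unused raw pages are freed
 * first, then unbuddied pages (walking the per-chunk-count lists in
 * index order), and fully-buddied pages only as a last resort.
 */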
/*
 * Flush and free all zbuds in a zbpg, then free the pageframe
 */
static void zbud_evict_zbpg(struct zbud_page *zbpg)
{
	struct zbud_hdr *zh;
	int i, j;
	uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
	uint32_t index[ZBUD_MAX_BUDS];
	struct tmem_oid oid[ZBUD_MAX_BUDS];
	struct tmem_pool *pool;

	ASSERT_SPINLOCK(&zbpg->lock);
	BUG_ON(!list_empty(&zbpg->bud_list));
	for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
		zh = &zbpg->buddy[i];
		if (zh->size) {
			client_id[j] = zh->client_id;
			pool_id[j] = zh->pool_id;
			oid[j] = zh->oid;
			index[j] = zh->index;
			j++;
			zbud_free(zh);
		}
	}
	spin_unlock(&zbpg->lock);
	for (i = 0; i < j; i++) {
		pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
		if (pool != NULL) {
			tmem_flush_page(pool, &oid[i], index[i]);
			zcache_put_pool(pool);
		}
	}
	ASSERT_SENTINEL(zbpg, ZBPG);
	spin_lock(&zbpg->lock);
	zbud_free_raw_page(zbpg);
}
/*
 * Free nr pages.  This code is funky because we want to hold the locks
 * protecting various lists for as short a time as possible, and in some
 * circumstances the list may change asynchronously when the list lock is
 * not held.  In some cases we also trylock not only to avoid waiting on a
 * page in use by another cpu, but also to avoid potential deadlock due to
 * lock inversion.
 */
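/*
 * A note on "zombie" pages (a sketch of the protocol implied by the
 * list_empty() checks elsewhere): once a zbpg has been unlinked here for
 * eviction, its empty bud_list is the signal to zbud_free_and_delist()
 * and zbud_decompress() that the page is being torn down, so they back
 * off instead of touching it.
 */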
static void zbud_evict_pages(int nr)
{
	struct zbud_page *zbpg;
	int i;

	/* first try freeing any pages on unused list */
retry_unused_list:
	spin_lock_bh(&zbpg_unused_list_spinlock);
	if (!list_empty(&zbpg_unused_list)) {
		/* can't walk list here, since it may change when unlocked */
		zbpg = list_first_entry(&zbpg_unused_list,
				struct zbud_page, bud_list);
		list_del_init(&zbpg->bud_list);
		zcache_zbpg_unused_list_count--;
		atomic_dec(&zcache_zbud_curr_raw_pages);
		spin_unlock_bh(&zbpg_unused_list_spinlock);
		zcache_free_page(zbpg);
		zcache_evicted_raw_pages++;
		if (--nr <= 0)
			goto out;
		goto retry_unused_list;
	}
	spin_unlock_bh(&zbpg_unused_list_spinlock);

	/* now try freeing unbuddied pages, starting with least space avail */
	for (i = 0; i < MAX_CHUNK; i++) {
retry_unbud_list_i:
		spin_lock_bh(&zbud_budlists_spinlock);
		if (list_empty(&zbud_unbuddied[i].list)) {
			spin_unlock_bh(&zbud_budlists_spinlock);
			continue;
		}
		list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
			if (unlikely(!spin_trylock(&zbpg->lock)))
				continue;
			list_del_init(&zbpg->bud_list);
			zbud_unbuddied[i].count--;
			spin_unlock(&zbud_budlists_spinlock);
			zcache_evicted_unbuddied_pages++;
			/* want budlists unlocked when doing zbpg eviction */
			zbud_evict_zbpg(zbpg);
			local_bh_enable();
			if (--nr <= 0)
				goto out;
			goto retry_unbud_list_i;
		}
		spin_unlock_bh(&zbud_budlists_spinlock);
	}

	/* as a last resort, free buddied pages */
retry_bud_list:
	spin_lock_bh(&zbud_budlists_spinlock);
	if (list_empty(&zbud_buddied_list)) {
		spin_unlock_bh(&zbud_budlists_spinlock);
		goto out;
	}
	list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
		if (unlikely(!spin_trylock(&zbpg->lock)))
			continue;
		list_del_init(&zbpg->bud_list);
		zcache_zbud_buddied_count--;
		spin_unlock(&zbud_budlists_spinlock);
		zcache_evicted_buddied_pages++;
		/* want budlists unlocked when doing zbpg eviction */
		zbud_evict_zbpg(zbpg);
		local_bh_enable();
		if (--nr <= 0)
			goto out;
		goto retry_bud_list;
	}
	spin_unlock_bh(&zbud_budlists_spinlock);
out:
	return;
}
static void zbud_init(void)
{
	int i;

	INIT_LIST_HEAD(&zbud_buddied_list);
	zcache_zbud_buddied_count = 0;
	for (i = 0; i < NCHUNKS; i++) {
		INIT_LIST_HEAD(&zbud_unbuddied[i].list);
		zbud_unbuddied[i].count = 0;
	}
}
#ifdef CONFIG_SYSFS
/*
 * These sysfs routines show a nice distribution of how many zbpg's are
 * currently (and have ever been placed) in each unbuddied list.  It's fun
 * to watch but can probably go away before final merge.
 */
static int zbud_show_unbuddied_list_counts(char *buf)
{
	int i;
	char *p = buf;

	for (i = 0; i < NCHUNKS; i++)
		p += sprintf(p, "%u ", zbud_unbuddied[i].count);
	return p - buf;
}

static int zbud_show_cumul_chunk_counts(char *buf)
{
	unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
	unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
	unsigned long total_chunks_lte_42 = 0;
	char *p = buf;

	for (i = 0; i < NCHUNKS; i++) {
		p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
		chunks += zbud_cumul_chunk_counts[i];
		total_chunks += zbud_cumul_chunk_counts[i];
		sum_total_chunks += i * zbud_cumul_chunk_counts[i];
		if (i == 21)
			total_chunks_lte_21 = total_chunks;
		if (i == 32)
			total_chunks_lte_32 = total_chunks;
		if (i == 42)
			total_chunks_lte_42 = total_chunks;
	}
	p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
		total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
		chunks == 0 ? 0 : sum_total_chunks / chunks);
	return p - buf;
}
#endif
/*
 * This "zv" PAM implementation combines the slab-based zsmalloc
 * with the crypto compression API to maximize the amount of data that can
 * be packed into a physical page.
 *
 * Zv represents a PAM page with the index and object (plus a "size" value
 * necessary for decompression) immediately preceding the compressed data.
 */

#define ZVH_SENTINEL  0x43214321

struct zv_hdr {
	uint32_t pool_id;
	struct tmem_oid oid;
	uint32_t index;
	size_t size;
	DECL_SENTINEL
};
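/*
 * Layout sketch (a restatement of the comment above, not new behavior):
 * each zv allocation within a zsmalloc pool looks like
 *   [struct zv_hdr][clen bytes of compressed data]
 * so decompression needs only the handle: the header supplies the size,
 * and the data follows at (char *)zv + sizeof(struct zv_hdr).
 */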
/* rudimentary policy limits */
/* total number of persistent pages may not exceed this percentage */
static unsigned int zv_page_count_policy_percent = 75;
/*
 * byte count defining poor compression; pages with greater zsize will be
 * rejected
 */
static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
/*
 * byte count defining poor *mean* compression; pages with greater zsize
 * will be rejected until sufficient better-compressed pages are accepted
 * driving the mean below this threshold
 */
static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;

static atomic_t zv_curr_dist_counts[NCHUNKS];
static atomic_t zv_cumul_dist_counts[NCHUNKS];
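/*
 * Worked example (assuming PAGE_SIZE is 4096): zv_max_zsize defaults to
 * 3584 bytes, so a swap page whose compressed size exceeds 7/8 of a page
 * is rejected outright; zv_max_mean_zsize defaults to 2560 bytes, so
 * poorly-compressing pages between those limits are accepted only while
 * the running mean zsize stays at or below 5/8 of a page (see
 * zcache_pampd_create() below).
 */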
static unsigned long zv_create(struct zs_pool *pool, uint32_t pool_id,
				struct tmem_oid *oid, uint32_t index,
				void *cdata, unsigned clen)
{
	struct zv_hdr *zv;
	u32 size = clen + sizeof(struct zv_hdr);
	int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
	unsigned long handle = 0;

	BUG_ON(!irqs_disabled());
	BUG_ON(chunks >= NCHUNKS);
	handle = zs_malloc(pool, size);
	if (!handle)
		goto out;
	atomic_inc(&zv_curr_dist_counts[chunks]);
	atomic_inc(&zv_cumul_dist_counts[chunks]);
	zv = zs_map_object(pool, handle);
	zv->index = index;
	zv->oid = *oid;
	zv->pool_id = pool_id;
	zv->size = clen;
	SET_SENTINEL(zv, ZVH);
	memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
	zs_unmap_object(pool, handle);
out:
	return handle;
}
static void zv_free(struct zs_pool *pool, unsigned long handle)
{
	unsigned long flags;
	struct zv_hdr *zv;
	uint16_t size;
	int chunks;

	zv = zs_map_object(pool, handle);
	ASSERT_SENTINEL(zv, ZVH);
	size = zv->size + sizeof(struct zv_hdr);
	INVERT_SENTINEL(zv, ZVH);
	zs_unmap_object(pool, handle);

	chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
	BUG_ON(chunks >= NCHUNKS);
	atomic_dec(&zv_curr_dist_counts[chunks]);

	local_irq_save(flags);
	zs_free(pool, handle);
	local_irq_restore(flags);
}
static void zv_decompress(struct page *page, unsigned long handle)
{
	unsigned int clen = PAGE_SIZE;
	char *to_va;
	int ret;
	struct zv_hdr *zv;

	zv = zs_map_object(zcache_host.zspool, handle);
	BUG_ON(zv->size == 0);
	ASSERT_SENTINEL(zv, ZVH);
	to_va = kmap_atomic(page);
	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
				zv->size, to_va, &clen);
	kunmap_atomic(to_va);
	zs_unmap_object(zcache_host.zspool, handle);
	BUG_ON(ret);
	BUG_ON(clen != PAGE_SIZE);
}
#ifdef CONFIG_SYSFS
/*
 * show a distribution of compression stats for zv pages.
 */

static int zv_curr_dist_counts_show(char *buf)
{
	unsigned long i, n, chunks = 0, sum_total_chunks = 0;
	char *p = buf;

	for (i = 0; i < NCHUNKS; i++) {
		n = atomic_read(&zv_curr_dist_counts[i]);
		p += sprintf(p, "%lu ", n);
		chunks += n;
		sum_total_chunks += i * n;
	}
	p += sprintf(p, "mean:%lu\n",
		chunks == 0 ? 0 : sum_total_chunks / chunks);
	return p - buf;
}

static int zv_cumul_dist_counts_show(char *buf)
{
	unsigned long i, n, chunks = 0, sum_total_chunks = 0;
	char *p = buf;

	for (i = 0; i < NCHUNKS; i++) {
		n = atomic_read(&zv_cumul_dist_counts[i]);
		p += sprintf(p, "%lu ", n);
		chunks += n;
		sum_total_chunks += i * n;
	}
	p += sprintf(p, "mean:%lu\n",
		chunks == 0 ? 0 : sum_total_chunks / chunks);
	return p - buf;
}
/*
 * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
 * pages that don't compress to less than this value (including metadata
 * overhead) to be rejected.  We don't allow the value to get too close
 * to PAGE_SIZE.
 */
static ssize_t zv_max_zsize_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%u\n", zv_max_zsize);
}

static ssize_t zv_max_zsize_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 10, &val);
	if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
		return -EINVAL;
	zv_max_zsize = val;
	return count;
}
/*
 * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
 * pages that don't compress to less than this value (including metadata
 * overhead) to be rejected UNLESS the mean compression is also smaller
 * than this value.  In other words, we are load-balancing-by-zsize the
 * accepted pages.  Again, we don't allow the value to get too close
 * to PAGE_SIZE.
 */
static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%u\n", zv_max_mean_zsize);
}

static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 10, &val);
	if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
		return -EINVAL;
	zv_max_mean_zsize = val;
	return count;
}
/*
 * setting zv_page_count_policy_percent via sysfs sets an upper bound of
 * persistent (e.g. swap) pages that will be retained according to:
 *     (zv_page_count_policy_percent * totalram_pages) / 100
 * when that limit is reached, further puts will be rejected (until
 * some pages have been flushed).  Note that, due to compression,
 * this number may exceed 100; it defaults to 75 and we set an
 * arbitrary limit of 150.  A poor choice will almost certainly result
 * in OOM's, so this value should only be changed prudently.
 */
static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
						 struct kobj_attribute *attr,
						 char *buf)
{
	return sprintf(buf, "%u\n", zv_page_count_policy_percent);
}

static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
						  struct kobj_attribute *attr,
						  const char *buf, size_t count)
{
	unsigned long val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 10, &val);
	if (err || (val == 0) || (val > 150))
		return -EINVAL;
	zv_page_count_policy_percent = val;
	return count;
}
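/*
 * Example (a sketch, assuming a machine with 1,000,000 RAM pages and the
 * default policy of 75): at most 750,000 compressed persistent pages are
 * retained.  Since several compressed pages typically share one physical
 * page, the RAM actually consumed is well under 75% of RAM, which is why
 * values above 100 can still be safe.
 */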
static struct kobj_attribute zcache_zv_max_zsize_attr = {
	.attr = { .name = "zv_max_zsize", .mode = 0644 },
	.show = zv_max_zsize_show,
	.store = zv_max_zsize_store,
};

static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
	.attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
	.show = zv_max_mean_zsize_show,
	.store = zv_max_mean_zsize_store,
};

static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
	.attr = { .name = "zv_page_count_policy_percent",
		  .mode = 0644 },
	.show = zv_page_count_policy_percent_show,
	.store = zv_page_count_policy_percent_store,
};
#endif /* CONFIG_SYSFS */
/*
 * zcache core code starts here
 */

/* useful stats not collected by cleancache or frontswap */
static unsigned long zcache_flush_total;
static unsigned long zcache_flush_found;
static unsigned long zcache_flobj_total;
static unsigned long zcache_flobj_found;
static unsigned long zcache_failed_eph_puts;
static unsigned long zcache_failed_pers_puts;
/*
 * Tmem operations assume the poolid implies the invoking client.
 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
 * RAMster has each client numbered by cluster node, and a KVM version
 * of zcache would have one client per guest and each client might
 * have multiple pools.
 */
static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
{
	struct tmem_pool *pool = NULL;
	struct zcache_client *cli = NULL;

	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else {
		if (cli_id >= MAX_CLIENTS)
			goto out;
		cli = &zcache_clients[cli_id];
		atomic_inc(&cli->refcount);
	}
	if (poolid < MAX_POOLS_PER_CLIENT) {
		pool = cli->tmem_pools[poolid];
		if (pool != NULL)
			atomic_inc(&pool->refcount);
	}
out:
	return pool;
}
static void zcache_put_pool(struct tmem_pool *pool)
{
	struct zcache_client *cli = NULL;

	if (pool == NULL)
		BUG();
	cli = pool->client;
	atomic_dec(&pool->refcount);
	atomic_dec(&cli->refcount);
}
int zcache_new_client(uint16_t cli_id)
{
	struct zcache_client *cli = NULL;
	int ret = -1;

	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	if (cli->allocated)
		goto out;
	cli->allocated = 1;
#ifdef CONFIG_FRONTSWAP
	cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
	if (cli->zspool == NULL)
		goto out;
#endif
	ret = 0;
out:
	return ret;
}
/* counters for debugging */
static unsigned long zcache_failed_get_free_pages;
static unsigned long zcache_failed_alloc;
static unsigned long zcache_put_to_flush;

/*
 * for now, use named slabs so we can easily track usage; later we can
 * either just use kmalloc, or perhaps add a slab-like allocator
 * to more carefully manage total memory utilization
 */
static struct kmem_cache *zcache_objnode_cache;
static struct kmem_cache *zcache_obj_cache;
static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_obj_count_max;
static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_objnode_count_max;
/*
 * to avoid memory allocation recursion (e.g. due to direct reclaim), we
 * preload all necessary data structures so the hostops callbacks never
 * actually do a malloc
 */
struct zcache_preload {
	void *page;
	struct tmem_obj *obj;
	int nr;
	struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
};
static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
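/*
 * Flow sketch: zcache_do_preload() stocks the per-cpu zcache_preload
 * with one raw page, one tmem_obj, and up to OBJNODE_TREE_MAX_PATH
 * objnodes, returning with preemption disabled on success; the hostops
 * callbacks below then only pop from this stash, so they never call
 * into the page allocator themselves.
 */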
static int zcache_do_preload(struct tmem_pool *pool)
{
	struct zcache_preload *kp;
	struct tmem_objnode *objnode;
	struct tmem_obj *obj;
	void *page;
	int ret = -ENOMEM;

	if (unlikely(zcache_objnode_cache == NULL))
		goto out;
	if (unlikely(zcache_obj_cache == NULL))
		goto out;
	preempt_disable();
	kp = &__get_cpu_var(zcache_preloads);
	while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
		preempt_enable_no_resched();
		objnode = kmem_cache_alloc(zcache_objnode_cache,
				ZCACHE_GFP_MASK);
		if (unlikely(objnode == NULL)) {
			zcache_failed_alloc++;
			goto out;
		}
		preempt_disable();
		kp = &__get_cpu_var(zcache_preloads);
		if (kp->nr < ARRAY_SIZE(kp->objnodes))
			kp->objnodes[kp->nr++] = objnode;
		else
			kmem_cache_free(zcache_objnode_cache, objnode);
	}
	preempt_enable_no_resched();
	obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
	if (unlikely(obj == NULL)) {
		zcache_failed_alloc++;
		goto out;
	}
	page = (void *)__get_free_page(ZCACHE_GFP_MASK);
	if (unlikely(page == NULL)) {
		zcache_failed_get_free_pages++;
		kmem_cache_free(zcache_obj_cache, obj);
		goto out;
	}
	preempt_disable();
	kp = &__get_cpu_var(zcache_preloads);
	if (kp->obj == NULL)
		kp->obj = obj;
	else
		kmem_cache_free(zcache_obj_cache, obj);
	if (kp->page == NULL)
		kp->page = page;
	else
		free_page((unsigned long)page);
	ret = 0;
out:
	return ret;
}
static void *zcache_get_free_page(void)
{
	struct zcache_preload *kp;
	void *page;

	kp = &__get_cpu_var(zcache_preloads);
	page = kp->page;
	BUG_ON(page == NULL);
	kp->page = NULL;
	return page;
}

static void zcache_free_page(void *p)
{
	free_page((unsigned long)p);
}
/*
 * zcache implementation for tmem host ops
 */

static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
{
	struct tmem_objnode *objnode = NULL;
	unsigned long count;
	struct zcache_preload *kp;

	kp = &__get_cpu_var(zcache_preloads);
	if (kp->nr <= 0)
		goto out;
	objnode = kp->objnodes[kp->nr - 1];
	BUG_ON(objnode == NULL);
	kp->objnodes[kp->nr - 1] = NULL;
	kp->nr--;
	count = atomic_inc_return(&zcache_curr_objnode_count);
	if (count > zcache_curr_objnode_count_max)
		zcache_curr_objnode_count_max = count;
out:
	return objnode;
}

static void zcache_objnode_free(struct tmem_objnode *objnode,
				struct tmem_pool *pool)
{
	atomic_dec(&zcache_curr_objnode_count);
	BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
	kmem_cache_free(zcache_objnode_cache, objnode);
}

static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
{
	struct tmem_obj *obj = NULL;
	unsigned long count;
	struct zcache_preload *kp;

	kp = &__get_cpu_var(zcache_preloads);
	obj = kp->obj;
	BUG_ON(obj == NULL);
	kp->obj = NULL;
	count = atomic_inc_return(&zcache_curr_obj_count);
	if (count > zcache_curr_obj_count_max)
		zcache_curr_obj_count_max = count;
	return obj;
}

static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
{
	atomic_dec(&zcache_curr_obj_count);
	BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
	kmem_cache_free(zcache_obj_cache, obj);
}

static struct tmem_hostops zcache_hostops = {
	.obj_alloc = zcache_obj_alloc,
	.obj_free = zcache_obj_free,
	.objnode_alloc = zcache_objnode_alloc,
	.objnode_free = zcache_objnode_free,
};
/*
 * zcache implementations for PAM page descriptor ops
 */

static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_eph_pampd_count_max;
static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_pers_pampd_count_max;

/* forward reference */
static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
				struct tmem_pool *pool, struct tmem_oid *oid,
				uint32_t index)
{
	void *pampd = NULL, *cdata;
	unsigned clen;
	int ret;
	unsigned long count;
	struct page *page = (struct page *)(data);
	struct zcache_client *cli = pool->client;
	uint16_t client_id = get_client_id_from_client(cli);
	unsigned long zv_mean_zsize;
	unsigned long curr_pers_pampd_count;
	u64 total_zsize;

	if (eph) {
		ret = zcache_compress(page, &cdata, &clen);
		if (ret == 0)
			goto out;
		if (clen == 0 || clen > zbud_max_buddy_size()) {
			zcache_compress_poor++;
			goto out;
		}
		pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
						index, page, cdata, clen);
		if (pampd != NULL) {
			count = atomic_inc_return(&zcache_curr_eph_pampd_count);
			if (count > zcache_curr_eph_pampd_count_max)
				zcache_curr_eph_pampd_count_max = count;
		}
	} else {
		curr_pers_pampd_count =
			atomic_read(&zcache_curr_pers_pampd_count);
		if (curr_pers_pampd_count >
		    (zv_page_count_policy_percent * totalram_pages) / 100)
			goto out;
		ret = zcache_compress(page, &cdata, &clen);
		if (ret == 0)
			goto out;
		/* reject if compression is too poor */
		if (clen > zv_max_zsize) {
			zcache_compress_poor++;
			goto out;
		}
		/* reject if mean compression is too poor */
		if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
			total_zsize = zs_get_total_size_bytes(cli->zspool);
			zv_mean_zsize = div_u64(total_zsize,
						curr_pers_pampd_count);
			if (zv_mean_zsize > zv_max_mean_zsize) {
				zcache_mean_compress_poor++;
				goto out;
			}
		}
		pampd = (void *)zv_create(cli->zspool, pool->pool_id,
						oid, index, cdata, clen);
		if (pampd == NULL)
			goto out;
		count = atomic_inc_return(&zcache_curr_pers_pampd_count);
		if (count > zcache_curr_pers_pampd_count_max)
			zcache_curr_pers_pampd_count_max = count;
	}
out:
	return pampd;
}
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd
 */
static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret = 0;

	BUG_ON(is_ephemeral(pool));
	zv_decompress((struct page *)(data), (unsigned long)pampd);
	return ret;
}

/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd, then free the pampd (ephemeral pages are
 * "exclusive gets": a get implies a flush)
 */
static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret = 0;

	BUG_ON(!is_ephemeral(pool));
	zbud_decompress((struct page *)(data), pampd);
	zbud_free_and_delist((struct zbud_hdr *)pampd);
	atomic_dec(&zcache_curr_eph_pampd_count);
	return ret;
}
/*
 * free the pampd and remove it from any zcache lists
 * pampd must no longer be pointed to from any tmem data structures!
 */
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
				struct tmem_oid *oid, uint32_t index)
{
	struct zcache_client *cli = pool->client;

	if (is_ephemeral(pool)) {
		zbud_free_and_delist((struct zbud_hdr *)pampd);
		atomic_dec(&zcache_curr_eph_pampd_count);
		BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
	} else {
		zv_free(cli->zspool, (unsigned long)pampd);
		atomic_dec(&zcache_curr_pers_pampd_count);
		BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
	}
}
static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj)
{
}

static void zcache_pampd_new_obj(struct tmem_obj *obj)
{
}

static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj)
{
	return -1;
}

static bool zcache_pampd_is_remote(void *pampd)
{
	return 0;
}

static struct tmem_pamops zcache_pamops = {
	.create = zcache_pampd_create,
	.get_data = zcache_pampd_get_data,
	.get_data_and_free = zcache_pampd_get_data_and_free,
	.free = zcache_pampd_free,
	.free_obj = zcache_pampd_free_obj,
	.new_obj = zcache_pampd_new_obj,
	.replace_in_obj = zcache_pampd_replace_in_obj,
	.is_remote = zcache_pampd_is_remote,
};
/*
 * zcache compression/decompression and related per-cpu stuff
 */

static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
#define ZCACHE_DSTMEM_ORDER 1

static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
	int ret = 0;
	unsigned char *dmem = __get_cpu_var(zcache_dstmem);
	char *from_va;

	BUG_ON(!irqs_disabled());
	if (unlikely(dmem == NULL))
		goto out;	/* no buffer or no compressor so can't compress */
	*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
	from_va = kmap_atomic(from);
	mb();
	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
				out_len);
	BUG_ON(ret);
	*out_va = dmem;
	kunmap_atomic(from_va);
	ret = 1;
out:
	return ret;
}
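/*
 * Note on ZCACHE_DSTMEM_ORDER: the per-cpu destination buffer is two
 * pages (order 1) even though callers only keep results smaller than a
 * page; the slack absorbs the worst case where a compressor *expands*
 * incompressible data, which is why *out_len is seeded to two pages
 * above and poor results are rejected by the callers.
 */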
static int zcache_comp_cpu_up(int cpu)
{
	struct crypto_comp *tfm;

	tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
	if (IS_ERR(tfm))
		return NOTIFY_BAD;
	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
	return NOTIFY_OK;
}

static void zcache_comp_cpu_down(int cpu)
{
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
	crypto_free_comp(tfm);
	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
}
static int zcache_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct zcache_preload *kp;

	switch (action) {
	case CPU_UP_PREPARE:
		ret = zcache_comp_cpu_up(cpu);
		if (ret != NOTIFY_OK) {
			pr_err("zcache: can't allocate compressor transform\n");
			return ret;
		}
		per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		zcache_comp_cpu_down(cpu);
		free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
			ZCACHE_DSTMEM_ORDER);
		per_cpu(zcache_dstmem, cpu) = NULL;
		kp = &per_cpu(zcache_preloads, cpu);
		while (kp->nr) {
			kmem_cache_free(zcache_objnode_cache,
					kp->objnodes[kp->nr - 1]);
			kp->objnodes[kp->nr - 1] = NULL;
			kp->nr--;
		}
		if (kp->obj) {
			kmem_cache_free(zcache_obj_cache, kp->obj);
			kp->obj = NULL;
		}
		if (kp->page) {
			free_page((unsigned long)kp->page);
			kp->page = NULL;
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block zcache_cpu_notifier_block = {
	.notifier_call = zcache_cpu_notifier
};
#ifdef CONFIG_SYSFS
#define ZCACHE_SYSFS_RO(_name) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%lu\n", zcache_##_name); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}

#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}

#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return _func(buf); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}
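/*
 * Expansion sketch: ZCACHE_SYSFS_RO(flush_total) generates
 * zcache_flush_total_show(), which prints the zcache_flush_total
 * counter, plus a read-only kobj_attribute named "flush_total" that is
 * listed in zcache_attrs[] below and surfaced via sysfs_create_group()
 * in zcache_init().
 */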
ZCACHE_SYSFS_RO(curr_obj_count_max);
ZCACHE_SYSFS_RO(curr_objnode_count_max);
ZCACHE_SYSFS_RO(flush_total);
ZCACHE_SYSFS_RO(flush_found);
ZCACHE_SYSFS_RO(flobj_total);
ZCACHE_SYSFS_RO(flobj_found);
ZCACHE_SYSFS_RO(failed_eph_puts);
ZCACHE_SYSFS_RO(failed_pers_puts);
ZCACHE_SYSFS_RO(zbud_curr_zbytes);
ZCACHE_SYSFS_RO(zbud_cumul_zpages);
ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
ZCACHE_SYSFS_RO(zbud_buddied_count);
ZCACHE_SYSFS_RO(zbpg_unused_list_count);
ZCACHE_SYSFS_RO(evicted_raw_pages);
ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
ZCACHE_SYSFS_RO(evicted_buddied_pages);
ZCACHE_SYSFS_RO(failed_get_free_pages);
ZCACHE_SYSFS_RO(failed_alloc);
ZCACHE_SYSFS_RO(put_to_flush);
ZCACHE_SYSFS_RO(compress_poor);
ZCACHE_SYSFS_RO(mean_compress_poor);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
			zbud_show_unbuddied_list_counts);
ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
			zbud_show_cumul_chunk_counts);
ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
			zv_curr_dist_counts_show);
ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
			zv_cumul_dist_counts_show);
static struct attribute *zcache_attrs[] = {
	&zcache_curr_obj_count_attr.attr,
	&zcache_curr_obj_count_max_attr.attr,
	&zcache_curr_objnode_count_attr.attr,
	&zcache_curr_objnode_count_max_attr.attr,
	&zcache_flush_total_attr.attr,
	&zcache_flobj_total_attr.attr,
	&zcache_flush_found_attr.attr,
	&zcache_flobj_found_attr.attr,
	&zcache_failed_eph_puts_attr.attr,
	&zcache_failed_pers_puts_attr.attr,
	&zcache_compress_poor_attr.attr,
	&zcache_mean_compress_poor_attr.attr,
	&zcache_zbud_curr_raw_pages_attr.attr,
	&zcache_zbud_curr_zpages_attr.attr,
	&zcache_zbud_curr_zbytes_attr.attr,
	&zcache_zbud_cumul_zpages_attr.attr,
	&zcache_zbud_cumul_zbytes_attr.attr,
	&zcache_zbud_buddied_count_attr.attr,
	&zcache_zbpg_unused_list_count_attr.attr,
	&zcache_evicted_raw_pages_attr.attr,
	&zcache_evicted_unbuddied_pages_attr.attr,
	&zcache_evicted_buddied_pages_attr.attr,
	&zcache_failed_get_free_pages_attr.attr,
	&zcache_failed_alloc_attr.attr,
	&zcache_put_to_flush_attr.attr,
	&zcache_zbud_unbuddied_list_counts_attr.attr,
	&zcache_zbud_cumul_chunk_counts_attr.attr,
	&zcache_zv_curr_dist_counts_attr.attr,
	&zcache_zv_cumul_dist_counts_attr.attr,
	&zcache_zv_max_zsize_attr.attr,
	&zcache_zv_max_mean_zsize_attr.attr,
	&zcache_zv_page_count_policy_percent_attr.attr,
	NULL,
};

static struct attribute_group zcache_attr_group = {
	.attrs = zcache_attrs,
	.name = "zcache",
};

#endif /* CONFIG_SYSFS */
/*
 * When zcache is disabled ("frozen"), pools can be created and destroyed,
 * but all puts (and thus all other operations that require memory allocation)
 * must fail.  If zcache is unfrozen, accepts puts, and is then frozen again,
 * data consistency requires that all puts arriving while frozen be converted
 * into flushes.
 */
static bool zcache_freeze;
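/*
 * Concretely (see zcache_put_page() below): while zcache_freeze is set,
 * the preload path is skipped and an incoming put is turned into a
 * tmem_flush_page(), so no stale copy can survive a freeze/unfreeze
 * cycle.
 */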
/*
 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
 */
static int shrink_zcache_memory(struct shrinker *shrink,
				struct shrink_control *sc)
{
	int ret = -1;
	int nr = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;

	if (nr >= 0) {
		if (!(gfp_mask & __GFP_FS))
			/* does this case really need to be skipped? */
			goto out;
		zbud_evict_pages(nr);
	}
	ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
out:
	return ret;
}

static struct shrinker zcache_shrinker = {
	.shrink = shrink_zcache_memory,
	.seeks = DEFAULT_SEEKS,
};
/*
 * zcache shims between cleancache/frontswap ops and tmem
 */

static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
				uint32_t index, struct page *page)
{
	struct tmem_pool *pool;
	int ret = -1;

	BUG_ON(!irqs_disabled());
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (unlikely(pool == NULL))
		goto out;
	if (!zcache_freeze && zcache_do_preload(pool) == 0) {
		/* preload does preempt_disable on success */
		ret = tmem_put(pool, oidp, index, (char *)(page),
				PAGE_SIZE, 0, is_ephemeral(pool));
		if (ret < 0) {
			if (is_ephemeral(pool))
				zcache_failed_eph_puts++;
			else
				zcache_failed_pers_puts++;
		}
		zcache_put_pool(pool);
		preempt_enable_no_resched();
	} else {
		zcache_put_to_flush++;
		if (atomic_read(&pool->obj_count) > 0)
			/* the put fails whether the flush succeeds or not */
			(void)tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
out:
	return ret;
}
static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
				uint32_t index, struct page *page)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;
	size_t size = PAGE_SIZE;

	local_irq_save(flags);
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_get(pool, oidp, index, (char *)(page),
					&size, 0, is_ephemeral(pool));
		zcache_put_pool(pool);
	}
	local_irq_restore(flags);
	return ret;
}
static int zcache_flush_page(int cli_id, int pool_id,
				struct tmem_oid *oidp, uint32_t index)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;

	local_irq_save(flags);
	zcache_flush_total++;
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
	if (ret >= 0)
		zcache_flush_found++;
	local_irq_restore(flags);
	return ret;
}
static int zcache_flush_object(int cli_id, int pool_id,
				struct tmem_oid *oidp)
{
	struct tmem_pool *pool;
	int ret = -1;
	unsigned long flags;

	local_irq_save(flags);
	zcache_flobj_total++;
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (likely(pool != NULL)) {
		if (atomic_read(&pool->obj_count) > 0)
			ret = tmem_flush_object(pool, oidp);
		zcache_put_pool(pool);
	}
	if (ret >= 0)
		zcache_flobj_found++;
	local_irq_restore(flags);
	return ret;
}
static int zcache_destroy_pool(int cli_id, int pool_id)
{
	struct tmem_pool *pool = NULL;
	struct zcache_client *cli = NULL;
	int ret = -1;

	if (pool_id < 0)
		goto out;
	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = cli->tmem_pools[pool_id];
	if (pool == NULL)
		goto out;
	cli->tmem_pools[pool_id] = NULL;
	/* wait for pool activity on other cpus to quiesce */
	while (atomic_read(&pool->refcount) != 0)
		;
	atomic_dec(&cli->refcount);
	local_bh_disable();
	ret = tmem_destroy_pool(pool);
	local_bh_enable();
	kfree(pool);
	pr_info("zcache: destroyed pool id=%d, cli_id=%d\n",
			pool_id, cli_id);
out:
	return ret;
}
static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
	int poolid = -1;
	struct tmem_pool *pool;
	struct zcache_client *cli = NULL;

	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
	if (pool == NULL) {
		pr_info("zcache: pool creation failed: out of memory\n");
		goto out;
	}

	for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
		if (cli->tmem_pools[poolid] == NULL)
			break;
	if (poolid >= MAX_POOLS_PER_CLIENT) {
		pr_info("zcache: pool creation failed: max exceeded\n");
		kfree(pool);
		poolid = -1;
		goto out;
	}
	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = poolid;
	tmem_new_pool(pool, flags);
	cli->tmem_pools[poolid] = pool;
	pr_info("zcache: created %s tmem pool, id=%d, client=%d\n",
		flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
		poolid, cli_id);
out:
	if (cli != NULL)
		atomic_dec(&cli->refcount);
	return poolid;
}
/*
 * Two kernel functionalities currently can be layered on top of tmem.
 * These are "cleancache" which is used as a second-chance cache for clean
 * page cache pages; and "frontswap" which is used for swap pages
 * to avoid writes to disk.  A generic "shim" is provided here for each
 * to translate in-kernel semantics to zcache semantics.
 */
#ifdef CONFIG_CLEANCACHE
static void zcache_cleancache_put_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (likely(ind == index))
		(void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page);
}

static int zcache_cleancache_get_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	int ret = -1;

	if (likely(ind == index))
		ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page);
	return ret;
}

static void zcache_cleancache_flush_page(int pool_id,
					struct cleancache_filekey key,
					pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (likely(ind == index))
		(void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
}

static void zcache_cleancache_flush_inode(int pool_id,
					struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	(void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
}

static void zcache_cleancache_flush_fs(int pool_id)
{
	if (pool_id >= 0)
		(void)zcache_destroy_pool(LOCAL_CLIENT, pool_id);
}

static int zcache_cleancache_init_fs(size_t pagesize)
{
	BUG_ON(sizeof(struct cleancache_filekey) !=
				sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_new_pool(LOCAL_CLIENT, 0);
}

static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	/* shared pools are unsupported and map to private */
	BUG_ON(sizeof(struct cleancache_filekey) !=
				sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_new_pool(LOCAL_CLIENT, 0);
}
static struct cleancache_ops zcache_cleancache_ops = {
	.put_page = zcache_cleancache_put_page,
	.get_page = zcache_cleancache_get_page,
	.invalidate_page = zcache_cleancache_flush_page,
	.invalidate_inode = zcache_cleancache_flush_inode,
	.invalidate_fs = zcache_cleancache_flush_fs,
	.init_shared_fs = zcache_cleancache_init_shared_fs,
	.init_fs = zcache_cleancache_init_fs
};

struct cleancache_ops zcache_cleancache_register_ops(void)
{
	struct cleancache_ops old_ops =
		cleancache_register_ops(&zcache_cleancache_ops);

	return old_ops;
}
#endif
#ifdef CONFIG_FRONTSWAP
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int zcache_frontswap_poolid = -1;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
 * frontswap_get_page(), but has side-effects.  Hence using 8.
 */
#define SWIZ_BITS		8
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
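/*
 * Worked example (assuming SWIZ_BITS is 8, as above): swap offset
 * 0x12345 on swaptype 1 yields oid.oid[0] = (1 << 8) | 0x45 and an
 * intra-object index of iswiz(0x12345) = 0x123, so consecutive swap
 * offsets fan out across 256 distinct tmem objects while iswiz() keeps
 * the per-object page index.
 */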
static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
					struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);
	int ret = -1;
	unsigned long flags;

	BUG_ON(!PageLocked(page));
	if (likely(ind64 == ind)) {
		local_irq_save(flags);
		ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind), page);
		local_irq_restore(flags);
	}
	return ret;
}
/* returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!) */
static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
					struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);
	int ret = -1;

	BUG_ON(!PageLocked(page));
	if (likely(ind64 == ind))
		ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind), page);
	return ret;
}
/* flush a single page from frontswap */
static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	struct tmem_oid oid = oswiz(type, ind);

	if (likely(ind64 == ind))
		(void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
					&oid, iswiz(ind));
}
/* flush all pages from the passed swaptype */
static void zcache_frontswap_flush_area(unsigned type)
{
	struct tmem_oid oid;
	int ind;

	for (ind = SWIZ_MASK; ind >= 0; ind--) {
		oid = oswiz(type, ind);
		(void)zcache_flush_object(LOCAL_CLIENT,
						zcache_frontswap_poolid, &oid);
	}
}
static void zcache_frontswap_init(unsigned ignored)
{
	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (zcache_frontswap_poolid < 0)
		zcache_frontswap_poolid =
			zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST);
}

static struct frontswap_ops zcache_frontswap_ops = {
	.put_page = zcache_frontswap_put_page,
	.get_page = zcache_frontswap_get_page,
	.invalidate_page = zcache_frontswap_flush_page,
	.invalidate_area = zcache_frontswap_flush_area,
	.init = zcache_frontswap_init
};

struct frontswap_ops zcache_frontswap_register_ops(void)
{
	struct frontswap_ops old_ops =
		frontswap_register_ops(&zcache_frontswap_ops);

	return old_ops;
}
#endif
/*
 * zcache initialization
 * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
 * NOTHING HAPPENS!
 */

static int zcache_enabled;

static int __init enable_zcache(char *s)
{
	zcache_enabled = 1;
	return 1;
}
__setup("zcache", enable_zcache);
/* allow independent dynamic disabling of cleancache and frontswap */

static int use_cleancache = 1;

static int __init no_cleancache(char *s)
{
	use_cleancache = 0;
	return 1;
}
__setup("nocleancache", no_cleancache);

static int use_frontswap = 1;

static int __init no_frontswap(char *s)
{
	use_frontswap = 0;
	return 1;
}
__setup("nofrontswap", no_frontswap);
static int __init enable_zcache_compressor(char *s)
{
	strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
	zcache_enabled = 1;
	return 1;
}
__setup("zcache=", enable_zcache_compressor);
static int zcache_comp_init(void)
{
	int ret = 0;

	/* check crypto algorithm */
	if (*zcache_comp_name != '\0') {
		ret = crypto_has_comp(zcache_comp_name, 0, 0);
		if (!ret)
			pr_info("zcache: %s not supported\n",
					zcache_comp_name);
	}
	if (!ret)
		strcpy(zcache_comp_name, "lzo");
	ret = crypto_has_comp(zcache_comp_name, 0, 0);
	if (!ret) {
		ret = 1;
		goto out;
	}
	pr_info("zcache: using %s compressor\n", zcache_comp_name);

	/* alloc percpu transforms */
	ret = 0;
	zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	if (!zcache_comp_pcpu_tfms)
		ret = 1;
out:
	return ret;
}
static int __init zcache_init(void)
{
	int ret = 0;

#ifdef CONFIG_SYSFS
	ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
	if (ret) {
		pr_err("zcache: can't create sysfs\n");
		goto out;
	}
#endif /* CONFIG_SYSFS */
#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
	if (zcache_enabled) {
		unsigned int cpu;

		tmem_register_hostops(&zcache_hostops);
		tmem_register_pamops(&zcache_pamops);
		ret = register_cpu_notifier(&zcache_cpu_notifier_block);
		if (ret) {
			pr_err("zcache: can't register cpu notifier\n");
			goto out;
		}
		ret = zcache_comp_init();
		if (ret) {
			pr_err("zcache: compressor initialization failed\n");
			goto out;
		}
		for_each_online_cpu(cpu) {
			void *pcpu = (void *)(long)cpu;
			zcache_cpu_notifier(&zcache_cpu_notifier_block,
				CPU_UP_PREPARE, pcpu);
		}
	}
	zcache_objnode_cache = kmem_cache_create("zcache_objnode",
				sizeof(struct tmem_objnode), 0, 0, NULL);
	zcache_obj_cache = kmem_cache_create("zcache_obj",
				sizeof(struct tmem_obj), 0, 0, NULL);
	ret = zcache_new_client(LOCAL_CLIENT);
	if (ret) {
		pr_err("zcache: can't create client\n");
		goto out;
	}
#endif
#ifdef CONFIG_CLEANCACHE
	if (zcache_enabled && use_cleancache) {
		struct cleancache_ops old_ops;

		zbud_init();
		register_shrinker(&zcache_shrinker);
		old_ops = zcache_cleancache_register_ops();
		pr_info("zcache: cleancache enabled using kernel "
			"transcendent memory and compression buddies\n");
		if (old_ops.init_fs != NULL)
			pr_warning("zcache: cleancache_ops overridden");
	}
#endif
#ifdef CONFIG_FRONTSWAP
	if (zcache_enabled && use_frontswap) {
		struct frontswap_ops old_ops;

		old_ops = zcache_frontswap_register_ops();
		pr_info("zcache: frontswap enabled using kernel "
			"transcendent memory and zsmalloc\n");
		if (old_ops.init != NULL)
			pr_warning("zcache: frontswap_ops overridden");
	}
#endif
out:
	return ret;
}

module_init(zcache_init)