zcache/debug: Coalesce all debug under CONFIG_ZCACHE_DEBUG
drivers/staging/zcache/zcache-main.c (from platform/adaptation/renesas_rcar/renesas_kernel.git)
1 /*
2  * zcache-main.c
3  *
4  * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
5  * Copyright (c) 2010,2011, Nitin Gupta
6  *
7  * Zcache provides an in-kernel "host implementation" for transcendent memory
8  * ("tmem") and, thus indirectly, for cleancache and frontswap.  Zcache uses
9  * lzo1x compression to improve density and an embedded allocator called
10  * "zbud" which "buddies" two compressed pages semi-optimally in each physical
11  * pageframe.  Zbud is integrally tied into tmem to allow pageframes to
12  * be "reclaimed" efficiently.
13  */
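/*
 * Rough sketch of the put-side data path, summarising the code below
 * rather than adding behaviour:
 *
 *   cleancache/frontswap hook
 *     -> zcache_cleancache_put_page() / zcache_frontswap_put_page()  [shims]
 *       -> zcache_put_page() -> tmem_put()
 *         -> zcache_pampd_create()        [compresses via zcache_compress()]
 *           -> zbud_match_prep() / zbud_create_prep()
 *              [zbud packs up to two compressed pages per pageframe]
 */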
14
15 #include <linux/module.h>
16 #include <linux/cpu.h>
17 #include <linux/highmem.h>
18 #include <linux/list.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/atomic.h>
23 #include <linux/math64.h>
24 #include <linux/crypto.h>
25 #include <linux/swap.h>
26 #include <linux/swapops.h>
27 #include <linux/pagemap.h>
28 #include <linux/writeback.h>
29
30 #include <linux/cleancache.h>
31 #include <linux/frontswap.h>
32 #include "tmem.h"
33 #include "zcache.h"
34 #include "zbud.h"
35 #include "ramster.h"
36 #include "debug.h"
37 #ifdef CONFIG_RAMSTER
38 static bool ramster_enabled __read_mostly;
39 #else
40 #define ramster_enabled false
41 #endif
42
43 #ifndef __PG_WAS_ACTIVE
44 static inline bool PageWasActive(struct page *page)
45 {
46         return true;
47 }
48
49 static inline void SetPageWasActive(struct page *page)
50 {
51 }
52 #endif
53
54 #ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
55 static bool frontswap_has_exclusive_gets __read_mostly = true;
56 #else
57 static bool frontswap_has_exclusive_gets __read_mostly;
58 static inline void frontswap_tmem_exclusive_gets(bool b)
59 {
60 }
61 #endif
62
63 /* enable (or fix code) when Seth's patches are accepted upstream */
64 #define zcache_writeback_enabled 0
65
66 static bool zcache_enabled __read_mostly;
67 static bool disable_cleancache __read_mostly;
68 static bool disable_frontswap __read_mostly;
69 static bool disable_frontswap_ignore_nonactive __read_mostly;
70 static bool disable_cleancache_ignore_nonactive __read_mostly;
71 static char *namestr __read_mostly = "zcache";
72
73 #define ZCACHE_GFP_MASK \
74         (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
75
76 /* crypto API for zcache  */
77 #define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
78 static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
79 static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
80
81 enum comp_op {
82         ZCACHE_COMPOP_COMPRESS,
83         ZCACHE_COMPOP_DECOMPRESS
84 };
85
86 static inline int zcache_comp_op(enum comp_op op,
87                                 const u8 *src, unsigned int slen,
88                                 u8 *dst, unsigned int *dlen)
89 {
90         struct crypto_comp *tfm;
91         int ret = -1;
92
93         BUG_ON(!zcache_comp_pcpu_tfms);
94         tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
95         BUG_ON(!tfm);
96         switch (op) {
97         case ZCACHE_COMPOP_COMPRESS:
98                 ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
99                 break;
100         case ZCACHE_COMPOP_DECOMPRESS:
101                 ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
102                 break;
103         default:
104                 ret = -EINVAL;
105         }
106         put_cpu();
107         return ret;
108 }
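/*
 * Illustrative round trip through zcache_comp_op(); this mirrors what
 * zcache_compress() and zcache_decompress() below actually do, with
 * hypothetical buffer names (src_va, dst_buf):
 *
 *	unsigned int clen = PAGE_SIZE << 1, dlen = PAGE_SIZE;
 *
 *	if (!zcache_comp_op(ZCACHE_COMPOP_COMPRESS, src_va, PAGE_SIZE,
 *			    dst_buf, &clen))
 *		(void)zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, dst_buf, clen,
 *				     src_va, &dlen);
 *
 * The length arguments are in/out: buffer capacity on entry, bytes
 * produced on exit.  Per-cpu tfm selection (get_cpu()/put_cpu()) is
 * handled inside zcache_comp_op() itself.
 */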
109
110 /*
111  * policy parameters
112  */
113
114 /*
115  * byte count defining poor compression; pages with greater zsize will be
116  * rejected
117  */
118 static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
119 /*
120  * byte count defining poor *mean* compression; pages with greater zsize
121  * will be rejected until sufficient better-compressed pages are accepted
122  * driving the mean below this threshold
123  */
124 static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
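/*
 * Worked example, assuming PAGE_SIZE == 4096: zbud_max_zsize is 3584 and
 * zbud_max_mean_zsize is 2560.  A persistent zpage compressing to more
 * than 3584 bytes is always rejected; one compressing to between 2560
 * and 3584 bytes is accepted only while the running mean
 * (zcache_pers_zbytes / zcache_pers_zpages) stays at or below 2560.
 * See zcache_pampd_pers_create().
 */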
125
126 /*
127  * For now, we use named slabs so we can easily track usage; later we can
128  * either just use kmalloc, or perhaps add a slab-like allocator
129  * to more carefully manage total memory utilization.
130  */
131 static struct kmem_cache *zcache_objnode_cache;
132 static struct kmem_cache *zcache_obj_cache;
133
134 static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
135
136 /* Used by debug.c */
137 ssize_t zcache_pers_zpages;
138 u64 zcache_pers_zbytes;
139 ssize_t zcache_eph_pageframes;
140 ssize_t zcache_pers_pageframes;
141
142 /* Used by this code. */
143 ssize_t zcache_last_active_file_pageframes;
144 ssize_t zcache_last_inactive_file_pageframes;
145 ssize_t zcache_last_active_anon_pageframes;
146 ssize_t zcache_last_inactive_anon_pageframes;
147 #ifdef CONFIG_ZCACHE_WRITEBACK
148 ssize_t zcache_writtenback_pages;
149 ssize_t zcache_outstanding_writeback_pages;
150 #endif
151 /*
152  * zcache core code starts here
153  */
154
155 static struct zcache_client zcache_host;
156 static struct zcache_client zcache_clients[MAX_CLIENTS];
157
158 static inline bool is_local_client(struct zcache_client *cli)
159 {
160         return cli == &zcache_host;
161 }
162
163 static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
164 {
165         struct zcache_client *cli = &zcache_host;
166
167         if (cli_id != LOCAL_CLIENT) {
168                 if (cli_id >= MAX_CLIENTS)
169                         goto out;
170                 cli = &zcache_clients[cli_id];
171         }
172 out:
173         return cli;
174 }
175
176 /*
177  * Tmem operations assume the poolid implies the invoking client.
178  * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
179  * RAMster has each client numbered by cluster node, and a KVM version
180  * of zcache would have one client per guest and each client might
181  * have a poolid==N.
182  */
183 struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
184 {
185         struct tmem_pool *pool = NULL;
186         struct zcache_client *cli = NULL;
187
188         cli = zcache_get_client_by_id(cli_id);
189         if (cli == NULL)
190                 goto out;
191         if (!is_local_client(cli))
192                 atomic_inc(&cli->refcount);
193         if (poolid < MAX_POOLS_PER_CLIENT) {
194                 pool = cli->tmem_pools[poolid];
195                 if (pool != NULL)
196                         atomic_inc(&pool->refcount);
197         }
198 out:
199         return pool;
200 }
201
202 void zcache_put_pool(struct tmem_pool *pool)
203 {
204         struct zcache_client *cli = NULL;
205
206         if (pool == NULL)
207                 BUG();
208         cli = pool->client;
209         atomic_dec(&pool->refcount);
210         if (!is_local_client(cli))
211                 atomic_dec(&cli->refcount);
212 }
213
214 int zcache_new_client(uint16_t cli_id)
215 {
216         struct zcache_client *cli;
217         int ret = -1;
218
219         cli = zcache_get_client_by_id(cli_id);
220         if (cli == NULL)
221                 goto out;
222         if (cli->allocated)
223                 goto out;
224         cli->allocated = 1;
225         ret = 0;
226 out:
227         return ret;
228 }
229
230 /*
231  * zcache implementation for tmem host ops
232  */
233
234 static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
235 {
236         struct tmem_objnode *objnode = NULL;
237         struct zcache_preload *kp;
238         int i;
239
240         kp = &__get_cpu_var(zcache_preloads);
241         for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
242                 objnode = kp->objnodes[i];
243                 if (objnode != NULL) {
244                         kp->objnodes[i] = NULL;
245                         break;
246                 }
247         }
248         BUG_ON(objnode == NULL);
249         inc_zcache_objnode_count();
250         return objnode;
251 }
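/*
 * Note on the BUG_ON() above: this allocator is expected to be reached
 * only on the tmem_put() path, after zcache_pampd_create() has refilled
 * the per-cpu kp->objnodes[] stash with irqs disabled (see
 * zcache_put_page()), so an empty stash here indicates a preload-protocol
 * bug rather than a transient allocation failure.
 */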
252
253 static void zcache_objnode_free(struct tmem_objnode *objnode,
254                                         struct tmem_pool *pool)
255 {
256         dec_zcache_objnode_count();
257         kmem_cache_free(zcache_objnode_cache, objnode);
258 }
259
260 static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
261 {
262         struct tmem_obj *obj = NULL;
263         struct zcache_preload *kp;
264
265         kp = &__get_cpu_var(zcache_preloads);
266         obj = kp->obj;
267         BUG_ON(obj == NULL);
268         kp->obj = NULL;
269         inc_zcache_obj_count();
270         return obj;
271 }
272
273 static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
274 {
275         dec_zcache_obj_count();
276         kmem_cache_free(zcache_obj_cache, obj);
277 }
278
279 static struct tmem_hostops zcache_hostops = {
280         .obj_alloc = zcache_obj_alloc,
281         .obj_free = zcache_obj_free,
282         .objnode_alloc = zcache_objnode_alloc,
283         .objnode_free = zcache_objnode_free,
284 };
285
286 static struct page *zcache_alloc_page(void)
287 {
288         struct page *page = alloc_page(ZCACHE_GFP_MASK);
289
290         if (page != NULL)
291                 inc_zcache_pageframes_alloced();
292         return page;
293 }
294
295 static void zcache_free_page(struct page *page)
296 {
297         long curr_pageframes;
298         static long max_pageframes, min_pageframes;
299
300         if (page == NULL)
301                 BUG();
302         __free_page(page);
303         inc_zcache_pageframes_freed();
304         curr_pageframes = curr_pageframes_count();
305         if (curr_pageframes > max_pageframes)
306                 max_pageframes = curr_pageframes;
307         if (curr_pageframes < min_pageframes)
308                 min_pageframes = curr_pageframes;
309 #ifdef CONFIG_ZCACHE_DEBUG
310         if (curr_pageframes > 2L || curr_pageframes < -2L) {
311                 /* pr_info here */
312         }
313 #endif
314 }
315
316 /*
317  * zcache implementations for PAM page descriptor ops
318  */
319
320 /* forward reference */
321 static void zcache_compress(struct page *from,
322                                 void **out_va, unsigned *out_len);
323
324 static struct page *zcache_evict_eph_pageframe(void);
325
326 static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
327                                         struct tmem_handle *th)
328 {
329         void *pampd = NULL, *cdata = data;
330         unsigned clen = size;
331         struct page *page = (struct page *)(data), *newpage;
332
333         if (!raw) {
334                 zcache_compress(page, &cdata, &clen);
335                 if (clen > zbud_max_buddy_size()) {
336                         inc_zcache_compress_poor();
337                         goto out;
338                 }
339         } else {
340                 BUG_ON(clen > zbud_max_buddy_size());
341         }
342
343         /* look for space via an existing match first */
344         pampd = (void *)zbud_match_prep(th, true, cdata, clen);
345         if (pampd != NULL)
346                 goto got_pampd;
347
348         /* no match, now we need to find (or free up) a full page */
349         newpage = zcache_alloc_page();
350         if (newpage != NULL)
351                 goto create_in_new_page;
352
353         inc_zcache_failed_getfreepages();
354         /* can't allocate a page, evict an ephemeral page via LRU */
355         newpage = zcache_evict_eph_pageframe();
356         if (newpage == NULL) {
357                 inc_zcache_eph_ate_tail_failed();
358                 goto out;
359         }
360         inc_zcache_eph_ate_tail();
361
362 create_in_new_page:
363         pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
364         BUG_ON(pampd == NULL);
365         inc_zcache_eph_pageframes();
366
367 got_pampd:
368         inc_zcache_eph_zbytes(clen);
369         inc_zcache_eph_zpages();
370         if (ramster_enabled && raw)
371                 ramster_count_foreign_pages(true, 1);
372 out:
373         return pampd;
374 }
375
376 static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
377                                         struct tmem_handle *th)
378 {
379         void *pampd = NULL, *cdata = data;
380         unsigned clen = size;
381         struct page *page = (struct page *)(data), *newpage;
382         unsigned long zbud_mean_zsize;
383         unsigned long curr_pers_zpages, total_zsize;
384
385         if (data == NULL) {
386                 BUG_ON(!ramster_enabled);
387                 goto create_pampd;
388         }
389         curr_pers_zpages = zcache_pers_zpages;
390 /* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
391         if (!raw)
392                 zcache_compress(page, &cdata, &clen);
393         /* reject if compression is too poor */
394         if (clen > zbud_max_zsize) {
395                 inc_zcache_compress_poor();
396                 goto out;
397         }
398         /* reject if mean compression is too poor */
399         if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
400                 total_zsize = zcache_pers_zbytes;
401                 if ((long)total_zsize < 0)
402                         total_zsize = 0;
403                 zbud_mean_zsize = div_u64(total_zsize,
404                                         curr_pers_zpages);
405                 if (zbud_mean_zsize > zbud_max_mean_zsize) {
406                         inc_zcache_mean_compress_poor();
407                         goto out;
408                 }
409         }
410
411 create_pampd:
412         /* look for space via an existing match first */
413         pampd = (void *)zbud_match_prep(th, false, cdata, clen);
414         if (pampd != NULL)
415                 goto got_pampd;
416
417         /* no match, now we need to find (or free up) a full page */
418         newpage = zcache_alloc_page();
419         if (newpage != NULL)
420                 goto create_in_new_page;
421         /*
422          * FIXME do the following only if eph is oversized?
423          * if (zcache_eph_pageframes >
424          * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
425          * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
426          */
427         inc_zcache_failed_getfreepages();
428         /* can't allocate a page, evict an ephemeral page via LRU */
429         newpage = zcache_evict_eph_pageframe();
430         if (newpage == NULL) {
431                 inc_zcache_pers_ate_eph_failed();
432                 goto out;
433         }
434         inc_zcache_pers_ate_eph();
435
436 create_in_new_page:
437         pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
438         BUG_ON(pampd == NULL);
439         inc_zcache_pers_pageframes();
440
441 got_pampd:
442         inc_zcache_pers_zpages();
443         inc_zcache_pers_zbytes(clen);
444         if (ramster_enabled && raw)
445                 ramster_count_foreign_pages(false, 1);
446 out:
447         return pampd;
448 }
449
450 /*
451  * This is called directly from zcache_put_page to pre-allocate space
452  * to store a zpage.
453  */
454 void *zcache_pampd_create(char *data, unsigned int size, bool raw,
455                                         int eph, struct tmem_handle *th)
456 {
457         void *pampd = NULL;
458         struct zcache_preload *kp;
459         struct tmem_objnode *objnode;
460         struct tmem_obj *obj;
461         int i;
462
463         BUG_ON(!irqs_disabled());
464         /* pre-allocate per-cpu metadata */
465         BUG_ON(zcache_objnode_cache == NULL);
466         BUG_ON(zcache_obj_cache == NULL);
467         kp = &__get_cpu_var(zcache_preloads);
468         for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
469                 objnode = kp->objnodes[i];
470                 if (objnode == NULL) {
471                         objnode = kmem_cache_alloc(zcache_objnode_cache,
472                                                         ZCACHE_GFP_MASK);
473                         if (unlikely(objnode == NULL)) {
474                                 inc_zcache_failed_alloc();
475                                 goto out;
476                         }
477                         kp->objnodes[i] = objnode;
478                 }
479         }
480         if (kp->obj == NULL) {
481                 obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
482                 kp->obj = obj;
483         }
484         if (unlikely(kp->obj == NULL)) {
485                 inc_zcache_failed_alloc();
486                 goto out;
487         }
488         /*
489  * OK, we have all the metadata pre-allocated; now handle the data.
490  * Since how we allocate the data depends on whether the pool is
491  * ephemeral or persistent, we split into separate sub-functions here.
492          */
493         if (eph)
494                 pampd = zcache_pampd_eph_create(data, size, raw, th);
495         else
496                 pampd = zcache_pampd_pers_create(data, size, raw, th);
497 out:
498         return pampd;
499 }
500
501 /*
502  * This is a pamops called via tmem_put and is necessary to "finish"
503  * a pampd creation.
504  */
505 void zcache_pampd_create_finish(void *pampd, bool eph)
506 {
507         zbud_create_finish((struct zbudref *)pampd, eph);
508 }
509
510 /*
511  * This is passed as a function parameter to zbud_decompress so that
512  * zbud need not be familiar with the details of crypto. It assumes that
513  * the bytes from_va and to_va through from_va+size-1 and to_va+size-1 are
514  * kmapped.  It must be successful, else there is a logic bug somewhere.
515  */
516 static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
517 {
518         int ret;
519         unsigned int outlen = PAGE_SIZE;
520
521         ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
522                                 to_va, &outlen);
523         BUG_ON(ret);
524         BUG_ON(outlen != PAGE_SIZE);
525 }
526
527 /*
528  * Decompress from the kernel va to a pageframe
529  */
530 void zcache_decompress_to_page(char *from_va, unsigned int size,
531                                         struct page *to_page)
532 {
533         char *to_va = kmap_atomic(to_page);
534         zcache_decompress(from_va, size, to_va);
535         kunmap_atomic(to_va);
536 }
537
538 /*
539  * fill the pageframe corresponding to the struct page with the data
540  * from the passed pampd
541  */
542 static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
543                                         void *pampd, struct tmem_pool *pool,
544                                         struct tmem_oid *oid, uint32_t index)
545 {
546         int ret;
547         bool eph = !is_persistent(pool);
548
549         BUG_ON(preemptible());
550         BUG_ON(eph);    /* fix later if shared pools get implemented */
551         BUG_ON(pampd_is_remote(pampd));
552         if (raw)
553                 ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
554                                                 sizep, eph);
555         else {
556                 ret = zbud_decompress((struct page *)(data),
557                                         (struct zbudref *)pampd, false,
558                                         zcache_decompress);
559                 *sizep = PAGE_SIZE;
560         }
561         return ret;
562 }
563
564 /*
565  * fill the pageframe corresponding to the struct page with the data
566  * from the passed pampd
567  */
568 static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
569                                         void *pampd, struct tmem_pool *pool,
570                                         struct tmem_oid *oid, uint32_t index)
571 {
572         int ret;
573         bool eph = !is_persistent(pool);
574         struct page *page = NULL;
575         unsigned int zsize, zpages;
576
577         BUG_ON(preemptible());
578         BUG_ON(pampd_is_remote(pampd));
579         if (raw)
580                 ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
581                                                 sizep, eph);
582         else {
583                 ret = zbud_decompress((struct page *)(data),
584                                         (struct zbudref *)pampd, eph,
585                                         zcache_decompress);
586                 *sizep = PAGE_SIZE;
587         }
588         page = zbud_free_and_delist((struct zbudref *)pampd, eph,
589                                         &zsize, &zpages);
590         if (eph) {
591                 if (page)
592                         dec_zcache_eph_pageframes();
593                 dec_zcache_eph_zpages(zpages);
594                 dec_zcache_eph_zbytes(zsize);
595         } else {
596                 if (page)
597                         dec_zcache_pers_pageframes();
598                 dec_zcache_pers_zpages(zpages);
599                 dec_zcache_pers_zbytes(zsize);
600         }
601         if (!is_local_client(pool->client))
602                 ramster_count_foreign_pages(eph, -1);
603         if (page)
604                 zcache_free_page(page);
605         return ret;
606 }
607
608 /*
609  * free the pampd and remove it from any zcache lists
610  * pampd must no longer be pointed to from any tmem data structures!
611  */
612 static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
613                               struct tmem_oid *oid, uint32_t index, bool acct)
614 {
615         struct page *page = NULL;
616         unsigned int zsize, zpages;
617
618         BUG_ON(preemptible());
619         if (pampd_is_remote(pampd)) {
620                 BUG_ON(!ramster_enabled);
621                 pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
622                 if (pampd == NULL)
623                         return;
624         }
625         if (is_ephemeral(pool)) {
626                 page = zbud_free_and_delist((struct zbudref *)pampd,
627                                                 true, &zsize, &zpages);
628                 if (page)
629                         dec_zcache_eph_pageframes();
630                 dec_zcache_eph_zpages(zpages);
631                 dec_zcache_eph_zbytes(zsize);
632                 /* FIXME CONFIG_RAMSTER... check acct parameter? */
633         } else {
634                 page = zbud_free_and_delist((struct zbudref *)pampd,
635                                                 false, &zsize, &zpages);
636                 if (page)
637                         dec_zcache_pers_pageframes();
638                 dec_zcache_pers_zpages(zpages);
639                 dec_zcache_pers_zbytes(zsize);
640         }
641         if (!is_local_client(pool->client))
642                 ramster_count_foreign_pages(is_ephemeral(pool), -1);
643         if (page)
644                 zcache_free_page(page);
645 }
646
647 static struct tmem_pamops zcache_pamops = {
648         .create_finish = zcache_pampd_create_finish,
649         .get_data = zcache_pampd_get_data,
650         .get_data_and_free = zcache_pampd_get_data_and_free,
651         .free = zcache_pampd_free,
652 };
653
654 /*
655  * zcache compression/decompression and related per-cpu stuff
656  */
657
658 static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
659 #define ZCACHE_DSTMEM_ORDER 1
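/*
 * zcache_dstmem is an order-1 (PAGE_SIZE << 1, i.e. 8KiB with 4KiB pages)
 * per-cpu scratch buffer, sized so zcache_compress() can offer the crypto
 * layer a destination large enough even when "compression" expands the
 * data; out_len is initialized to PAGE_SIZE << ZCACHE_DSTMEM_ORDER below.
 */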
660
661 static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
662 {
663         int ret;
664         unsigned char *dmem = __get_cpu_var(zcache_dstmem);
665         char *from_va;
666
667         BUG_ON(!irqs_disabled());
668         /* no buffer or no compressor so can't compress */
669         BUG_ON(dmem == NULL);
670         *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
671         from_va = kmap_atomic(from);
672         mb();
673         ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
674                                 out_len);
675         BUG_ON(ret);
676         *out_va = dmem;
677         kunmap_atomic(from_va);
678 }
679
680 static int zcache_comp_cpu_up(int cpu)
681 {
682         struct crypto_comp *tfm;
683
684         tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
685         if (IS_ERR(tfm))
686                 return NOTIFY_BAD;
687         *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
688         return NOTIFY_OK;
689 }
690
691 static void zcache_comp_cpu_down(int cpu)
692 {
693         struct crypto_comp *tfm;
694
695         tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
696         crypto_free_comp(tfm);
697         *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
698 }
699
700 static int zcache_cpu_notifier(struct notifier_block *nb,
701                                 unsigned long action, void *pcpu)
702 {
703         int ret, i, cpu = (long)pcpu;
704         struct zcache_preload *kp;
705
706         switch (action) {
707         case CPU_UP_PREPARE:
708                 ret = zcache_comp_cpu_up(cpu);
709                 if (ret != NOTIFY_OK) {
710                         pr_err("%s: can't allocate compressor xform\n",
711                                 namestr);
712                         return ret;
713                 }
714                 per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
715                         GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
716                 if (ramster_enabled)
717                         ramster_cpu_up(cpu);
718                 break;
719         case CPU_DEAD:
720         case CPU_UP_CANCELED:
721                 zcache_comp_cpu_down(cpu);
722                 free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
723                         ZCACHE_DSTMEM_ORDER);
724                 per_cpu(zcache_dstmem, cpu) = NULL;
725                 kp = &per_cpu(zcache_preloads, cpu);
726                 for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
727                         if (kp->objnodes[i])
728                                 kmem_cache_free(zcache_objnode_cache,
729                                                 kp->objnodes[i]);
730                 }
731                 if (kp->obj) {
732                         kmem_cache_free(zcache_obj_cache, kp->obj);
733                         kp->obj = NULL;
734                 }
735                 if (ramster_enabled)
736                         ramster_cpu_down(cpu);
737                 break;
738         default:
739                 break;
740         }
741         return NOTIFY_OK;
742 }
743
744 static struct notifier_block zcache_cpu_notifier_block = {
745         .notifier_call = zcache_cpu_notifier
746 };
747
748 /*
749  * The following code interacts with the zbud eviction and zbud
750  * zombify code to access LRU pages
751  */
752
753 static struct page *zcache_evict_eph_pageframe(void)
754 {
755         struct page *page;
756         unsigned int zsize = 0, zpages = 0;
757
758         page = zbud_evict_pageframe_lru(&zsize, &zpages);
759         if (page == NULL)
760                 goto out;
761         dec_zcache_eph_zbytes(zsize);
762         dec_zcache_eph_zpages(zpages);
763         inc_zcache_evicted_eph_zpages(zpages);
764         dec_zcache_eph_pageframes();
765         inc_zcache_evicted_eph_pageframes();
766 out:
767         return page;
768 }
769
770 #ifdef CONFIG_ZCACHE_WRITEBACK
771
772 static atomic_t zcache_outstanding_writeback_pages_atomic = ATOMIC_INIT(0);
773
774 static inline void inc_zcache_outstanding_writeback_pages(void)
775 {
776         zcache_outstanding_writeback_pages =
777             atomic_inc_return(&zcache_outstanding_writeback_pages_atomic);
778 }
779 static inline void dec_zcache_outstanding_writeback_pages(void)
780 {
781         zcache_outstanding_writeback_pages =
782           atomic_dec_return(&zcache_outstanding_writeback_pages_atomic);
783 };
784 static void unswiz(struct tmem_oid oid, u32 index,
785                                 unsigned *type, pgoff_t *offset);
786
787 /*
788  *  Choose an LRU persistent pageframe and attempt to write it back to
789  *  the backing swap disk by calling frontswap_writeback on both zpages.
790  *
791  *  This is work-in-progress.
792  */
793
794 static void zcache_end_swap_write(struct bio *bio, int err)
795 {
796         end_swap_bio_write(bio, err);
797         dec_zcache_outstanding_writeback_pages();
798         zcache_writtenback_pages++;
799 }
800
801 /*
802  * zcache_get_swap_cache_page
803  *
804  * This is an adaptation of read_swap_cache_async()
805  *
806  * The caller supplies new_page rather than receiving a retpage.
807  * Returns 0 if the page was already in the swap cache (new_page unused, not locked)
808  * Returns 1 if new_page was added and needs to be populated (new_page is locked)
809  */
810 static int zcache_get_swap_cache_page(int type, pgoff_t offset,
811                                 struct page *new_page)
812 {
813         struct page *found_page;
814         swp_entry_t entry = swp_entry(type, offset);
815         int err;
816
817         BUG_ON(new_page == NULL);
818         do {
819                 /*
820                  * First check the swap cache.  Since this is normally
821                  * called after lookup_swap_cache() failed, re-calling
822                  * that would confuse statistics.
823                  */
824                 found_page = find_get_page(&swapper_space, entry.val);
825                 if (found_page)
826                         return 0;
827
828                 /*
829                  * call radix_tree_preload() while we can wait.
830                  */
831                 err = radix_tree_preload(GFP_KERNEL);
832                 if (err)
833                         break;
834
835                 /*
836                  * Swap entry may have been freed since our caller observed it.
837                  */
838                 err = swapcache_prepare(entry);
839                 if (err == -EEXIST) { /* seems racy */
840                         radix_tree_preload_end();
841                         continue;
842                 }
843                 if (err) { /* swp entry is obsolete ? */
844                         radix_tree_preload_end();
845                         break;
846                 }
847
848                 /* May fail (-ENOMEM) if radix-tree node allocation failed. */
849                 __set_page_locked(new_page);
850                 SetPageSwapBacked(new_page);
851                 err = __add_to_swap_cache(new_page, entry);
852                 if (likely(!err)) {
853                         radix_tree_preload_end();
854                         lru_cache_add_anon(new_page);
855                         return 1;
856                 }
857                 radix_tree_preload_end();
858                 ClearPageSwapBacked(new_page);
859                 __clear_page_locked(new_page);
860                 /*
861                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely
862                  * clear SWAP_HAS_CACHE flag.
863                  */
864                 swapcache_free(entry, NULL);
865                 /* FIXME: is it possible to get here without err==-ENOMEM?
866                  * If not, we can dispense with the do loop, use goto retry */
867         } while (err != -ENOMEM);
868
869         return -ENOMEM;
870 }
871
872 /*
873  * Given a frontswap zpage in zcache (identified by type/offset) and
874  * an empty page, put the page into the swap cache, use frontswap
875  * to get the page from zcache into the empty page, then give it
876  * to the swap subsystem to send to disk (carefully avoiding the
877  * possibility that frontswap might snatch it back).
878  * Returns < 0 if error, 0 if successful, and 1 if successful but
879  * the newpage passed in not needed and should be freed.
880  */
881 static int zcache_frontswap_writeback_zpage(int type, pgoff_t offset,
882                                         struct page *newpage)
883 {
884         struct page *page = newpage;
885         int ret;
886         struct writeback_control wbc = {
887                 .sync_mode = WB_SYNC_NONE,
888         };
889
890         ret = zcache_get_swap_cache_page(type, offset, page);
891         if (ret < 0)
892                 return ret;
893         else if (ret == 0) {
894                 /* more uptodate page is already in swapcache */
895                 __frontswap_invalidate_page(type, offset);
896                 return 1;
897         }
898
899         BUG_ON(!frontswap_has_exclusive_gets); /* load must also invalidate */
900         /* FIXME: how is it possible to get here when page is unlocked? */
901         __frontswap_load(page);
902         SetPageUptodate(page);  /* above does SetPageDirty, is that enough? */
903
904         /* start writeback */
905         SetPageReclaim(page);
906         /*
907          * Return value is ignored here because it doesn't change anything
908          * for us.  Page is returned unlocked.
909          */
910         (void)__swap_writepage(page, &wbc, zcache_end_swap_write);
911         page_cache_release(page);
912         inc_zcache_outstanding_writeback_pages();
913
914         return 0;
915 }
916
917 /*
918  * The following is still a magic number... we want to allow forward progress
919  * for writeback because it clears out needed RAM when under pressure, but
920  * we don't want to allow writeback to absorb and queue too many GFP_KERNEL
921  * pages if the swap device is very slow.
922  */
923 #define ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES 6400
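/*
 * For scale (assuming 4KiB pages): 6400 outstanding pages is roughly
 * 25MiB of GFP_KERNEL pageframes queued against the swap device; once the
 * outstanding count exceeds this, zcache_frontswap_writeback() below
 * declines to fall back to aggressive GFP_KERNEL allocation.
 */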
924
925 /*
926  * Try to allocate two free pages, first using a non-aggressive alloc,
927  * then by evicting zcache ephemeral (clean pagecache) pages, and last
928  * by aggressive GFP_KERNEL alloc.  We allow zbud to choose a pageframe
929  * consisting of 1-2 zbuds/zpages, then call the writeback_zpage helper
930  * function above for each.
931  */
932 static int zcache_frontswap_writeback(void)
933 {
934         struct tmem_handle th[2];
935         int ret = 0;
936         int nzbuds, writeback_ret;
937         unsigned type;
938         struct page *znewpage1 = NULL, *znewpage2 = NULL;
939         struct page *evictpage1 = NULL, *evictpage2 = NULL;
940         struct page *newpage1 = NULL, *newpage2 = NULL;
941         struct page *page1 = NULL, *page2 = NULL;
942         pgoff_t offset;
943
944         znewpage1 = alloc_page(ZCACHE_GFP_MASK);
945         znewpage2 = alloc_page(ZCACHE_GFP_MASK);
946         if (znewpage1 == NULL)
947                 evictpage1 = zcache_evict_eph_pageframe();
948         if (znewpage2 == NULL)
949                 evictpage2 = zcache_evict_eph_pageframe();
950
951         if ((evictpage1 == NULL || evictpage2 == NULL) &&
952             atomic_read(&zcache_outstanding_writeback_pages_atomic) >
953                                 ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES) {
954                 goto free_and_out;
955         }
956         if (znewpage1 == NULL && evictpage1 == NULL)
957                 newpage1 = alloc_page(GFP_KERNEL);
958         if (znewpage2 == NULL && evictpage2 == NULL)
959                 newpage2 = alloc_page(GFP_KERNEL);
960         if (newpage1 == NULL || newpage2 == NULL)
961                         goto free_and_out;
962
963         /* ok, we have two pageframes pre-allocated, get a pair of zbuds */
964         nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
965         if (nzbuds == 0) {
966                 ret = -ENOENT;
967                 goto free_and_out;
968         }
969
970         /* process the first zbud */
971         unswiz(th[0].oid, th[0].index, &type, &offset);
972         page1 = (znewpage1 != NULL) ? znewpage1 :
973                         ((newpage1 != NULL) ? newpage1 : evictpage1);
974         writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page1);
975         if (writeback_ret < 0) {
976                 ret = -ENOMEM;
977                 goto free_and_out;
978         }
979         if (evictpage1 != NULL)
980                 zcache_pageframes_freed =
981                         atomic_inc_return(&zcache_pageframes_freed_atomic);
982         if (writeback_ret == 0) {
983                 /* zcache_get_swap_cache_page will free, don't double free */
984                 znewpage1 = NULL;
985                 newpage1 = NULL;
986                 evictpage1 = NULL;
987         }
988         if (nzbuds < 2)
989                 goto free_and_out;
990
991         /* if there is a second zbud, process it */
992         unswiz(th[1].oid, th[1].index, &type, &offset);
993         page2 = (znewpage2 != NULL) ? znewpage2 :
994                         ((newpage2 != NULL) ? newpage2 : evictpage2);
995         writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page2);
996         if (writeback_ret < 0) {
997                 ret = -ENOMEM;
998                 goto free_and_out;
999         }
1000         if (evictpage2 != NULL)
1001                 zcache_pageframes_freed =
1002                         atomic_inc_return(&zcache_pageframes_freed_atomic);
1003         if (writeback_ret == 0) {
1004                 znewpage2 = NULL;
1005                 newpage2 = NULL;
1006                 evictpage2 = NULL;
1007         }
1008
1009 free_and_out:
1010         if (znewpage1 != NULL)
1011                 page_cache_release(znewpage1);
1012         if (znewpage2 != NULL)
1013                 page_cache_release(znewpage2);
1014         if (newpage1 != NULL)
1015                 page_cache_release(newpage1);
1016         if (newpage2 != NULL)
1017                 page_cache_release(newpage2);
1018         if (evictpage1 != NULL)
1019                 zcache_free_page(evictpage1);
1020         if (evictpage2 != NULL)
1021                 zcache_free_page(evictpage2);
1022         return ret;
1023 }
1024 #endif /* CONFIG_ZCACHE_WRITEBACK */
1025
1026 /*
1027  * When zcache is disabled ("frozen"), pools can be created and destroyed,
1028  * but all puts (and thus all other operations that require memory allocation)
1029  * must fail.  If zcache is unfrozen, accepts puts, and is then frozen again,
1030  * data consistency requires all puts while frozen to be converted into
1031  * flushes.
1032  */
1033 static bool zcache_freeze;
1034
1035 /*
1036  * This zcache shrinker interface reduces the number of ephemeral pageframes
1037  * used by zcache to approximately the same as the total number of LRU_FILE
1038  * pageframes in use, and now also reduces the number of persistent pageframes
1039  * used by zcache to approximately the same as the total number of LRU_ANON
1040  * pageframes in use.  FIXME POLICY: Probably the writeback should only occur
1041  * if the eviction doesn't free enough pages.
1042  */
1043 static int shrink_zcache_memory(struct shrinker *shrink,
1044                                 struct shrink_control *sc)
1045 {
1046         static bool in_progress;
1047         int ret = -1;
1048         int nr = sc->nr_to_scan;
1049         int nr_evict = 0;
1050         int nr_writeback = 0;
1051         struct page *page;
1052         int  file_pageframes_inuse, anon_pageframes_inuse;
1053
1054         if (nr <= 0)
1055                 goto skip_evict;
1056
1057         /* don't allow more than one eviction thread at a time */
1058         if (in_progress)
1059                 goto skip_evict;
1060
1061         in_progress = true;
1062
1063         /* we are going to ignore nr, and target a different value */
1064         zcache_last_active_file_pageframes =
1065                 global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
1066         zcache_last_inactive_file_pageframes =
1067                 global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
1068         file_pageframes_inuse = zcache_last_active_file_pageframes +
1069                                 zcache_last_inactive_file_pageframes;
1070         if (zcache_eph_pageframes > file_pageframes_inuse)
1071                 nr_evict = zcache_eph_pageframes - file_pageframes_inuse;
1072         else
1073                 nr_evict = 0;
1074         while (nr_evict-- > 0) {
1075                 page = zcache_evict_eph_pageframe();
1076                 if (page == NULL)
1077                         break;
1078                 zcache_free_page(page);
1079         }
1080
1081         zcache_last_active_anon_pageframes =
1082                 global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
1083         zcache_last_inactive_anon_pageframes =
1084                 global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
1085         anon_pageframes_inuse = zcache_last_active_anon_pageframes +
1086                                 zcache_last_inactive_anon_pageframes;
1087         if (zcache_pers_pageframes > anon_pageframes_inuse)
1088                 nr_writeback = zcache_pers_pageframes - anon_pageframes_inuse;
1089         else
1090                 nr_writeback = 0;
1091         while (nr_writeback-- > 0) {
1092 #ifdef CONFIG_ZCACHE_WRITEBACK
1093                 int writeback_ret;
1094                 writeback_ret = zcache_frontswap_writeback();
1095                 if (writeback_ret == -ENOMEM)
1096 #endif
1097                         break;
1098         }
1099         in_progress = false;
1100
1101 skip_evict:
1102         /* resample: has changed, but maybe not all the way yet */
1103         zcache_last_active_file_pageframes =
1104                 global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
1105         zcache_last_inactive_file_pageframes =
1106                 global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
1107         ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
1108                 zcache_last_inactive_file_pageframes;
1109         if (ret < 0)
1110                 ret = 0;
1111         return ret;
1112 }
1113
1114 static struct shrinker zcache_shrinker = {
1115         .shrink = shrink_zcache_memory,
1116         .seeks = DEFAULT_SEEKS,
1117 };
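/*
 * Illustrative numbers for the shrinker policy documented above
 * shrink_zcache_memory(): if zcache currently holds 12000 ephemeral
 * pageframes while the file LRUs (active + inactive) total 10000 pages,
 * the shrinker tries to evict the excess 2000 ephemeral pageframes;
 * likewise, any excess of persistent pageframes over the anon LRU total
 * is handed to zcache_frontswap_writeback() when CONFIG_ZCACHE_WRITEBACK
 * is enabled.
 */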
1118
1119 /*
1120  * zcache shims between cleancache/frontswap ops and tmem
1121  */
1122
1123 /* FIXME rename these core routines to zcache_tmemput etc? */
1124 int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1125                                 uint32_t index, void *page,
1126                                 unsigned int size, bool raw, int ephemeral)
1127 {
1128         struct tmem_pool *pool;
1129         struct tmem_handle th;
1130         int ret = -1;
1131         void *pampd = NULL;
1132
1133         BUG_ON(!irqs_disabled());
1134         pool = zcache_get_pool_by_id(cli_id, pool_id);
1135         if (unlikely(pool == NULL))
1136                 goto out;
1137         if (!zcache_freeze) {
1138                 ret = 0;
1139                 th.client_id = cli_id;
1140                 th.pool_id = pool_id;
1141                 th.oid = *oidp;
1142                 th.index = index;
1143                 pampd = zcache_pampd_create((char *)page, size, raw,
1144                                 ephemeral, &th);
1145                 if (pampd == NULL) {
1146                         ret = -ENOMEM;
1147                         if (ephemeral)
1148                                 inc_zcache_failed_eph_puts();
1149                         else
1150                                 inc_zcache_failed_pers_puts();
1151                 } else {
1152                         if (ramster_enabled)
1153                                 ramster_do_preload_flnode(pool);
1154                         ret = tmem_put(pool, oidp, index, 0, pampd);
1155                         if (ret < 0)
1156                                 BUG();
1157                 }
1158                 zcache_put_pool(pool);
1159         } else {
1160                 inc_zcache_put_to_flush();
1161                 if (ramster_enabled)
1162                         ramster_do_preload_flnode(pool);
1163                 if (atomic_read(&pool->obj_count) > 0)
1164                         /* the put fails whether the flush succeeds or not */
1165                         (void)tmem_flush_page(pool, oidp, index);
1166                 zcache_put_pool(pool);
1167         }
1168 out:
1169         return ret;
1170 }
1171
1172 int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1173                                 uint32_t index, void *page,
1174                                 size_t *sizep, bool raw, int get_and_free)
1175 {
1176         struct tmem_pool *pool;
1177         int ret = -1;
1178         bool eph;
1179
1180         if (!raw) {
1181                 BUG_ON(irqs_disabled());
1182                 BUG_ON(in_softirq());
1183         }
1184         pool = zcache_get_pool_by_id(cli_id, pool_id);
1185         eph = is_ephemeral(pool);
1186         if (likely(pool != NULL)) {
1187                 if (atomic_read(&pool->obj_count) > 0)
1188                         ret = tmem_get(pool, oidp, index, (char *)(page),
1189                                         sizep, raw, get_and_free);
1190                 zcache_put_pool(pool);
1191         }
1192         WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
1193                         "zcache_get fails on persistent pool, "
1194                         "bad things are very likely to happen soon\n");
1195 #ifdef RAMSTER_TESTING
1196         if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
1197                 pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
1198 #endif
1199         return ret;
1200 }
1201
1202 int zcache_flush_page(int cli_id, int pool_id,
1203                                 struct tmem_oid *oidp, uint32_t index)
1204 {
1205         struct tmem_pool *pool;
1206         int ret = -1;
1207         unsigned long flags;
1208
1209         local_irq_save(flags);
1210         inc_zcache_flush_total();
1211         pool = zcache_get_pool_by_id(cli_id, pool_id);
1212         if (ramster_enabled)
1213                 ramster_do_preload_flnode(pool);
1214         if (likely(pool != NULL)) {
1215                 if (atomic_read(&pool->obj_count) > 0)
1216                         ret = tmem_flush_page(pool, oidp, index);
1217                 zcache_put_pool(pool);
1218         }
1219         if (ret >= 0)
1220                 inc_zcache_flush_found();
1221         local_irq_restore(flags);
1222         return ret;
1223 }
1224
1225 int zcache_flush_object(int cli_id, int pool_id,
1226                                 struct tmem_oid *oidp)
1227 {
1228         struct tmem_pool *pool;
1229         int ret = -1;
1230         unsigned long flags;
1231
1232         local_irq_save(flags);
1233         inc_zcache_flobj_total();
1234         pool = zcache_get_pool_by_id(cli_id, pool_id);
1235         if (ramster_enabled)
1236                 ramster_do_preload_flnode(pool);
1237         if (likely(pool != NULL)) {
1238                 if (atomic_read(&pool->obj_count) > 0)
1239                         ret = tmem_flush_object(pool, oidp);
1240                 zcache_put_pool(pool);
1241         }
1242         if (ret >= 0)
1243                 inc_zcache_flobj_found();
1244         local_irq_restore(flags);
1245         return ret;
1246 }
1247
1248 static int zcache_client_destroy_pool(int cli_id, int pool_id)
1249 {
1250         struct tmem_pool *pool = NULL;
1251         struct zcache_client *cli = NULL;
1252         int ret = -1;
1253
1254         if (pool_id < 0)
1255                 goto out;
1256         if (cli_id == LOCAL_CLIENT)
1257                 cli = &zcache_host;
1258         else if ((unsigned int)cli_id < MAX_CLIENTS)
1259                 cli = &zcache_clients[cli_id];
1260         if (cli == NULL)
1261                 goto out;
1262         atomic_inc(&cli->refcount);
1263         pool = cli->tmem_pools[pool_id];
1264         if (pool == NULL)
1265                 goto out;
1266         cli->tmem_pools[pool_id] = NULL;
1267         /* wait for pool activity on other cpus to quiesce */
1268         while (atomic_read(&pool->refcount) != 0)
1269                 ;
1270         atomic_dec(&cli->refcount);
1271         local_bh_disable();
1272         ret = tmem_destroy_pool(pool);
1273         local_bh_enable();
1274         kfree(pool);
1275         if (cli_id == LOCAL_CLIENT)
1276                 pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
1277         else
1278                 pr_info("%s: destroyed pool id=%d, client=%d\n",
1279                                 namestr, pool_id, cli_id);
1280 out:
1281         return ret;
1282 }
1283
1284 int zcache_new_pool(uint16_t cli_id, uint32_t flags)
1285 {
1286         int poolid = -1;
1287         struct tmem_pool *pool;
1288         struct zcache_client *cli = NULL;
1289
1290         if (cli_id == LOCAL_CLIENT)
1291                 cli = &zcache_host;
1292         else if ((unsigned int)cli_id < MAX_CLIENTS)
1293                 cli = &zcache_clients[cli_id];
1294         if (cli == NULL)
1295                 goto out;
1296         atomic_inc(&cli->refcount);
1297         pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
1298         if (pool == NULL)
1299                 goto out;
1300
1301         for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
1302                 if (cli->tmem_pools[poolid] == NULL)
1303                         break;
1304         if (poolid >= MAX_POOLS_PER_CLIENT) {
1305                 pr_info("%s: pool creation failed: max exceeded\n", namestr);
1306                 kfree(pool);
1307                 poolid = -1;
1308                 goto out;
1309         }
1310         atomic_set(&pool->refcount, 0);
1311         pool->client = cli;
1312         pool->pool_id = poolid;
1313         tmem_new_pool(pool, flags);
1314         cli->tmem_pools[poolid] = pool;
1315         if (cli_id == LOCAL_CLIENT)
1316                 pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
1317                         flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
1318                         poolid);
1319         else
1320                 pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
1321                         flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
1322                         poolid, cli_id);
1323 out:
1324         if (cli != NULL)
1325                 atomic_dec(&cli->refcount);
1326         return poolid;
1327 }
1328
1329 static int zcache_local_new_pool(uint32_t flags)
1330 {
1331         return zcache_new_pool(LOCAL_CLIENT, flags);
1332 }
1333
1334 int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
1335 {
1336         struct tmem_pool *pool;
1337         struct zcache_client *cli;
1338         uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
1339         int ret = -1;
1340
1341         BUG_ON(!ramster_enabled);
1342         if (cli_id == LOCAL_CLIENT)
1343                 goto out;
1344         if (pool_id >= MAX_POOLS_PER_CLIENT)
1345                 goto out;
1346         if (cli_id >= MAX_CLIENTS)
1347                 goto out;
1348
1349         cli = &zcache_clients[cli_id];
1350         if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
1351                 pr_err("zcache_autocreate_pool: pool type disabled\n");
1352                 goto out;
1353         }
1354         if (!cli->allocated) {
1355                 if (zcache_new_client(cli_id)) {
1356                         pr_err("zcache_autocreate_pool: can't create client\n");
1357                         goto out;
1358                 }
1359                 cli = &zcache_clients[cli_id];
1360         }
1361         atomic_inc(&cli->refcount);
1362         pool = cli->tmem_pools[pool_id];
1363         if (pool != NULL) {
1364                 if (pool->persistent && eph) {
1365                         pr_err("zcache_autocreate_pool: type mismatch\n");
1366                         goto out;
1367                 }
1368                 ret = 0;
1369                 goto out;
1370         }
1371         pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
1372         if (pool == NULL)
1373                 goto out;
1374
1375         atomic_set(&pool->refcount, 0);
1376         pool->client = cli;
1377         pool->pool_id = pool_id;
1378         tmem_new_pool(pool, flags);
1379         cli->tmem_pools[pool_id] = pool;
1380         pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
1381                 namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
1382                 pool_id, cli_id);
1383         ret = 0;
1384 out:
1385         if (cli != NULL)
1386                 atomic_dec(&cli->refcount);
1387         return ret;
1388 }
1389
1390 /**********
1391  * Two kernel functionalities currently can be layered on top of tmem.
1392  * These are "cleancache" which is used as a second-chance cache for clean
1393  * page cache pages; and "frontswap" which is used for swap pages
1394  * to avoid writes to disk.  A generic "shim" is provided here for each
1395  * to translate in-kernel semantics to zcache semantics.
1396  */
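/*
 * A note on key translation in the shims below: cleancache hands us a
 * struct cleancache_filekey per inode, which is reinterpreted bit-for-bit
 * as a struct tmem_oid (the sizes are BUG_ON-checked in
 * zcache_cleancache_init_fs()), with the page's index used directly as
 * the tmem index.  Frontswap instead packs (swap type, offset) into an
 * oid and index via oswiz()/iswiz(); see the swizzling comment further
 * below.
 */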
1397
1398 static void zcache_cleancache_put_page(int pool_id,
1399                                         struct cleancache_filekey key,
1400                                         pgoff_t index, struct page *page)
1401 {
1402         u32 ind = (u32) index;
1403         struct tmem_oid oid = *(struct tmem_oid *)&key;
1404
1405         if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
1406                 inc_zcache_eph_nonactive_puts_ignored();
1407                 return;
1408         }
1409         if (likely(ind == index))
1410                 (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
1411                                         page, PAGE_SIZE, false, 1);
1412 }
1413
1414 static int zcache_cleancache_get_page(int pool_id,
1415                                         struct cleancache_filekey key,
1416                                         pgoff_t index, struct page *page)
1417 {
1418         u32 ind = (u32) index;
1419         struct tmem_oid oid = *(struct tmem_oid *)&key;
1420         size_t size;
1421         int ret = -1;
1422
1423         if (likely(ind == index)) {
1424                 ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
1425                                         page, &size, false, 0);
1426                 BUG_ON(ret >= 0 && size != PAGE_SIZE);
1427                 if (ret == 0)
1428                         SetPageWasActive(page);
1429         }
1430         return ret;
1431 }
1432
1433 static void zcache_cleancache_flush_page(int pool_id,
1434                                         struct cleancache_filekey key,
1435                                         pgoff_t index)
1436 {
1437         u32 ind = (u32) index;
1438         struct tmem_oid oid = *(struct tmem_oid *)&key;
1439
1440         if (likely(ind == index))
1441                 (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
1442 }
1443
1444 static void zcache_cleancache_flush_inode(int pool_id,
1445                                         struct cleancache_filekey key)
1446 {
1447         struct tmem_oid oid = *(struct tmem_oid *)&key;
1448
1449         (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
1450 }
1451
1452 static void zcache_cleancache_flush_fs(int pool_id)
1453 {
1454         if (pool_id >= 0)
1455                 (void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
1456 }
1457
1458 static int zcache_cleancache_init_fs(size_t pagesize)
1459 {
1460         BUG_ON(sizeof(struct cleancache_filekey) !=
1461                                 sizeof(struct tmem_oid));
1462         BUG_ON(pagesize != PAGE_SIZE);
1463         return zcache_local_new_pool(0);
1464 }
1465
1466 static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
1467 {
1468         /* shared pools are unsupported and map to private */
1469         BUG_ON(sizeof(struct cleancache_filekey) !=
1470                                 sizeof(struct tmem_oid));
1471         BUG_ON(pagesize != PAGE_SIZE);
1472         return zcache_local_new_pool(0);
1473 }
1474
1475 static struct cleancache_ops zcache_cleancache_ops = {
1476         .put_page = zcache_cleancache_put_page,
1477         .get_page = zcache_cleancache_get_page,
1478         .invalidate_page = zcache_cleancache_flush_page,
1479         .invalidate_inode = zcache_cleancache_flush_inode,
1480         .invalidate_fs = zcache_cleancache_flush_fs,
1481         .init_shared_fs = zcache_cleancache_init_shared_fs,
1482         .init_fs = zcache_cleancache_init_fs
1483 };
1484
1485 struct cleancache_ops zcache_cleancache_register_ops(void)
1486 {
1487         struct cleancache_ops old_ops =
1488                 cleancache_register_ops(&zcache_cleancache_ops);
1489
1490         return old_ops;
1491 }
1492
1493 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1494 static int zcache_frontswap_poolid __read_mostly = -1;
1495
1496 /*
1497  * Swizzling increases objects per swaptype, increasing tmem concurrency
1498  * for heavy swaploads.  Later, a larger nr_cpus may warrant a larger
1499  * SWIZ_BITS.  Setting SWIZ_BITS to 27 basically reconstructs the swap
1500  * entry from frontswap_get_page(), but has side-effects; hence 8 is used.
1501  */
1502 #define SWIZ_BITS               8
1503 #define SWIZ_MASK               ((1 << SWIZ_BITS) - 1)
1504 #define _oswiz(_type, _ind)     ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
1505 #define iswiz(_ind)             (_ind >> SWIZ_BITS)
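/*
 * Illustrative example (SWIZ_BITS == 8): for swap type 2 and offset 0x12345,
 * _oswiz() yields oid.oid[0] == (2 << 8) | 0x45 == 0x245 and iswiz() yields
 * tmem index 0x123; unswiz() below recombines these into type 2 and
 * offset 0x12345.
 */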
1506
1507 static inline struct tmem_oid oswiz(unsigned type, u32 ind)
1508 {
1509         struct tmem_oid oid = { .oid = { 0 } };
1510         oid.oid[0] = _oswiz(type, ind);
1511         return oid;
1512 }
1513
1514 #ifdef CONFIG_ZCACHE_WRITEBACK
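/* Invert oswiz()/iswiz(): recover the swap type and offset for writeback. */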
1515 static void unswiz(struct tmem_oid oid, u32 index,
1516                                 unsigned *type, pgoff_t *offset)
1517 {
1518         *type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
1519         *offset = (pgoff_t)((index << SWIZ_BITS) |
1520                         (oid.oid[0] & SWIZ_MASK));
1521 }
1522 #endif
1523
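/*
 * Frontswap "store" hook: compress a swap page into the persistent tmem
 * pool.  Unless "nofrontswapignorenonactive" was given, pages not flagged
 * as having been active are rejected with -ERANGE; interrupts are disabled
 * across the actual put.
 */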
1524 static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
1525                                         struct page *page)
1526 {
1527         u64 ind64 = (u64)offset;
1528         u32 ind = (u32)offset;
1529         struct tmem_oid oid = oswiz(type, ind);
1530         int ret = -1;
1531         unsigned long flags;
1532
1533         BUG_ON(!PageLocked(page));
1534         if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
1535                 inc_zcache_pers_nonactive_puts_ignored();
1536                 ret = -ERANGE;
1537                 goto out;
1538         }
1539         if (likely(ind64 == ind)) {
1540                 local_irq_save(flags);
1541                 ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1542                                         &oid, iswiz(ind),
1543                                         page, PAGE_SIZE, false, 0);
1544                 local_irq_restore(flags);
1545         }
1546 out:
1547         return ret;
1548 }
1549
1550 /* returns 0 if the page was successfully gotten from frontswap, or -1
1551  * if it was not present (should never happen!) */
1552 static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
1553                                         struct page *page)
1554 {
1555         u64 ind64 = (u64)offset;
1556         u32 ind = (u32)offset;
1557         struct tmem_oid oid = oswiz(type, ind);
1558         size_t size;
1559         int ret = -1, get_and_free;
1560
1561         if (frontswap_has_exclusive_gets)
1562                 get_and_free = 1;
1563         else
1564                 get_and_free = -1;
1565         BUG_ON(!PageLocked(page));
1566         if (likely(ind64 == ind)) {
1567                 ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1568                                         &oid, iswiz(ind),
1569                                         page, &size, false, get_and_free);
1570                 BUG_ON(ret >= 0 && size != PAGE_SIZE);
1571         }
1572         return ret;
1573 }
1574
1575 /* flush a single page from frontswap */
1576 static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
1577 {
1578         u64 ind64 = (u64)offset;
1579         u32 ind = (u32)offset;
1580         struct tmem_oid oid = oswiz(type, ind);
1581
1582         if (likely(ind64 == ind))
1583                 (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1584                                         &oid, iswiz(ind));
1585 }
1586
1587 /* flush all pages from the passed swaptype */
1588 static void zcache_frontswap_flush_area(unsigned type)
1589 {
1590         struct tmem_oid oid;
1591         int ind;
1592
1593         for (ind = SWIZ_MASK; ind >= 0; ind--) {
1594                 oid = oswiz(type, ind);
1595                 (void)zcache_flush_object(LOCAL_CLIENT,
1596                                                 zcache_frontswap_poolid, &oid);
1597         }
1598 }
1599
1600 static void zcache_frontswap_init(unsigned ignored)
1601 {
1602         /* lazily create the single tmem pool shared by all frontswap "types" */
1603         if (zcache_frontswap_poolid < 0)
1604                 zcache_frontswap_poolid =
1605                         zcache_local_new_pool(TMEM_POOL_PERSIST);
1606 }
1607
1608 static struct frontswap_ops zcache_frontswap_ops = {
1609         .store = zcache_frontswap_put_page,
1610         .load = zcache_frontswap_get_page,
1611         .invalidate_page = zcache_frontswap_flush_page,
1612         .invalidate_area = zcache_frontswap_flush_area,
1613         .init = zcache_frontswap_init
1614 };
1615
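/*
 * Register zcache as the frontswap backend; the previously registered ops
 * (if any) are returned so the caller can warn about an override.
 */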
1616 struct frontswap_ops zcache_frontswap_register_ops(void)
1617 {
1618         struct frontswap_ops old_ops =
1619                 frontswap_register_ops(&zcache_frontswap_ops);
1620
1621         return old_ops;
1622 }
1623
1624 /*
1625  * zcache initialization
1626  * NOTE: FOR NOW, zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT
1627  * PARAMETER OR NOTHING HAPPENS!
1628  */
1629
1630 static int __init enable_zcache(char *s)
1631 {
1632         zcache_enabled = true;
1633         return 1;
1634 }
1635 __setup("zcache", enable_zcache);
1636
1637 static int __init enable_ramster(char *s)
1638 {
1639         zcache_enabled = true;
1640 #ifdef CONFIG_RAMSTER
1641         ramster_enabled = true;
1642 #endif
1643         return 1;
1644 }
1645 __setup("ramster", enable_ramster);
1646
1647 /* allow independent boot-time disabling of cleancache and frontswap */
1648
1649 static int __init no_cleancache(char *s)
1650 {
1651         disable_cleancache = true;
1652         return 1;
1653 }
1654
1655 __setup("nocleancache", no_cleancache);
1656
1657 static int __init no_frontswap(char *s)
1658 {
1659         disable_frontswap = true;
1660         return 1;
1661 }
1662
1663 __setup("nofrontswap", no_frontswap);
1664
1665 static int __init no_frontswap_exclusive_gets(char *s)
1666 {
1667         frontswap_has_exclusive_gets = false;
1668         return 1;
1669 }
1670
1671 __setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);
1672
1673 static int __init no_frontswap_ignore_nonactive(char *s)
1674 {
1675         disable_frontswap_ignore_nonactive = true;
1676         return 1;
1677 }
1678
1679 __setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);
1680
1681 static int __init no_cleancache_ignore_nonactive(char *s)
1682 {
1683         disable_cleancache_ignore_nonactive = true;
1684         return 1;
1685 }
1686
1687 __setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
1688
1689 static int __init enable_zcache_compressor(char *s)
1690 {
1691         strlcpy(zcache_comp_name, s, sizeof(zcache_comp_name));
1692         zcache_enabled = true;
1693         return 1;
1694 }
1695 __setup("zcache=", enable_zcache_compressor);
1696
1697
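/*
 * Validate the requested (or default "lzo") compressor via crypto_has_comp()
 * and allocate the per-cpu slots that will hold the compression transforms.
 * Returns 0 on success, nonzero on failure.
 */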
1698 static int __init zcache_comp_init(void)
1699 {
1700         int ret = 0;
1701
1702         /* check crypto algorithm */
1703         if (*zcache_comp_name != '\0') {
1704                 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1705                 if (!ret)
1706                         pr_info("zcache: %s not supported\n",
1707                                         zcache_comp_name);
1708         }
1709         if (!ret)
1710                 strcpy(zcache_comp_name, "lzo");
1711         ret = crypto_has_comp(zcache_comp_name, 0, 0);
1712         if (!ret) {
1713                 ret = 1;
1714                 goto out;
1715         }
1716         pr_info("zcache: using %s compressor\n", zcache_comp_name);
1717
1718         /* alloc percpu transforms */
1719         ret = 0;
1720         zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
1721         if (!zcache_comp_pcpu_tfms)
1722                 ret = 1;
1723 out:
1724         return ret;
1725 }
1726
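/*
 * Module init: register the tmem host/PAM ops, CPU notifier and compressor
 * when zcache (or ramster) was enabled on the command line, then create the
 * kmem caches and LOCAL_CLIENT, bring up zbud, and hook into cleancache
 * and/or frontswap unless individually disabled.
 */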
1727 static int __init zcache_init(void)
1728 {
1729         int ret = 0;
1730
1731         if (ramster_enabled) {
1732                 namestr = "ramster";
1733                 ramster_register_pamops(&zcache_pamops);
1734         }
1735 #ifdef CONFIG_DEBUG_FS
1736         zcache_debugfs_init();
1737 #endif
1738         if (zcache_enabled) {
1739                 unsigned int cpu;
1740
1741                 tmem_register_hostops(&zcache_hostops);
1742                 tmem_register_pamops(&zcache_pamops);
1743                 ret = register_cpu_notifier(&zcache_cpu_notifier_block);
1744                 if (ret) {
1745                         pr_err("%s: can't register cpu notifier\n", namestr);
1746                         goto out;
1747                 }
1748                 ret = zcache_comp_init();
1749                 if (ret) {
1750                         pr_err("%s: compressor initialization failed\n",
1751                                 namestr);
1752                         goto out;
1753                 }
1754                 for_each_online_cpu(cpu) {
1755                         void *pcpu = (void *)(long)cpu;
1756                         zcache_cpu_notifier(&zcache_cpu_notifier_block,
1757                                 CPU_UP_PREPARE, pcpu);
1758                 }
1759         }
1760         zcache_objnode_cache = kmem_cache_create("zcache_objnode",
1761                                 sizeof(struct tmem_objnode), 0, 0, NULL);
1762         zcache_obj_cache = kmem_cache_create("zcache_obj",
1763                                 sizeof(struct tmem_obj), 0, 0, NULL);
1764         ret = zcache_new_client(LOCAL_CLIENT);
1765         if (ret) {
1766                 pr_err("%s: can't create client\n", namestr);
1767                 goto out;
1768         }
1769         zbud_init();
1770         if (zcache_enabled && !disable_cleancache) {
1771                 struct cleancache_ops old_ops;
1772
1773                 register_shrinker(&zcache_shrinker);
1774                 old_ops = zcache_cleancache_register_ops();
1775                 pr_info("%s: cleancache enabled using kernel transcendent "
1776                         "memory and compression buddies\n", namestr);
1777 #ifdef CONFIG_ZCACHE_DEBUG
1778                 pr_info("%s: cleancache: ignorenonactive = %d\n",
1779                         namestr, !disable_cleancache_ignore_nonactive);
1780 #endif
1781                 if (old_ops.init_fs != NULL)
1782                         pr_warn("%s: cleancache_ops overridden\n", namestr);
1783         }
1784         if (zcache_enabled && !disable_frontswap) {
1785                 struct frontswap_ops old_ops;
1786
1787                 old_ops = zcache_frontswap_register_ops();
1788                 if (frontswap_has_exclusive_gets)
1789                         frontswap_tmem_exclusive_gets(true);
1790                 pr_info("%s: frontswap enabled using kernel transcendent "
1791                         "memory and compression buddies\n", namestr);
1792 #ifdef CONFIG_ZCACHE_DEBUG
1793                 pr_info("%s: frontswap: excl gets = %d active only = %d\n",
1794                         namestr, frontswap_has_exclusive_gets,
1795                         !disable_frontswap_ignore_nonactive);
1796 #endif
1797                 if (old_ops.init != NULL)
1798                         pr_warn("%s: frontswap_ops overridden\n", namestr);
1799         }
1800         if (ramster_enabled)
1801                 ramster_init(!disable_cleancache, !disable_frontswap,
1802                                 frontswap_has_exclusive_gets);
1803 out:
1804         return ret;
1805 }
1806
1807 late_initcall(zcache_init);