Merge "kfence: Use pt_regs to generate stack trace on faults" into tizen
[platform/kernel/linux-rpi.git] / mm / z3fold.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; there will likewise
 * be 63, or 62, freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

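/*
 * Worked example (a sketch, assuming PAGE_SIZE == 4096): CHUNK_SHIFT is
 * 12 - 6 = 6, so CHUNK_SIZE is 64 bytes and TOTAL_CHUNKS is 64. The header
 * occupies ZHDR_CHUNKS chunks (sizeof(struct z3fold_header) rounded up to
 * a 64-byte multiple), leaving NCHUNKS = 64 - ZHDR_CHUNKS chunks for user
 * objects.
 */
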
/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)
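
/*
 * Handle encoding in a nutshell: a non-headless handle is the address of
 * one of the slot[] entries above. Since slots come from a cache aligned
 * to SLOTS_ALIGN (64 bytes), clearing the low 6 bits of a handle recovers
 * the slots structure itself (see handle_to_slots()). The slot value, in
 * turn, packs the z3fold_header address, the buddy index in the low
 * BUDDY_MASK bits and, for LAST buddies, the size in chunks shifted left
 * by BUDDY_SHIFT (see __encode_handle() below).
 */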

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	number of foreign handles pointing into this page
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

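/*
 * Resulting layout of a non-headless z3fold page (a sketch):
 *
 *  +---------------+-------+------+--------+------+------+
 *  | z3fold_header | FIRST | free | MIDDLE | free | LAST |
 *  +---------------+-------+------+--------+------+------+
 *
 * The first buddy starts right after the header, the last buddy ends at
 * the page boundary, and the middle buddy floats in between at the chunk
 * offset recorded in @start_middle.
 */
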
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain two
 *		or fewer buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
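
/*
 * E.g. with 64-byte chunks, size_to_chunks(100) == size_to_chunks(128) == 2,
 * while size_to_chunks(129) == 3.
 */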

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_zalloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

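/*
 * Resolve a handle to its z3fold header. For non-headless handles the slot
 * is dereferenced under the slots rwlock; if @lock is set, the page lock is
 * taken via trylock in a retry loop so that the slots lock is never held
 * while waiting for the page lock.
 */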
static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
							bool lock)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			if (lock)
				locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (lock);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

/* Returns the z3fold header of the page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, false);
}

/* Return the z3fold header, locked unless the page is headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, true);
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}
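
/*
 * Typical calling pattern (a sketch): get_z3fold_header() returns the
 * header with the page lock held for non-headless pages, so it must be
 * paired with put_z3fold_header(), which drops that lock again:
 *
 *	zhdr = get_z3fold_header(handle);
 *	... read or update the header ...
 *	put_z3fold_header(zhdr);
 */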

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->foreign_handles = 0;
	zhdr->mapped_count = 0;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}
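
/*
 * E.g. with first_num == 1, __idx() maps FIRST (1) to slot 2, MIDDLE (2)
 * to slot 3 and LAST (3) to slot 0. The rotation by first_num exists so
 * that after z3fold_compact_page() moves a middle buddy into the first
 * position (and bumps first_num), the handle issued for that middle buddy
 * decodes as FIRST.
 */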

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Pool lock should be held as this function accesses first_num.
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
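
/*
 * Worked example (a sketch): with TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1,
 * a page holding only a middle buddy at start_middle == 20 with
 * middle_chunks == 10 has nfree_before == 19 and nfree_after == 34, so
 * num_free_chunks() reports 34.
 */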

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}
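
/*
 * Note that when a middle buddy is present, get_free_buddy() only picks
 * FIRST if the request fits in the gap between the header and the middle
 * buddy; e.g. with start_middle == 10 and ZHDR_CHUNKS == 1, a free first
 * buddy can take a request of at most 9 chunks.
 */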

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr) {
		if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			add_to_unbuddied(pool, new_zhdr);
			z3fold_page_unlock(new_zhdr);
		}
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
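
/*
 * In other words, moving the middle buddy is only considered worthwhile
 * when it closes a gap of at least BIG_CHUNK_GAP chunks (192 bytes with
 * 64-byte chunks) next to the existing free space.
 */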

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/*
 * Returns a _locked_ z3fold page header, or NULL. The current CPU's
 * unbuddied lists are searched first, starting from the list matching
 * the request size; failing that, the other CPUs' lists are scanned for
 * an exact-size match.
 */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots)
		zhdr->slots = alloc_slots(pool,
					can_sleep ? GFP_NOIO : GFP_ATOMIC);
	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}
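
/*
 * Conceptual usage sketch (hypothetical caller; in practice these static
 * functions are reached through the zpool API):
 *
 *	unsigned long handle;
 *	struct z3fold_pool *pool;
 *
 *	pool = z3fold_create_pool("test", GFP_KERNEL, NULL);
 *	if (pool && z3fold_alloc(pool, 1024, GFP_KERNEL, &handle) == 0) {
 *		void *obj = z3fold_map(pool, handle);
 *		... use obj ...
 *		z3fold_unmap(pool, handle);
 *		z3fold_free(pool, handle);
 *	}
 *	z3fold_destroy_pool(pool);
 */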

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (page_claimed) {
		/* the page has not been claimed by us */
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		put_z3fold_header(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

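	/*
	 * The on-stack slots are used for the eviction handles below;
	 * HANDLES_NOFREE in the pool back link tells free_handle() not to
	 * try to kmem_cache_free() this stack-allocated structure.
	 */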
1340         rwlock_init(&slots.lock);
1341         slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);
1342
1343         spin_lock(&pool->lock);
1344         if (!pool->ops || !pool->ops->evict || retries == 0) {
1345                 spin_unlock(&pool->lock);
1346                 return -EINVAL;
1347         }
1348         for (i = 0; i < retries; i++) {
1349                 if (list_empty(&pool->lru)) {
1350                         spin_unlock(&pool->lock);
1351                         return -EINVAL;
1352                 }
1353                 list_for_each_prev(pos, &pool->lru) {
1354                         page = list_entry(pos, struct page, lru);
1355
1356                         zhdr = page_address(page);
1357                         if (test_bit(PAGE_HEADLESS, &page->private)) {
1358                                 /*
1359                                  * For non-headless pages, we wait to do this
1360                                  * until we have the page lock to avoid racing
1361                                  * with __z3fold_alloc(). Headless pages don't
1362                                  * have a lock (and __z3fold_alloc() will never
1363                                  * see them), but we still need to test and set
1364                                  * PAGE_CLAIMED to avoid racing with
1365                                  * z3fold_free(), so just do it now before
1366                                  * leaving the loop.
1367                                  */
1368                                 if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1369                                         continue;
1370
1371                                 break;
1372                         }
1373
1374                         if (kref_get_unless_zero(&zhdr->refcount) == 0) {
1375                                 zhdr = NULL;
1376                                 break;
1377                         }
1378                         if (!z3fold_page_trylock(zhdr)) {
1379                                 if (kref_put(&zhdr->refcount,
1380                                                 release_z3fold_page))
1381                                         atomic64_dec(&pool->pages_nr);
1382                                 zhdr = NULL;
1383                                 continue; /* can't evict at this point */
1384                         }
1385
1386                         /* test_and_set_bit is of course atomic, but we still
1387                          * need to do it under page lock, otherwise checking
1388                          * that bit in __z3fold_alloc wouldn't make sense
1389                          */
1390                         if (zhdr->foreign_handles ||
1391                             test_and_set_bit(PAGE_CLAIMED, &page->private)) {
1392                                 if (kref_put(&zhdr->refcount,
1393                                                 release_z3fold_page_locked))
1394                                         atomic64_dec(&pool->pages_nr);
1395                                 else
1396                                         z3fold_page_unlock(zhdr);
1397                                 zhdr = NULL;
1398                                 continue; /* can't evict such page */
1399                         }
1400                         list_del_init(&zhdr->buddy);
1401                         zhdr->cpu = -1;
1402                         break;
1403                 }
1404
1405                 if (!zhdr)
1406                         break;
1407
1408                 list_del_init(&page->lru);
1409                 spin_unlock(&pool->lock);
1410
1411                 if (!test_bit(PAGE_HEADLESS, &page->private)) {
1412                         /*
1413                          * We need encode the handles before unlocking, and
1414                          * use our local slots structure because z3fold_free
1415                          * can zero out zhdr->slots and we can't do much
1416                          * about that
1417                          */
1418                         first_handle = 0;
1419                         last_handle = 0;
1420                         middle_handle = 0;
1421                         memset(slots.slot, 0, sizeof(slots.slot));
1422                         if (zhdr->first_chunks)
1423                                 first_handle = __encode_handle(zhdr, &slots,
1424                                                                 FIRST);
1425                         if (zhdr->middle_chunks)
1426                                 middle_handle = __encode_handle(zhdr, &slots,
1427                                                                 MIDDLE);
1428                         if (zhdr->last_chunks)
1429                                 last_handle = __encode_handle(zhdr, &slots,
1430                                                                 LAST);
1431                         /*
1432                          * it's safe to unlock here because we hold a
1433                          * reference to this page
1434                          */
1435                         z3fold_page_unlock(zhdr);
1436                 } else {
1437                         first_handle = encode_handle(zhdr, HEADLESS);
1438                         last_handle = middle_handle = 0;
1439                 }
1440                 /* Issue the eviction callback(s) */
1441                 if (middle_handle) {
1442                         ret = pool->ops->evict(pool, middle_handle);
1443                         if (ret)
1444                                 goto next;
1445                 }
1446                 if (first_handle) {
1447                         ret = pool->ops->evict(pool, first_handle);
1448                         if (ret)
1449                                 goto next;
1450                 }
1451                 if (last_handle) {
1452                         ret = pool->ops->evict(pool, last_handle);
1453                         if (ret)
1454                                 goto next;
1455                 }
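                /*
                 * ret is 0 if all live buddies were evicted, non-zero if
                 * any of the eviction callbacks failed
                 */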
1456 next:
1457                 if (test_bit(PAGE_HEADLESS, &page->private)) {
1458                         if (ret == 0) {
1459                                 free_z3fold_page(page, true);
1460                                 atomic64_dec(&pool->pages_nr);
1461                                 return 0;
1462                         }
1463                         spin_lock(&pool->lock);
1464                         list_add(&page->lru, &pool->lru);
1465                         spin_unlock(&pool->lock);
1466                         clear_bit(PAGE_CLAIMED, &page->private);
1467                 } else {
1468                         struct z3fold_buddy_slots *slots = zhdr->slots;
1469                         z3fold_page_lock(zhdr);
1470                         if (kref_put(&zhdr->refcount,
1471                                         release_z3fold_page_locked)) {
1472                                 kmem_cache_free(pool->c_handle, slots);
1473                                 atomic64_dec(&pool->pages_nr);
1474                                 return 0;
1475                         }
1476                         /*
1477                          * if we are here, the page is still not completely
1478                          * free. Take the global pool lock so we can add it
1479                          * back to the lru list
1480                          */
1481                         spin_lock(&pool->lock);
1482                         list_add(&page->lru, &pool->lru);
1483                         spin_unlock(&pool->lock);
1484                         z3fold_page_unlock(zhdr);
1485                         clear_bit(PAGE_CLAIMED, &page->private);
1486                 }
1487
1488                 /* We started off locked, so we need to take the pool lock again */
1489                 spin_lock(&pool->lock);
1490         }
1491         spin_unlock(&pool->lock);
1492         return -EAGAIN;
1493 }
1494
1495 /**
1496  * z3fold_map() - maps the allocation associated with the given handle
1497  * @pool:       pool in which the allocation resides
1498  * @handle:     handle associated with the allocation to be mapped
1499  *
1500  * Extracts the buddy number from the handle and constructs a pointer to
1501  * the correct starting chunk within the page.
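 * For a FIRST buddy the data starts right after the z3fold header; for
 * MIDDLE it starts at zhdr->start_middle; for LAST the object is packed
 * against the end of the page, so the offset is counted back from PAGE_SIZE.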
1502  *
1503  * Returns: a pointer to the mapped allocation
1504  */
1505 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1506 {
1507         struct z3fold_header *zhdr;
1508         struct page *page;
1509         void *addr;
1510         enum buddy buddy;
1511
1512         zhdr = get_z3fold_header(handle);
1513         addr = zhdr;
1514         page = virt_to_page(zhdr);
1515
1516         if (test_bit(PAGE_HEADLESS, &page->private))
1517                 goto out;
1518
1519         buddy = handle_to_buddy(handle);
1520         switch (buddy) {
1521         case FIRST:
1522                 addr += ZHDR_SIZE_ALIGNED;
1523                 break;
1524         case MIDDLE:
1525                 addr += zhdr->start_middle << CHUNK_SHIFT;
1526                 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1527                 break;
1528         case LAST:
1529                 addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1530                 break;
1531         default:
1532                 pr_err("unknown buddy id %d\n", buddy);
1533                 WARN_ON(1);
1534                 addr = NULL;
1535                 break;
1536         }
1537
1538         if (addr)
1539                 zhdr->mapped_count++;
1540 out:
1541         put_z3fold_header(zhdr);
1542         return addr;
1543 }
1544
1545 /**
1546  * z3fold_unmap() - unmaps the allocation associated with the given handle
1547  * @pool:       pool in which the allocation resides
1548  * @handle:     handle associated with the allocation to be unmapped
1549  */
1550 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1551 {
1552         struct z3fold_header *zhdr;
1553         struct page *page;
1554         enum buddy buddy;
1555
1556         zhdr = get_z3fold_header(handle);
1557         page = virt_to_page(zhdr);
1558
1559         if (test_bit(PAGE_HEADLESS, &page->private))
1560                 return;
1561
1562         buddy = handle_to_buddy(handle);
1563         if (buddy == MIDDLE)
1564                 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1565         zhdr->mapped_count--;
1566         put_z3fold_header(zhdr);
1567 }
1568
1569 /**
1570  * z3fold_get_pool_size() - gets the z3fold pool size in pages
1571  * @pool:       pool whose size is being queried
1572  *
1573  * Returns: size in pages of the given pool.
1574  */
1575 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1576 {
1577         return atomic64_read(&pool->pages_nr);
1578 }
1579
1580 static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1581 {
1582         struct z3fold_header *zhdr;
1583         struct z3fold_pool *pool;
1584
1585         VM_BUG_ON_PAGE(!PageMovable(page), page);
1586         VM_BUG_ON_PAGE(PageIsolated(page), page);
1587
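        /* headless pages carry no z3fold header and are not migrated */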
1588         if (test_bit(PAGE_HEADLESS, &page->private))
1589                 return false;
1590
1591         zhdr = page_address(page);
1592         z3fold_page_lock(zhdr);
1593         if (test_bit(NEEDS_COMPACTING, &page->private) ||
1594             test_bit(PAGE_STALE, &page->private))
1595                 goto out;
1596
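        /* pages with live mappings or foreign handles must not be moved */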
1597         if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1598                 goto out;
1599
1600         if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1601                 goto out;
1602         pool = zhdr_to_pool(zhdr);
1603         spin_lock(&pool->lock);
1604         if (!list_empty(&zhdr->buddy))
1605                 list_del_init(&zhdr->buddy);
1606         if (!list_empty(&page->lru))
1607                 list_del_init(&page->lru);
1608         spin_unlock(&pool->lock);
1609
1610         kref_get(&zhdr->refcount);
1611         z3fold_page_unlock(zhdr);
1612         return true;
1613
1614 out:
1615         z3fold_page_unlock(zhdr);
1616         return false;
1617 }
1618
1619 static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
1620                                struct page *page, enum migrate_mode mode)
1621 {
1622         struct z3fold_header *zhdr, *new_zhdr;
1623         struct z3fold_pool *pool;
1624         struct address_space *new_mapping;
1625
1626         VM_BUG_ON_PAGE(!PageMovable(page), page);
1627         VM_BUG_ON_PAGE(!PageIsolated(page), page);
1628         VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
1629         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1630
1631         zhdr = page_address(page);
1632         pool = zhdr_to_pool(zhdr);
1633
1634         if (!z3fold_page_trylock(zhdr))
1635                 return -EAGAIN;
1636         if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1637                 z3fold_page_unlock(zhdr);
1638                 clear_bit(PAGE_CLAIMED, &page->private);
1639                 return -EBUSY;
1640         }
1641         if (work_pending(&zhdr->work)) {
1642                 z3fold_page_unlock(zhdr);
1643                 return -EAGAIN;
1644         }
1645         new_zhdr = page_address(newpage);
1646         memcpy(new_zhdr, zhdr, PAGE_SIZE);
1647         newpage->private = page->private;
1648         page->private = 0;
1649         z3fold_page_unlock(zhdr);
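        /* the memcpy above copied old lock and work state; reinitialize both */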
1650         spin_lock_init(&new_zhdr->page_lock);
1651         INIT_WORK(&new_zhdr->work, compact_page_work);
1652         /*
1653          * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1654          * so we only have to reinitialize it.
1655          */
1656         INIT_LIST_HEAD(&new_zhdr->buddy);
1657         new_mapping = page_mapping(page);
1658         __ClearPageMovable(page);
1659         ClearPagePrivate(page);
1660
1661         get_page(newpage);
1662         z3fold_page_lock(new_zhdr);
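        /* re-encode the handles so they point into the new page */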
1663         if (new_zhdr->first_chunks)
1664                 encode_handle(new_zhdr, FIRST);
1665         if (new_zhdr->last_chunks)
1666                 encode_handle(new_zhdr, LAST);
1667         if (new_zhdr->middle_chunks)
1668                 encode_handle(new_zhdr, MIDDLE);
1669         set_bit(NEEDS_COMPACTING, &newpage->private);
1670         new_zhdr->cpu = smp_processor_id();
1671         spin_lock(&pool->lock);
1672         list_add(&newpage->lru, &pool->lru);
1673         spin_unlock(&pool->lock);
1674         __SetPageMovable(newpage, new_mapping);
1675         z3fold_page_unlock(new_zhdr);
1676
1677         queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1678
1679         page_mapcount_reset(page);
1680         clear_bit(PAGE_CLAIMED, &page->private);
1681         put_page(page);
1682         return 0;
1683 }
1684
1685 static void z3fold_page_putback(struct page *page)
1686 {
1687         struct z3fold_header *zhdr;
1688         struct z3fold_pool *pool;
1689
1690         zhdr = page_address(page);
1691         pool = zhdr_to_pool(zhdr);
1692
1693         z3fold_page_lock(zhdr);
1694         if (!list_empty(&zhdr->buddy))
1695                 list_del_init(&zhdr->buddy);
1696         INIT_LIST_HEAD(&page->lru);
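        /* drop the reference taken in z3fold_page_isolate() */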
1697         if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1698                 atomic64_dec(&pool->pages_nr);
1699                 return;
1700         }
1701         spin_lock(&pool->lock);
1702         list_add(&page->lru, &pool->lru);
1703         spin_unlock(&pool->lock);
1704         clear_bit(PAGE_CLAIMED, &page->private);
1705         z3fold_page_unlock(zhdr);
1706 }
1707
1708 static const struct address_space_operations z3fold_aops = {
1709         .isolate_page = z3fold_page_isolate,
1710         .migratepage = z3fold_page_migrate,
1711         .putback_page = z3fold_page_putback,
1712 };
1713
1714 /*****************
1715  * zpool
1716  ****************/
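
/*
 * A minimal sketch of driving this driver through the zpool API (error
 * handling omitted; the pool name and ops below are hypothetical):
 *
 *	struct zpool *zp;
 *	unsigned long handle;
 *	void *data;
 *
 *	zp = zpool_create_pool("z3fold", "mypool", GFP_KERNEL, &my_zpool_ops);
 *	zpool_malloc(zp, size, GFP_KERNEL, &handle);
 *	data = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
 *	...
 *	zpool_unmap_handle(zp, handle);
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */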
1717
1718 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
1719 {
1720         if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
1721                 return pool->zpool_ops->evict(pool->zpool, handle);
1722         else
1723                 return -ENOENT;
1724 }
1725
1726 static const struct z3fold_ops z3fold_zpool_ops = {
1727         .evict =        z3fold_zpool_evict
1728 };
1729
1730 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
1731                                const struct zpool_ops *zpool_ops,
1732                                struct zpool *zpool)
1733 {
1734         struct z3fold_pool *pool;
1735
1736         pool = z3fold_create_pool(name, gfp,
1737                                 zpool_ops ? &z3fold_zpool_ops : NULL);
1738         if (pool) {
1739                 pool->zpool = zpool;
1740                 pool->zpool_ops = zpool_ops;
1741         }
1742         return pool;
1743 }
1744
1745 static void z3fold_zpool_destroy(void *pool)
1746 {
1747         z3fold_destroy_pool(pool);
1748 }
1749
1750 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1751                         unsigned long *handle)
1752 {
1753         return z3fold_alloc(pool, size, gfp, handle);
1754 }
1755 static void z3fold_zpool_free(void *pool, unsigned long handle)
1756 {
1757         z3fold_free(pool, handle);
1758 }
1759
1760 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1761                         unsigned int *reclaimed)
1762 {
1763         unsigned int total = 0;
1764         int ret = -EINVAL;
1765
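        /* reclaim one page per loop iteration, with up to 8 eviction retries */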
1766         while (total < pages) {
1767                 ret = z3fold_reclaim_page(pool, 8);
1768                 if (ret < 0)
1769                         break;
1770                 total++;
1771         }
1772
1773         if (reclaimed)
1774                 *reclaimed = total;
1775
1776         return ret;
1777 }
1778
1779 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1780                         enum zpool_mapmode mm)
1781 {
1782         return z3fold_map(pool, handle);
1783 }
1784 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1785 {
1786         z3fold_unmap(pool, handle);
1787 }
1788
1789 static u64 z3fold_zpool_total_size(void *pool)
1790 {
1791         return z3fold_get_pool_size(pool) * PAGE_SIZE;
1792 }
1793
1794 static struct zpool_driver z3fold_zpool_driver = {
1795         .type =         "z3fold",
1796         .owner =        THIS_MODULE,
1797         .create =       z3fold_zpool_create,
1798         .destroy =      z3fold_zpool_destroy,
1799         .malloc =       z3fold_zpool_malloc,
1800         .free =         z3fold_zpool_free,
1801         .shrink =       z3fold_zpool_shrink,
1802         .map =          z3fold_zpool_map,
1803         .unmap =        z3fold_zpool_unmap,
1804         .total_size =   z3fold_zpool_total_size,
1805 };
1806
1807 MODULE_ALIAS("zpool-z3fold");
1808
1809 static int __init init_z3fold(void)
1810 {
1811         int ret;
1812
1813         /* Make sure the z3fold header is not larger than the page size */
1814         BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
1815         ret = z3fold_mount();
1816         if (ret)
1817                 return ret;
1818
1819         zpool_register_driver(&z3fold_zpool_driver);
1820
1821         return 0;
1822 }
1823
1824 static void __exit exit_z3fold(void)
1825 {
1826         z3fold_unmount();
1827         zpool_unregister_driver(&z3fold_zpool_driver);
1828 }
1829
1830 module_init(init_z3fold);
1831 module_exit(exit_z3fold);
1832
1833 MODULE_LICENSE("GPL");
1834 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1835 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");