// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
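
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how a z3fold pool is typically consumed through the zpool layer, the way
 * zswap does. The calls below reflect the 5.10-era zpool API as we
 * understand it; treat the exact signatures as assumptions and check
 * include/linux/zpool.h.
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "demo", GFP_KERNEL, NULL);
 *	unsigned long handle;
 *
 *	if (zp && !zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, len);		// store one compressed object
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	if (zp)
 *		zpool_destroy_pool(zp);
 */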

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * accordingly be 63 (or 62) freelists per pool.
 */
#define NCHUNKS_ORDER   6

#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
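
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): CHUNK_SHIFT is
 * 12 - 6 = 6, so CHUNK_SIZE is 64 bytes and TOTAL_CHUNKS is 64. The z3fold
 * header fits in one 64-byte chunk, so ZHDR_CHUNKS == 1 and NCHUNKS == 63,
 * matching the comment above (62 when spinlock debugging grows the header
 * past one chunk).
 */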

#define BUDDY_MASK      (0x3)
#define BUDDY_SHIFT     2
#define SLOTS_ALIGN     (0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
        int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
        /*
         * we are using BUDDY_MASK in handle_to_buddy etc. so there should
         * be enough slots to hold all possible variants
         */
        unsigned long slot[BUDDY_MASK + 1];
        unsigned long pool; /* back link */
        rwlock_t lock;
};
#define HANDLE_FLAG_MASK        (0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *                      z3fold page, except for HEADLESS pages
 * @buddy:              links the z3fold page into the relevant list in the
 *                      pool
 * @page_lock:          per-page lock
 * @refcount:           reference count for the z3fold page
 * @work:               work_struct for page layout optimization
 * @slots:              pointer to the structure holding buddy slots
 * @pool:               pointer to the containing pool
 * @cpu:                CPU which this page "belongs" to
 * @first_chunks:       the size of the first buddy in chunks, 0 if free
 * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
 * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @first_num:          the starting number (for the first handle)
 * @mapped_count:       the number of objects currently mapped
 */
struct z3fold_header {
        struct list_head buddy;
        spinlock_t page_lock;
        struct kref refcount;
        struct work_struct work;
        struct z3fold_buddy_slots *slots;
        struct z3fold_pool *pool;
        short cpu;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
        unsigned short mapped_count:2;
        unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:       pool name
 * @lock:       protects pool unbuddied/lru lists
 * @stale_lock: protects pool stale page list
 * @unbuddied:  per-cpu array of lists tracking z3fold pages that contain 2-
 *              buddies; the list each z3fold page is added to depends on
 *              the size of its free region.
 * @lru:        list tracking the z3fold pages in LRU order by most recently
 *              added buddy.
 * @stale:      list of pages marked for freeing
 * @pages_nr:   number of z3fold pages in the pool.
 * @c_handle:   cache for z3fold_buddy_slots allocation
 * @ops:        pointer to a structure of user defined operations specified at
 *              pool creation time.
 * @compact_wq: workqueue for page layout background optimization
 * @release_wq: workqueue for safe page release
 * @work:       work_struct for safe page release
 * @inode:      inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        const char *name;
        spinlock_t lock;
        spinlock_t stale_lock;
        struct list_head *unbuddied;
        struct list_head lru;
        struct list_head stale;
        atomic64_t pages_nr;
        struct kmem_cache *c_handle;
        const struct z3fold_ops *ops;
        struct zpool *zpool;
        const struct zpool_ops *zpool_ops;
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct work;
        struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        PAGE_HEADLESS = 0,
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE,
        PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
        HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
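
/*
 * Example (illustrative): with 4K pages and NCHUNKS_ORDER == 6, CHUNK_SIZE
 * is 64 bytes, so size_to_chunks(100) rounds up to (100 + 63) >> 6 == 2
 * chunks, i.e. the object actually occupies 128 bytes.
 */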

#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
                                                        gfp_t gfp)
{
        struct z3fold_buddy_slots *slots;

        slots = kmem_cache_zalloc(pool->c_handle,
                                 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

        if (slots) {
                /* It will be freed separately in free_handle(). */
                kmemleak_not_leak(slots);
                slots->pool = (unsigned long)pool;
                rwlock_init(&slots->lock);
        }

        return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
        return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
        return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
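
/*
 * Note (illustrative): struct z3fold_buddy_slots comes from a kmem cache
 * aligned to SLOTS_ALIGN (64 bytes), and the slot[] array sits in the first
 * 32 bytes of it. A non-headless handle is the address of one slot[] entry,
 * so clearing the low 6 bits recovers the containing structure, e.g.:
 *
 *	handle = (unsigned long)&slots->slot[2];
 *	slots == (struct z3fold_buddy_slots *)(handle & ~0x3fUL);
 */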

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
        spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
        return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
        spin_unlock(&zhdr->page_lock);
}

static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
                                                        bool lock)
{
        struct z3fold_buddy_slots *slots;
        struct z3fold_header *zhdr;
        int locked = 0;

        if (!(handle & (1 << PAGE_HEADLESS))) {
                slots = handle_to_slots(handle);
                do {
                        unsigned long addr;

                        read_lock(&slots->lock);
                        addr = *(unsigned long *)handle;
                        zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
                        if (lock)
                                locked = z3fold_page_trylock(zhdr);
                        read_unlock(&slots->lock);
                        if (locked)
                                break;
                        cpu_relax();
                } while (lock);
        } else {
                zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
        }

        return zhdr;
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
        return __get_z3fold_header(h, false);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long h)
{
        return __get_z3fold_header(h, true);
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (!test_bit(PAGE_HEADLESS, &page->private))
                z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
        struct z3fold_buddy_slots *slots;
        int i;
        bool is_free;

        if (handle & (1 << PAGE_HEADLESS))
                return;

        if (WARN_ON(*(unsigned long *)handle == 0))
                return;

        slots = handle_to_slots(handle);
        write_lock(&slots->lock);
        *(unsigned long *)handle = 0;

        if (test_bit(HANDLES_NOFREE, &slots->pool)) {
                write_unlock(&slots->lock);
                return; /* simple case, nothing else to do */
        }

        if (zhdr->slots != slots)
                zhdr->foreign_handles--;

        is_free = true;
        for (i = 0; i <= BUDDY_MASK; i++) {
                if (slots->slot[i]) {
                        is_free = false;
                        break;
                }
        }
        write_unlock(&slots->lock);

        if (is_free) {
                struct z3fold_pool *pool = slots_to_pool(slots);

                if (zhdr->slots == slots)
                        zhdr->slots = NULL;
                kmem_cache_free(pool->c_handle, slots);
        }
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
        .name           = "z3fold",
        .init_fs_context = z3fold_init_fs_context,
        .kill_sb        = kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
        int ret = 0;

        z3fold_mnt = kern_mount(&z3fold_fs);
        if (IS_ERR(z3fold_mnt))
                ret = PTR_ERR(z3fold_mnt);

        return ret;
}

static void z3fold_unmount(void)
{
        kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
        pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
        if (IS_ERR(pool->inode)) {
                pool->inode = NULL;
                return 1;
        }

        pool->inode->i_mapping->private_data = pool;
        pool->inode->i_mapping->a_ops = &z3fold_aops;
        return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
        if (pool->inode)
                iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
                                        struct z3fold_pool *pool, gfp_t gfp)
{
        struct z3fold_header *zhdr = page_address(page);
        struct z3fold_buddy_slots *slots;

        INIT_LIST_HEAD(&page->lru);
        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
        clear_bit(PAGE_CLAIMED, &page->private);
        if (headless)
                return zhdr;

        slots = alloc_slots(pool, gfp);
        if (!slots)
                return NULL;

        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        zhdr->cpu = -1;
        zhdr->foreign_handles = 0;
        zhdr->mapped_count = 0;
        zhdr->slots = slots;
        zhdr->pool = pool;
        INIT_LIST_HEAD(&zhdr->buddy);
        INIT_WORK(&zhdr->work, compact_page_work);
        return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
        if (!headless) {
                lock_page(page);
                __ClearPageMovable(page);
                unlock_page(page);
        }
        ClearPagePrivate(page);
        __free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
        return (bud + zhdr->first_num) & BUDDY_MASK;
}
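
/*
 * Example (illustrative): with first_num == 1, __idx() maps FIRST (1) to
 * slot 2, MIDDLE (2) to slot 3 and LAST (3) to slot 0. This rotation by
 * first_num is what lets z3fold_compact_page() (below) relabel a middle
 * buddy as FIRST without rewriting handles that are already out in the
 * wild.
 */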

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
                                struct z3fold_buddy_slots *slots,
                                enum buddy bud)
{
        unsigned long h = (unsigned long)zhdr;
        int idx = 0;

        /*
         * For a headless page, its handle is its pointer with the extra
         * PAGE_HEADLESS bit set
         */
        if (bud == HEADLESS)
                return h | (1 << PAGE_HEADLESS);

        /* otherwise, return pointer to encoded handle */
        idx = __idx(zhdr, bud);
        h += idx;
        if (bud == LAST)
                h |= (zhdr->last_chunks << BUDDY_SHIFT);

        write_lock(&slots->lock);
        slots->slot[idx] = h;
        write_unlock(&slots->lock);
        return (unsigned long)&slots->slot[idx];
}
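
/*
 * Encoded word layout (illustrative, for a non-headless buddy; the z3fold
 * header is page-aligned, so the page-offset bits are free for metadata):
 *
 *	bits PAGE_SHIFT and up		page address of the z3fold header
 *	bits BUDDY_SHIFT..PAGE_SHIFT-1	object size in chunks (LAST only)
 *	bits 0..BUDDY_SHIFT-1		slot index, (bud + first_num) & 3
 *
 * The value handed out to callers is not this word itself but the address
 * of the slot storing it, which is why readers first dereference the
 * handle under slots->lock.
 */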

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
        struct z3fold_buddy_slots *slots = handle_to_slots(handle);
        unsigned long addr;

        read_lock(&slots->lock);
        addr = *(unsigned long *)handle;
        read_unlock(&slots->lock);
        return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct z3fold_buddy_slots *slots = handle_to_slots(handle);
        unsigned long addr;

        read_lock(&slots->lock);
        WARN_ON(handle & (1 << PAGE_HEADLESS));
        addr = *(unsigned long *)handle;
        read_unlock(&slots->lock);
        zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
        return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
        return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
        struct page *page = virt_to_page(zhdr);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);

        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        spin_lock(&pool->lock);
        if (!list_empty(&page->lru))
                list_del_init(&page->lru);
        spin_unlock(&pool->lock);

        if (locked)
                z3fold_page_unlock(zhdr);

        spin_lock(&pool->stale_lock);
        list_add(&zhdr->buddy, &pool->stale);
        queue_work(pool->release_wq, &pool->work);
        spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
                        release_z3fold_page(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        __release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                               refcount);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);

        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
        struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

        spin_lock(&pool->stale_lock);
        while (!list_empty(&pool->stale)) {
                struct z3fold_header *zhdr = list_first_entry(&pool->stale,
                                                struct z3fold_header, buddy);
                struct page *page = virt_to_page(zhdr);

                list_del(&zhdr->buddy);
                if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
                        continue;
                spin_unlock(&pool->stale_lock);
                cancel_work_sync(&zhdr->work);
                free_z3fold_page(page, false);
                cond_resched();
                spin_lock(&pool->stale_lock);
        }
        spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}
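
/*
 * Example (illustrative, 4K page, TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1,
 * NCHUNKS == 63): with no middle buddy, first_chunks == 10 and
 * last_chunks == 20 leave 63 - 10 - 20 == 33 free chunks. With the first
 * buddy in use, a middle buddy at start_middle == 30 with
 * middle_chunks == 10 and the last slot free, only the tail hole counts:
 * 64 - (30 + 10) == 24. The gap between the first and middle buddies is
 * ignored until compaction closes it.
 */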

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
                                struct z3fold_header *zhdr)
{
        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
                struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

                int freechunks = num_free_chunks(zhdr);
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[freechunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
                put_cpu_ptr(pool->unbuddied);
        }
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
        enum buddy bud = HEADLESS;

        if (zhdr->middle_chunks) {
                if (!zhdr->first_chunks &&
                    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
                        bud = FIRST;
                else if (!zhdr->last_chunks)
                        bud = LAST;
        } else {
                if (!zhdr->first_chunks)
                        bud = FIRST;
                else if (!zhdr->last_chunks)
                        bud = LAST;
                else
                        bud = MIDDLE;
        }

        return bud;
}
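
/*
 * Example (illustrative): on a page whose middle buddy is in use, an
 * incoming object becomes FIRST only if it fits in the hole before
 * start_middle; otherwise it becomes LAST if that slot is free. With no
 * middle buddy the remaining free slot is picked in the order FIRST,
 * then LAST, then MIDDLE.
 */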

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                                unsigned short dst_chunk)
{
        void *beg = zhdr;
        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
        return !((zhdr->first_chunks && zhdr->middle_chunks) ||
                        (zhdr->first_chunks && zhdr->last_chunks) ||
                        (zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        void *p = zhdr;
        unsigned long old_handle = 0;
        size_t sz = 0;
        struct z3fold_header *new_zhdr = NULL;
        int first_idx = __idx(zhdr, FIRST);
        int middle_idx = __idx(zhdr, MIDDLE);
        int last_idx = __idx(zhdr, LAST);
        unsigned short *moved_chunks = NULL;

        /*
         * No need to protect slots here -- all the slots are "local" and
         * the page lock is already taken
         */
        if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
                p += ZHDR_SIZE_ALIGNED;
                sz = zhdr->first_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
                moved_chunks = &zhdr->first_chunks;
        } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
                p += zhdr->start_middle << CHUNK_SHIFT;
                sz = zhdr->middle_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
                moved_chunks = &zhdr->middle_chunks;
        } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
                p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
                sz = zhdr->last_chunks << CHUNK_SHIFT;
                old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
                moved_chunks = &zhdr->last_chunks;
        }

        if (sz > 0) {
                enum buddy new_bud = HEADLESS;
                short chunks = size_to_chunks(sz);
                void *q;

                new_zhdr = __z3fold_alloc(pool, sz, false);
                if (!new_zhdr)
                        return NULL;

                if (WARN_ON(new_zhdr == zhdr))
                        goto out_fail;

                new_bud = get_free_buddy(new_zhdr, chunks);
                q = new_zhdr;
                switch (new_bud) {
                case FIRST:
                        new_zhdr->first_chunks = chunks;
                        q += ZHDR_SIZE_ALIGNED;
                        break;
                case MIDDLE:
                        new_zhdr->middle_chunks = chunks;
                        new_zhdr->start_middle =
                                new_zhdr->first_chunks + ZHDR_CHUNKS;
                        q += new_zhdr->start_middle << CHUNK_SHIFT;
                        break;
                case LAST:
                        new_zhdr->last_chunks = chunks;
                        q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
                        break;
                default:
                        goto out_fail;
                }
                new_zhdr->foreign_handles++;
                memcpy(q, p, sz);
                write_lock(&zhdr->slots->lock);
                *(unsigned long *)old_handle = (unsigned long)new_zhdr +
                        __idx(new_zhdr, new_bud);
                if (new_bud == LAST)
                        *(unsigned long *)old_handle |=
                                        (new_zhdr->last_chunks << BUDDY_SHIFT);
                write_unlock(&zhdr->slots->lock);
                add_to_unbuddied(pool, new_zhdr);
                z3fold_page_unlock(new_zhdr);

                *moved_chunks = 0;
        }

        return new_zhdr;

out_fail:
        if (new_zhdr) {
                if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
                        atomic64_dec(&pool->pages_nr);
                else {
                        add_to_unbuddied(pool, new_zhdr);
                        z3fold_page_unlock(new_zhdr);
                }
        }
        return NULL;
}

#define BIG_CHUNK_GAP   3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
                return 0; /* can't move middle chunk, it's used */

        if (unlikely(PageIsolated(page)))
                return 0;

        if (zhdr->middle_chunks == 0)
                return 0; /* nothing to compact */

        if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                /* move to the beginning */
                mchunk_memmove(zhdr, ZHDR_CHUNKS);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
                return 1;
        }

        /*
         * moving data is expensive, so let's only do that if
         * there's substantial gain (at least BIG_CHUNK_GAP chunks)
         */
        if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
            zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
                        BIG_CHUNK_GAP) {
                mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
                return 1;
        } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
                   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
                                        + zhdr->middle_chunks) >=
                        BIG_CHUNK_GAP) {
                unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
                        zhdr->middle_chunks;
                mchunk_memmove(zhdr, new_start);
                zhdr->start_middle = new_start;
                return 1;
        }

        return 0;
}
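
/*
 * Note (illustrative): the first_num++ above is what keeps existing
 * handles valid when a lone middle buddy is promoted to FIRST. The
 * handle's slot index was computed as (MIDDLE + first_num) & BUDDY_MASK,
 * and handle_to_buddy() decodes it as (idx - first_num) & BUDDY_MASK, so
 * bumping first_num by one turns the decoded buddy from MIDDLE (2) into
 * FIRST (1) without touching the handle itself.
 */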

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        struct page *page;

        page = virt_to_page(zhdr);
        if (locked)
                WARN_ON(z3fold_page_trylock(zhdr));
        else
                z3fold_page_lock(zhdr);
        if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }

        if (test_bit(PAGE_STALE, &page->private) ||
            test_and_set_bit(PAGE_CLAIMED, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }

        if (!zhdr->foreign_handles && buddy_single(zhdr) &&
            zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
                if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
                        atomic64_dec(&pool->pages_nr);
                else {
                        clear_bit(PAGE_CLAIMED, &page->private);
                        z3fold_page_unlock(zhdr);
                }
                return;
        }

        z3fold_compact_page(zhdr);
        add_to_unbuddied(pool, zhdr);
        clear_bit(PAGE_CLAIMED, &page->private);
        z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
        struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
                                                work);

        do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
                                                size_t size, bool can_sleep)
{
        struct z3fold_header *zhdr = NULL;
        struct page *page;
        struct list_head *unbuddied;
        int chunks = size_to_chunks(size), i;

lookup:
        /* First, try to find an unbuddied z3fold page. */
        unbuddied = get_cpu_ptr(pool->unbuddied);
        for_each_unbuddied_list(i, chunks) {
                struct list_head *l = &unbuddied[i];

                zhdr = list_first_entry_or_null(READ_ONCE(l),
                                        struct z3fold_header, buddy);

                if (!zhdr)
                        continue;

                /* Re-check under lock. */
                spin_lock(&pool->lock);
                l = &unbuddied[i];
                if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
                                                struct z3fold_header, buddy)) ||
                    !z3fold_page_trylock(zhdr)) {
                        spin_unlock(&pool->lock);
                        zhdr = NULL;
                        put_cpu_ptr(pool->unbuddied);
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }
                list_del_init(&zhdr->buddy);
                zhdr->cpu = -1;
                spin_unlock(&pool->lock);

                page = virt_to_page(zhdr);
                if (test_bit(NEEDS_COMPACTING, &page->private) ||
                    test_bit(PAGE_CLAIMED, &page->private)) {
                        z3fold_page_unlock(zhdr);
                        zhdr = NULL;
                        put_cpu_ptr(pool->unbuddied);
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }

                /*
                 * this page could not be removed from its unbuddied
                 * list while pool lock was held, and then we've taken
                 * page lock so kref_put could not be called before
                 * we got here, so it's safe to just call kref_get()
                 */
                kref_get(&zhdr->refcount);
                break;
        }
        put_cpu_ptr(pool->unbuddied);

        if (!zhdr) {
                int cpu;

                /* look for _exact_ match on other cpus' lists */
                for_each_online_cpu(cpu) {
                        struct list_head *l;

                        unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
                        spin_lock(&pool->lock);
                        l = &unbuddied[chunks];

                        zhdr = list_first_entry_or_null(READ_ONCE(l),
                                                struct z3fold_header, buddy);

                        if (!zhdr || !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                zhdr = NULL;
                                continue;
                        }
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        spin_unlock(&pool->lock);

                        page = virt_to_page(zhdr);
                        if (test_bit(NEEDS_COMPACTING, &page->private) ||
                            test_bit(PAGE_CLAIMED, &page->private)) {
                                z3fold_page_unlock(zhdr);
                                zhdr = NULL;
                                if (can_sleep)
                                        cond_resched();
                                continue;
                        }
                        kref_get(&zhdr->refcount);
                        break;
                }
        }

        if (zhdr && !zhdr->slots)
                zhdr->slots = alloc_slots(pool,
                                        can_sleep ? GFP_NOIO : GFP_ATOMIC);
        return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:       pool name
 * @gfp:        gfp flags when allocating the z3fold pool structure
 * @ops:        user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
                const struct z3fold_ops *ops)
{
        struct z3fold_pool *pool = NULL;
        int i, cpu;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                goto out;
        pool->c_handle = kmem_cache_create("z3fold_handle",
                                sizeof(struct z3fold_buddy_slots),
                                SLOTS_ALIGN, 0, NULL);
        if (!pool->c_handle)
                goto out_c;
        spin_lock_init(&pool->lock);
        spin_lock_init(&pool->stale_lock);
        pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
        if (!pool->unbuddied)
                goto out_pool;
        for_each_possible_cpu(cpu) {
                struct list_head *unbuddied =
                                per_cpu_ptr(pool->unbuddied, cpu);
                for_each_unbuddied_list(i, 0)
                        INIT_LIST_HEAD(&unbuddied[i]);
        }
        INIT_LIST_HEAD(&pool->lru);
        INIT_LIST_HEAD(&pool->stale);
        atomic64_set(&pool->pages_nr, 0);
        pool->name = name;
        pool->compact_wq = create_singlethread_workqueue(pool->name);
        if (!pool->compact_wq)
                goto out_unbuddied;
        pool->release_wq = create_singlethread_workqueue(pool->name);
        if (!pool->release_wq)
                goto out_wq;
        if (z3fold_register_migration(pool))
                goto out_rwq;
        INIT_WORK(&pool->work, free_pages_work);
        pool->ops = ops;
        return pool;

out_rwq:
        destroy_workqueue(pool->release_wq);
out_wq:
        destroy_workqueue(pool->compact_wq);
out_unbuddied:
        free_percpu(pool->unbuddied);
out_pool:
        kmem_cache_destroy(pool->c_handle);
out_c:
        kfree(pool);
out:
        return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:       the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        kmem_cache_destroy(pool->c_handle);

        /*
         * We need to destroy pool->compact_wq before pool->release_wq,
         * as any pending work on pool->compact_wq will call
         * queue_work(pool->release_wq, &pool->work).
         *
         * There are still outstanding pages until both workqueues are drained,
         * so we cannot unregister migration until then.
         */

        destroy_workqueue(pool->compact_wq);
        destroy_workqueue(pool->release_wq);
        z3fold_unregister_migration(pool);
        kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:       z3fold pool from which to allocate
 * @size:       size in bytes of the desired allocation
 * @gfp:        gfp flags used if the pool needs to grow
 * @handle:     handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = size_to_chunks(size);
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        enum buddy bud;
        bool can_sleep = gfpflags_allow_blocking(gfp);

        if (!size)
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
retry:
                zhdr = __z3fold_alloc(pool, size, can_sleep);
                if (zhdr) {
                        bud = get_free_buddy(zhdr, chunks);
                        if (bud == HEADLESS) {
                                if (kref_put(&zhdr->refcount,
                                             release_z3fold_page_locked))
                                        atomic64_dec(&pool->pages_nr);
                                else
                                        z3fold_page_unlock(zhdr);
                                pr_err("No free chunks in unbuddied\n");
                                WARN_ON(1);
                                goto retry;
                        }
                        page = virt_to_page(zhdr);
                        goto found;
                }
                bud = FIRST;
        }

        page = NULL;
        if (can_sleep) {
                spin_lock(&pool->stale_lock);
                zhdr = list_first_entry_or_null(&pool->stale,
                                                struct z3fold_header, buddy);
                /*
                 * Before allocating a page, let's see if we can take one from
                 * the stale pages list. cancel_work_sync() can sleep so we
                 * limit this case to the contexts where we can sleep
                 */
                if (zhdr) {
                        list_del(&zhdr->buddy);
                        spin_unlock(&pool->stale_lock);
                        cancel_work_sync(&zhdr->work);
                        page = virt_to_page(zhdr);
                } else {
                        spin_unlock(&pool->stale_lock);
                }
        }
        if (!page)
                page = alloc_page(gfp);

        if (!page)
                return -ENOMEM;

        zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
        if (!zhdr) {
                __free_page(page);
                return -ENOMEM;
        }
        atomic64_inc(&pool->pages_nr);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }
        if (can_sleep) {
                lock_page(page);
                __SetPageMovable(page, pool->inode->i_mapping);
                unlock_page(page);
        } else {
                if (trylock_page(page)) {
                        __SetPageMovable(page, pool->inode->i_mapping);
                        unlock_page(page);
                }
        }
        z3fold_page_lock(zhdr);

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
        }
        add_to_unbuddied(pool, zhdr);

headless:
        spin_lock(&pool->lock);
        /* Add/move z3fold page to beginning of LRU */
        if (!list_empty(&page->lru))
                list_del(&page->lru);

        list_add(&page->lru, &pool->lru);

        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);
        if (bud != HEADLESS)
                z3fold_page_unlock(zhdr);

        return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:       pool in which the allocation resided
 * @handle:     handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;
        bool page_claimed;

        zhdr = get_z3fold_header(handle);
        page = virt_to_page(zhdr);
        page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* if a headless page is under reclaim, just leave.
                 * NB: we use test_and_set_bit for a reason: if the bit
                 * has not been set before, we release this page
                 * immediately so we don't care about its value any more.
                 */
                if (!page_claimed) {
                        spin_lock(&pool->lock);
                        list_del(&page->lru);
                        spin_unlock(&pool->lock);
                        put_z3fold_header(zhdr);
                        free_z3fold_page(page, true);
                        atomic64_dec(&pool->pages_nr);
                }
                return;
        }

        /* Non-headless case */
        bud = handle_to_buddy(handle);

        switch (bud) {
        case FIRST:
                zhdr->first_chunks = 0;
                break;
        case MIDDLE:
                zhdr->middle_chunks = 0;
                break;
        case LAST:
                zhdr->last_chunks = 0;
                break;
        default:
                pr_err("%s: unknown bud %d\n", __func__, bud);
                WARN_ON(1);
                put_z3fold_header(zhdr);
                return;
        }

        if (!page_claimed)
                free_handle(handle, zhdr);
        if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }
        if (page_claimed) {
                /* the page has not been claimed by us */
                z3fold_page_unlock(zhdr);
                return;
        }
        if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                put_z3fold_header(zhdr);
                clear_bit(PAGE_CLAIMED, &page->private);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
                spin_lock(&pool->lock);
                list_del_init(&zhdr->buddy);
                spin_unlock(&pool->lock);
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                clear_bit(PAGE_CLAIMED, &page->private);
                do_compact_page(zhdr, true);
                return;
        }
        kref_get(&zhdr->refcount);
        clear_bit(PAGE_CLAIMED, &page->private);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
        put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:       pool from which a page will attempt to be evicted
 * @retries:    number of pages on the LRU list for which eviction will
 *              be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
        int i, ret = -1;
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        struct list_head *pos;
        unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
        struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

        rwlock_init(&slots.lock);
        slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

        spin_lock(&pool->lock);
        if (!pool->ops || !pool->ops->evict || retries == 0) {
                spin_unlock(&pool->lock);
                return -EINVAL;
        }
        for (i = 0; i < retries; i++) {
                if (list_empty(&pool->lru)) {
                        spin_unlock(&pool->lock);
                        return -EINVAL;
                }
                list_for_each_prev(pos, &pool->lru) {
                        page = list_entry(pos, struct page, lru);

                        zhdr = page_address(page);
                        if (test_bit(PAGE_HEADLESS, &page->private))
                                break;

                        if (kref_get_unless_zero(&zhdr->refcount) == 0) {
                                zhdr = NULL;
                                break;
                        }
                        if (!z3fold_page_trylock(zhdr)) {
                                if (kref_put(&zhdr->refcount,
                                                release_z3fold_page))
                                        atomic64_dec(&pool->pages_nr);
                                zhdr = NULL;
                                continue; /* can't evict at this point */
                        }

                        /* test_and_set_bit is of course atomic, but we still
                         * need to do it under page lock, otherwise checking
                         * that bit in __z3fold_alloc wouldn't make sense
                         */
                        if (zhdr->foreign_handles ||
                            test_and_set_bit(PAGE_CLAIMED, &page->private)) {
                                if (kref_put(&zhdr->refcount,
                                                release_z3fold_page))
                                        atomic64_dec(&pool->pages_nr);
                                else
                                        z3fold_page_unlock(zhdr);
                                zhdr = NULL;
                                continue; /* can't evict such page */
                        }
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        break;
                }

                if (!zhdr)
                        break;

                list_del_init(&page->lru);
                spin_unlock(&pool->lock);

                if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        /*
                         * We need to encode the handles before unlocking, and
                         * use our local slots structure because z3fold_free
                         * can zero out zhdr->slots and we can't do much
                         * about that
                         */
1400                         first_handle = 0;
1401                         last_handle = 0;
1402                         middle_handle = 0;
1403                         memset(slots.slot, 0, sizeof(slots.slot));
1404                         if (zhdr->first_chunks)
1405                                 first_handle = __encode_handle(zhdr, &slots,
1406                                                                 FIRST);
1407                         if (zhdr->middle_chunks)
1408                                 middle_handle = __encode_handle(zhdr, &slots,
1409                                                                 MIDDLE);
1410                         if (zhdr->last_chunks)
1411                                 last_handle = __encode_handle(zhdr, &slots,
1412                                                                 LAST);
1413                         /*
1414                          * it's safe to unlock here because we hold a
1415                          * reference to this page
1416                          */
1417                         z3fold_page_unlock(zhdr);
1418                 } else {
1419                         first_handle = encode_handle(zhdr, HEADLESS);
1420                         last_handle = middle_handle = 0;
1421                 }
1422                 /* Issue the eviction callback(s) */
1423                 if (middle_handle) {
1424                         ret = pool->ops->evict(pool, middle_handle);
1425                         if (ret)
1426                                 goto next;
1427                 }
1428                 if (first_handle) {
1429                         ret = pool->ops->evict(pool, first_handle);
1430                         if (ret)
1431                                 goto next;
1432                 }
1433                 if (last_handle) {
1434                         ret = pool->ops->evict(pool, last_handle);
1435                         if (ret)
1436                                 goto next;
1437                 }
1438 next:
1439                 if (test_bit(PAGE_HEADLESS, &page->private)) {
1440                         if (ret == 0) {
1441                                 free_z3fold_page(page, true);
1442                                 atomic64_dec(&pool->pages_nr);
1443                                 return 0;
1444                         }
1445                         spin_lock(&pool->lock);
1446                         list_add(&page->lru, &pool->lru);
1447                         spin_unlock(&pool->lock);
1448                         clear_bit(PAGE_CLAIMED, &page->private);
1449                 } else {
1450                         struct z3fold_buddy_slots *slots = zhdr->slots;
1451                         z3fold_page_lock(zhdr);
1452                         if (kref_put(&zhdr->refcount,
1453                                         release_z3fold_page_locked)) {
1454                                 kmem_cache_free(pool->c_handle, slots);
1455                                 atomic64_dec(&pool->pages_nr);
1456                                 return 0;
1457                         }
1458                         /*
1459                          * If we are here, the page is still not completely
1460                          * free. Take the global pool lock so that we can
1461                          * add it back to the LRU list.
1462                          */
1463                         spin_lock(&pool->lock);
1464                         list_add(&page->lru, &pool->lru);
1465                         spin_unlock(&pool->lock);
1466                         z3fold_page_unlock(zhdr);
1467                         clear_bit(PAGE_CLAIMED, &page->private);
1468                 }
1469
1470                 /* We started off locked so we need to lock the pool back */
1471                 spin_lock(&pool->lock);
1472         }
1473         spin_unlock(&pool->lock);
1474         return -EAGAIN;
1475 }
1476
1477 /**
1478  * z3fold_map() - maps the allocation associated with the given handle
1479  * @pool:       pool in which the allocation resides
1480  * @handle:     handle associated with the allocation to be mapped
1481  *
1482  * Extracts the buddy number from handle and constructs the pointer to the
1483  * correct starting chunk within the page.
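 *
 * For instance (illustrative only, assuming 4K pages, i.e. CHUNK_SHIFT
 * of 6 and 64-byte chunks): a LAST buddy spanning four chunks is mapped
 * at page address + PAGE_SIZE - (4 << CHUNK_SHIFT), i.e. at offset 3840
 * from the start of the page.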
1484  *
1485  * Returns: a pointer to the mapped allocation
1486  */
1487 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1488 {
1489         struct z3fold_header *zhdr;
1490         struct page *page;
1491         void *addr;
1492         enum buddy buddy;
1493
1494         zhdr = get_z3fold_header(handle);
1495         addr = zhdr;
1496         page = virt_to_page(zhdr);
1497
1498         if (test_bit(PAGE_HEADLESS, &page->private))
1499                 goto out;
1500
1501         buddy = handle_to_buddy(handle);
1502         switch (buddy) {
1503         case FIRST:
1504                 addr += ZHDR_SIZE_ALIGNED;
1505                 break;
1506         case MIDDLE:
1507                 addr += zhdr->start_middle << CHUNK_SHIFT;
1508                 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1509                 break;
1510         case LAST:
1511                 addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1512                 break;
1513         default:
1514                 pr_err("unknown buddy id %d\n", buddy);
1515                 WARN_ON(1);
1516                 addr = NULL;
1517                 break;
1518         }
1519
1520         if (addr)
1521                 zhdr->mapped_count++;
1522 out:
1523         put_z3fold_header(zhdr);
1524         return addr;
1525 }
1526
1527 /**
1528  * z3fold_unmap() - unmaps the allocation associated with the given handle
1529  * @pool:       pool in which the allocation resides
1530  * @handle:     handle associated with the allocation to be unmapped
1531  */
1532 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1533 {
1534         struct z3fold_header *zhdr;
1535         struct page *page;
1536         enum buddy buddy;
1537
1538         zhdr = get_z3fold_header(handle);
1539         page = virt_to_page(zhdr);
1540
1541         if (test_bit(PAGE_HEADLESS, &page->private))
1542                 return;
1543
1544         buddy = handle_to_buddy(handle);
1545         if (buddy == MIDDLE)
1546                 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1547         zhdr->mapped_count--;
1548         put_z3fold_header(zhdr);
1549 }
1550
1551 /**
1552  * z3fold_get_pool_size() - gets the z3fold pool size in pages
1553  * @pool:       pool whose size is being queried
1554  *
1555  * Returns: size in pages of the given pool.
1556  */
1557 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1558 {
1559         return atomic64_read(&pool->pages_nr);
1560 }
1561
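/*
 * Page migration support: the callbacks below are wired up via
 * z3fold_aops so that the core mm can isolate, migrate and put back
 * z3fold pages.  Isolation is refused for headless pages and for pages
 * that are mapped, carry foreign handles, are stale or pending
 * compaction, or are already claimed by reclaim.
 */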
1562 static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1563 {
1564         struct z3fold_header *zhdr;
1565         struct z3fold_pool *pool;
1566
1567         VM_BUG_ON_PAGE(!PageMovable(page), page);
1568         VM_BUG_ON_PAGE(PageIsolated(page), page);
1569
1570         if (test_bit(PAGE_HEADLESS, &page->private))
1571                 return false;
1572
1573         zhdr = page_address(page);
1574         z3fold_page_lock(zhdr);
1575         if (test_bit(NEEDS_COMPACTING, &page->private) ||
1576             test_bit(PAGE_STALE, &page->private))
1577                 goto out;
1578
1579         if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1580                 goto out;
1581
1582         if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1583                 goto out;
1584         pool = zhdr_to_pool(zhdr);
1585         spin_lock(&pool->lock);
1586         if (!list_empty(&zhdr->buddy))
1587                 list_del_init(&zhdr->buddy);
1588         if (!list_empty(&page->lru))
1589                 list_del_init(&page->lru);
1590         spin_unlock(&pool->lock);
1591
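        /*
         * Take an extra reference on the z3fold header so that the page
         * cannot be released while it is isolated for migration.
         */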
1592         kref_get(&zhdr->refcount);
1593         z3fold_page_unlock(zhdr);
1594         return true;
1595
1596 out:
1597         z3fold_page_unlock(zhdr);
1598         return false;
1599 }
1600
1601 static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
1602                                struct page *page, enum migrate_mode mode)
1603 {
1604         struct z3fold_header *zhdr, *new_zhdr;
1605         struct z3fold_pool *pool;
1606         struct address_space *new_mapping;
1607
1608         VM_BUG_ON_PAGE(!PageMovable(page), page);
1609         VM_BUG_ON_PAGE(!PageIsolated(page), page);
1610         VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
1611         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1612
1613         zhdr = page_address(page);
1614         pool = zhdr_to_pool(zhdr);
1615
1616         if (!z3fold_page_trylock(zhdr))
1617                 return -EAGAIN;
1618         if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1619                 z3fold_page_unlock(zhdr);
1620                 clear_bit(PAGE_CLAIMED, &page->private);
1621                 return -EBUSY;
1622         }
1623         if (work_pending(&zhdr->work)) {
1624                 z3fold_page_unlock(zhdr);
1625                 return -EAGAIN;
1626         }
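        /*
         * Copy the whole source page, z3fold header included, over to the
         * new page.  The handle slots live in a separate kmem_cache object,
         * so only the pointer to them is copied here.
         */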
1627         new_zhdr = page_address(newpage);
1628         memcpy(new_zhdr, zhdr, PAGE_SIZE);
1629         newpage->private = page->private;
1630         page->private = 0;
1631         z3fold_page_unlock(zhdr);
1632         spin_lock_init(&new_zhdr->page_lock);
1633         INIT_WORK(&new_zhdr->work, compact_page_work);
1634         /*
1635          * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1636          * so we only have to reinitialize it.
1637          */
1638         INIT_LIST_HEAD(&new_zhdr->buddy);
1639         new_mapping = page_mapping(page);
1640         __ClearPageMovable(page);
1641         ClearPagePrivate(page);
1642
1643         get_page(newpage);
1644         z3fold_page_lock(new_zhdr);
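        /*
         * Re-encode the live handles so that each slot entry points at the
         * new page's z3fold header instead of the old one.
         */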
1645         if (new_zhdr->first_chunks)
1646                 encode_handle(new_zhdr, FIRST);
1647         if (new_zhdr->last_chunks)
1648                 encode_handle(new_zhdr, LAST);
1649         if (new_zhdr->middle_chunks)
1650                 encode_handle(new_zhdr, MIDDLE);
1651         set_bit(NEEDS_COMPACTING, &newpage->private);
1652         new_zhdr->cpu = smp_processor_id();
1653         spin_lock(&pool->lock);
1654         list_add(&newpage->lru, &pool->lru);
1655         spin_unlock(&pool->lock);
1656         __SetPageMovable(newpage, new_mapping);
1657         z3fold_page_unlock(new_zhdr);
1658
1659         queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1660
1661         page_mapcount_reset(page);
1662         clear_bit(PAGE_CLAIMED, &page->private);
1663         put_page(page);
1664         return 0;
1665 }
1666
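/*
 * Called when migration of an isolated page fails: put the page back on
 * the pool's LRU and drop the reference taken at isolation time.
 */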
1667 static void z3fold_page_putback(struct page *page)
1668 {
1669         struct z3fold_header *zhdr;
1670         struct z3fold_pool *pool;
1671
1672         zhdr = page_address(page);
1673         pool = zhdr_to_pool(zhdr);
1674
1675         z3fold_page_lock(zhdr);
1676         if (!list_empty(&zhdr->buddy))
1677                 list_del_init(&zhdr->buddy);
1678         INIT_LIST_HEAD(&page->lru);
1679         if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1680                 atomic64_dec(&pool->pages_nr);
1681                 return;
1682         }
1683         spin_lock(&pool->lock);
1684         list_add(&page->lru, &pool->lru);
1685         spin_unlock(&pool->lock);
1686         clear_bit(PAGE_CLAIMED, &page->private);
1687         z3fold_page_unlock(zhdr);
1688 }
1689
1690 static const struct address_space_operations z3fold_aops = {
1691         .isolate_page = z3fold_page_isolate,
1692         .migratepage = z3fold_page_migrate,
1693         .putback_page = z3fold_page_putback,
1694 };
1695
1696 /*****************
1697  * zpool
1698  ****************/
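/*
 * The functions below are thin adapters that expose the z3fold pool
 * operations through the generic zpool driver interface.
 */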
1699
1700 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
1701 {
1702         if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
1703                 return pool->zpool_ops->evict(pool->zpool, handle);
1704         else
1705                 return -ENOENT;
1706 }
1707
1708 static const struct z3fold_ops z3fold_zpool_ops = {
1709         .evict =        z3fold_zpool_evict
1710 };
1711
1712 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
1713                                const struct zpool_ops *zpool_ops,
1714                                struct zpool *zpool)
1715 {
1716         struct z3fold_pool *pool;
1717
1718         pool = z3fold_create_pool(name, gfp,
1719                                 zpool_ops ? &z3fold_zpool_ops : NULL);
1720         if (pool) {
1721                 pool->zpool = zpool;
1722                 pool->zpool_ops = zpool_ops;
1723         }
1724         return pool;
1725 }
1726
1727 static void z3fold_zpool_destroy(void *pool)
1728 {
1729         z3fold_destroy_pool(pool);
1730 }
1731
1732 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1733                         unsigned long *handle)
1734 {
1735         return z3fold_alloc(pool, size, gfp, handle);
1736 }
1737 static void z3fold_zpool_free(void *pool, unsigned long handle)
1738 {
1739         z3fold_free(pool, handle);
1740 }
1741
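/*
 * Reclaim up to @pages pages, one z3fold_reclaim_page() call per page;
 * each call scans up to 8 entries from the LRU tail before giving up.
 */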
1742 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1743                         unsigned int *reclaimed)
1744 {
1745         unsigned int total = 0;
1746         int ret = -EINVAL;
1747
1748         while (total < pages) {
1749                 ret = z3fold_reclaim_page(pool, 8);
1750                 if (ret < 0)
1751                         break;
1752                 total++;
1753         }
1754
1755         if (reclaimed)
1756                 *reclaimed = total;
1757
1758         return ret;
1759 }
1760
1761 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1762                         enum zpool_mapmode mm)
1763 {
1764         return z3fold_map(pool, handle);
1765 }
1766 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1767 {
1768         z3fold_unmap(pool, handle);
1769 }
1770
1771 static u64 z3fold_zpool_total_size(void *pool)
1772 {
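        /* zpool expects a size in bytes while z3fold counts whole pages */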
1773         return z3fold_get_pool_size(pool) * PAGE_SIZE;
1774 }
1775
1776 static struct zpool_driver z3fold_zpool_driver = {
1777         .type =         "z3fold",
1778         .owner =        THIS_MODULE,
1779         .create =       z3fold_zpool_create,
1780         .destroy =      z3fold_zpool_destroy,
1781         .malloc =       z3fold_zpool_malloc,
1782         .free =         z3fold_zpool_free,
1783         .shrink =       z3fold_zpool_shrink,
1784         .map =          z3fold_zpool_map,
1785         .unmap =        z3fold_zpool_unmap,
1786         .total_size =   z3fold_zpool_total_size,
1787 };
1788
1789 MODULE_ALIAS("zpool-z3fold");
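
/*
 * Illustrative sketch of how a zpool user such as zswap reaches this
 * driver.  The names "my_evict", "my_ops" and "src" are hypothetical;
 * this snippet is not part of the module:
 *
 *	static int my_evict(struct zpool *zpool, unsigned long handle)
 *	{
 *		return -EAGAIN;
 *	}
 *	static const struct zpool_ops my_ops = { .evict = my_evict };
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL,
 *					     &my_ops);
 *	unsigned long handle;
 *
 *	if (zp && !zpool_malloc(zp, 100, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *		memcpy(dst, src, 100);
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_destroy_pool(zp);
 */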
1790
1791 static int __init init_z3fold(void)
1792 {
1793         int ret;
1794
1795         /* Make sure the z3fold header is not larger than the page size */
1796         BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
1797         ret = z3fold_mount();
1798         if (ret)
1799                 return ret;
1800
1801         zpool_register_driver(&z3fold_zpool_driver);
1802
1803         return 0;
1804 }
1805
1806 static void __exit exit_z3fold(void)
1807 {
1808         z3fold_unmount();
1809         zpool_unregister_driver(&z3fold_zpool_driver);
1810 }
1811
1812 module_init(init_z3fold);
1813 module_exit(exit_z3fold);
1814
1815 MODULE_LICENSE("GPL");
1816 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1817 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");