// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * in the beginning of an allocated page are occupied by z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * accordingly be 63 (or 62) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)
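
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096 (PAGE_SHIFT == 12)
 * and a header that fits into a single chunk, CHUNK_SHIFT = 12 - 6 = 6,
 * so CHUNK_SIZE = 64 bytes, TOTAL_CHUNKS = 4096 >> 6 = 64, ZHDR_CHUNKS = 1
 * and hence NCHUNKS = 63.
 */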

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	number of handles to this page held in other pages' slots
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};
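
/*
 * Resulting layout of a non-HEADLESS z3fold page, in chunks (a sketch;
 * any of the three buddies may be absent, i.e. have a size of 0):
 *
 *  0          ZHDR_CHUNKS      start_middle                    TOTAL_CHUNKS
 *  +----------+----------------+--------------+----------------+
 *  | header   | FIRST buddy    | MIDDLE buddy |     LAST buddy |
 *  +----------+----------------+--------------+----------------+
 *
 * FIRST starts right after the header, LAST ends at the page boundary,
 * and MIDDLE sits at start_middle; free space, if any, lies between the
 * occupied regions (see num_free_chunks() below).
 */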

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain at
 *		most 2 buddies; the list each z3fold page is added to depends
 *		on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
	PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
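
/*
 * E.g. (illustrative, assuming 64-byte chunks): size_to_chunks(100)
 * evaluates to (100 + 63) >> 6 == 2, i.e. sizes are always rounded up
 * to a whole number of chunks.
 */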

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
							     gfp);

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
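
/*
 * This works because the "z3fold_handle" kmem cache is created with
 * SLOTS_ALIGN (0x40) alignment and the slot[] array lies in the first
 * 64 bytes of struct z3fold_buddy_slots, so masking a non-headless
 * handle (the address of one slot[] entry) with ~(SLOTS_ALIGN - 1)
 * recovers the address of the containing structure.
 */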

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked) {
				struct page *page = virt_to_page(zhdr);

				if (!test_bit(PAGE_MIGRATED, &page->private))
					break;
				z3fold_page_unlock(zhdr);
			}
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	clear_bit(PAGE_MIGRATED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}
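
/*
 * E.g. with first_num == 1 (illustrative), FIRST (1) maps to slot 2,
 * MIDDLE (2) to slot 3 and LAST (3) to slot 0. first_num is bumped when
 * compaction turns a middle buddy into a first buddy (see
 * z3fold_compact_page()), so previously encoded handles keep resolving
 * to the correct slot.
 */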

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}
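
/*
 * Layout of the encoded word, for reference (a sketch): the value stored
 * in slot[idx] is the z3fold_header address with idx folded into its low
 * bits, and for a LAST buddy the object size in chunks stored above
 * BUDDY_SHIFT; e.g. idx == 3 and last_chunks == 5 yield low bits
 * (5 << 2) | 3. handle_to_chunks() and handle_to_buddy() below mask
 * these fields back out of the sub-page part of the word.
 */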

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);

	atomic64_dec(&pool->pages_nr);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
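
/*
 * E.g. (illustrative, with TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1):
 * a page whose only object is a middle buddy with start_middle == 30
 * and middle_chunks == 10 has nfree_before == 29 and nfree_after == 24,
 * so num_free_chunks() reports 29, the larger contiguous region.
 */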

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
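
/*
 * E.g. (illustrative): with first_chunks == 10, ZHDR_CHUNKS == 1,
 * last_chunks == 0 and start_middle == 20, the gap below the middle
 * buddy is 20 - 11 == 9 chunks >= BIG_CHUNK_GAP, so the middle buddy
 * is moved down to chunk 11 and all free space is merged into a single
 * region above it.
 */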

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots) {
		zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
		if (!zhdr->slots)
			goto out_fail;
	}
	return zhdr;

out_fail:
	if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, zhdr);
		z3fold_page_unlock(zhdr);
	}
	return NULL;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

static const struct movable_operations z3fold_mops;

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (!kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	} else {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0.  The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
		return;
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		clear_bit(PAGE_CLAIMED, &page->private);
		put_z3fold_header(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private)) {
				/*
				 * For non-headless pages, we wait to do this
				 * until we have the page lock to avoid racing
				 * with __z3fold_alloc(). Headless pages don't
				 * have a lock (and __z3fold_alloc() will never
				 * see them), but we still need to test and set
				 * PAGE_CLAIMED to avoid racing with
				 * z3fold_free(), so just do it now before
				 * leaving the loop.
				 */
				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
					continue;

				break;
			}

			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/* test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			/* See comment in __z3fold_alloc. */
			kref_get(&zhdr->refcount);
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}
		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->zpool_ops->evict(pool->zpool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->zpool_ops->evict(pool->zpool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->zpool_ops->evict(pool->zpool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			if (list_empty(&zhdr->buddy))
				add_to_unbuddied(pool, zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to take the pool lock again */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

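/*
 * Page migration support: the three callbacks below implement struct
 * movable_operations for z3fold pages (registered per page with
 * __SetPageMovable(), see z3fold_page_migrate() below). The core mm
 * calls isolate_page first, then migrate_page to relocate the data to
 * a fresh page, and putback_page if the migration attempt is aborted.
 */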
static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		clear_bit(PAGE_CLAIMED, &page->private);
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
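	/*
	 * The copy below moves the whole source page, z3fold header
	 * included; the handles in the shared buddy slots still encode
	 * the old page's address at this point and are re-encoded
	 * against new_zhdr by the encode_handle() calls further down.
	 */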
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	set_bit(PAGE_MIGRATED, &page->private);
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, &z3fold_mops);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
	page->private = 0;
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	if (list_empty(&zhdr->buddy))
		add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct movable_operations z3fold_mops = {
	.isolate_page = z3fold_page_isolate,
	.migrate_page = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}
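
/*
 * Illustrative sketch (not part of this driver): a zpool user creates
 * a z3fold-backed pool by type name; the zpool core resolves "z3fold"
 * to this driver and ends up in z3fold_zpool_create() above ("my_ops"
 * is a hypothetical struct zpool_ops with an evict callback):
 *
 *	struct zpool *zpool;
 *
 *	zpool = zpool_create_pool("z3fold", "demo", GFP_KERNEL, &my_ops);
 *	if (!zpool)
 *		return -ENOMEM;
 */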

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}
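
/*
 * Illustrative sketch (not part of this driver): zpool users reach the
 * shrink path above via zpool_shrink(), e.g. to reclaim one physical
 * page and read back how many pages were actually freed ("zpool" and
 * "freed" are hypothetical caller state):
 *
 *	unsigned int freed;
 *
 *	if (zpool_shrink(zpool, 1, &freed))
 *		pr_warn("z3fold reclaim failed\n");
 *
 * Each page successfully reclaimed by z3fold_reclaim_page() may have
 * held up to three compressed objects, which are evicted through the
 * caller-supplied zpool_ops before the page is freed.
 */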

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
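
/*
 * Illustrative note: with this driver registered, z3fold can be
 * selected as the zswap backend, either at boot with
 * "zswap.zpool=z3fold" or at runtime:
 *
 *	echo z3fold > /sys/module/zswap/parameters/zpool
 *
 * zswap then performs all allocation, mapping and reclaim through the
 * zpool_driver callbacks above.
 */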

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * that at least one chunk remains free for a buddy.
	 */
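	/*
	 * For example, with 4K pages and NCHUNKS_ORDER == 6, CHUNK_SIZE
	 * is 64 bytes, so this asserts ZHDR_SIZE_ALIGNED <= 4032 and at
	 * least one 64-byte chunk per page is left for payload.
	 */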
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");