1 // SPDX-License-Identifier: GPL-2.0-only
5 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
6 * Copyright (C) 2016, Sony Mobile Communications Inc.
8 * This implementation is based on zbud written by Seth Jennings.
10 * z3fold is a special purpose allocator for storing compressed pages. It
11 * can store up to three compressed pages per page, which improves the
12 * compression ratio of zbud while retaining its main concepts (e.g. always
13 * storing an integral number of objects per page) and simplicity.
14 * It still has simple and deterministic reclaim properties that make it
15 * preferable to a higher density approach (with no requirement on integral
16 * number of objects per page) when reclaim is used.
18 * As in zbud, pages are divided into "chunks". The size of the chunks is
19 * fixed at compile time and is determined by NCHUNKS_ORDER below.
21 * z3fold doesn't export any API and is meant to be used via zpool API.
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26 #include <linux/atomic.h>
27 #include <linux/sched.h>
28 #include <linux/cpumask.h>
29 #include <linux/list.h>
31 #include <linux/module.h>
32 #include <linux/page-flags.h>
33 #include <linux/migrate.h>
34 #include <linux/node.h>
35 #include <linux/compaction.h>
36 #include <linux/percpu.h>
37 #include <linux/preempt.h>
38 #include <linux/workqueue.h>
39 #include <linux/slab.h>
40 #include <linux/spinlock.h>
41 #include <linux/zpool.h>
42 #include <linux/kmemleak.h>
45 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
46 * adjusting internal fragmentation. It also determines the number of
47 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
48 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
49 * at the beginning of an allocated page are occupied by the z3fold header, so
50 * NCHUNKS works out to 63 (or 62 if CONFIG_DEBUG_SPINLOCK=y), which is the
51 * maximum number of free chunks in a z3fold page; there will likewise be
52 * 63 (or 62, respectively) freelists per pool.
54 #define NCHUNKS_ORDER 6
56 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
57 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
58 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
59 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
60 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
61 #define NCHUNKS (TOTAL_CHUNKS - ZHDR_CHUNKS)
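/*
 * A worked example of the chunk arithmetic above (illustrative only,
 * assuming PAGE_SIZE == 4096 and the default NCHUNKS_ORDER == 6):
 *
 *   CHUNK_SHIFT  = PAGE_SHIFT - NCHUNKS_ORDER = 12 - 6 = 6
 *   CHUNK_SIZE   = 1 << 6 = 64 bytes (the allocation granularity)
 *   TOTAL_CHUNKS = 4096 >> 6 = 64 chunks per page
 *   NCHUNKS      = 64 - ZHDR_CHUNKS, where ZHDR_CHUNKS is however many
 *                  64-byte chunks the z3fold header occupies; with a
 *                  one-chunk header this gives the 63 freelists
 *                  mentioned above.
 */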
63 #define BUDDY_MASK (0x3)
65 #define SLOTS_ALIGN (0x40)
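/*
 * Note (added for clarity): z3fold_buddy_slots structures are allocated
 * from a SLOTS_ALIGN-aligned kmem cache (see z3fold_create_pool()), and a
 * non-headless handle handed out to users is a pointer to one of the
 * slot[] entries inside such a structure. Because of the alignment,
 * handle_to_slots() can recover the containing structure by simply
 * masking off the low bits: handle & ~(SLOTS_ALIGN - 1).
 */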
80 struct z3fold_buddy_slots {
82 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
83 * be enough slots to hold all possible variants
85 unsigned long slot[BUDDY_MASK + 1];
86 unsigned long pool; /* back link */
89 #define HANDLE_FLAG_MASK (0x03)
92 * struct z3fold_header - z3fold page metadata occupying first chunks of each
93 * z3fold page, except for HEADLESS pages
94 * @buddy: links the z3fold page into the relevant list in the pool
96 * @page_lock: per-page lock
97 * @refcount: reference count for the z3fold page
98 * @work: work_struct for page layout optimization
99 * @slots: pointer to the structure holding buddy slots
100 * @pool: pointer to the containing pool
101 * @cpu: CPU which this page "belongs" to
102 * @first_chunks: the size of the first buddy in chunks, 0 if free
103 * @middle_chunks: the size of the middle buddy in chunks, 0 if free
104 * @last_chunks: the size of the last buddy in chunks, 0 if free
105 * @first_num: the starting number (for the first handle)
106 * @mapped_count: the number of objects currently mapped
108 struct z3fold_header {
109 struct list_head buddy;
110 spinlock_t page_lock;
111 struct kref refcount;
112 struct work_struct work;
113 struct z3fold_buddy_slots *slots;
114 struct z3fold_pool *pool;
116 unsigned short first_chunks;
117 unsigned short middle_chunks;
118 unsigned short last_chunks;
119 unsigned short start_middle;
120 unsigned short first_num:2;
121 unsigned short mapped_count:2;
122 unsigned short foreign_handles:2;
126 * struct z3fold_pool - stores metadata for each z3fold pool
128 * @lock: protects pool unbuddied/lru lists
129 * @stale_lock: protects pool stale page list
130 * @unbuddied: per-cpu array of lists tracking z3fold pages that contain
131 * two or fewer buddies; the list each z3fold page is added to depends on
132 * the size of its free region.
133 * @lru: list tracking the z3fold pages in LRU order by most recently added buddy
135 * @stale: list of pages marked for freeing
136 * @pages_nr: number of z3fold pages in the pool.
137 * @c_handle: cache for z3fold_buddy_slots allocation
138 * @zpool: zpool driver
139 * @zpool_ops: zpool operations structure with an evict callback
140 * @compact_wq: workqueue for page layout background optimization
141 * @release_wq: workqueue for safe page release
142 * @work: work_struct for safe page release
144 * This structure is allocated at pool creation time and maintains metadata
145 * pertaining to a particular z3fold pool.
150 spinlock_t stale_lock;
151 struct list_head *unbuddied;
152 struct list_head lru;
153 struct list_head stale;
155 struct kmem_cache *c_handle;
157 const struct zpool_ops *zpool_ops;
158 struct workqueue_struct *compact_wq;
159 struct workqueue_struct *release_wq;
160 struct work_struct work;
164 * Internal z3fold page flags
166 enum z3fold_page_flags {
171 PAGE_CLAIMED, /* by either reclaim or free */
172 PAGE_MIGRATED, /* page is migrated and soon to be released */
176 * handle flags, go under HANDLE_FLAG_MASK
178 enum z3fold_handle_flags {
183 * Forward declarations
185 static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
186 static void compact_page_work(struct work_struct *w);
192 /* Converts an allocation size in bytes to size in z3fold chunks */
193 static int size_to_chunks(size_t size)
195 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
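/*
 * Example for size_to_chunks() (illustrative, assuming CHUNK_SIZE == 64,
 * CHUNK_SHIFT == 6):
 *   size_to_chunks(100)  = (100 + 63) >> 6 = 2 chunks (128 bytes)
 *   size_to_chunks(1000) = (1000 + 63) >> 6 = 16 chunks (1024 bytes)
 */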
198 #define for_each_unbuddied_list(_iter, _begin) \
199 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
201 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
204 struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
208 /* It will be freed separately in free_handle(). */
209 kmemleak_not_leak(slots);
210 slots->pool = (unsigned long)pool;
211 rwlock_init(&slots->lock);
217 static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
219 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
222 static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
224 return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
227 /* Lock a z3fold page */
228 static inline void z3fold_page_lock(struct z3fold_header *zhdr)
230 spin_lock(&zhdr->page_lock);
233 /* Try to lock a z3fold page */
234 static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
236 return spin_trylock(&zhdr->page_lock);
239 /* Unlock a z3fold page */
240 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
242 spin_unlock(&zhdr->page_lock);
245 /* return the locked z3fold header if the page is not headless */
246 static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
248 struct z3fold_buddy_slots *slots;
249 struct z3fold_header *zhdr;
252 if (!(handle & (1 << PAGE_HEADLESS))) {
253 slots = handle_to_slots(handle);
257 read_lock(&slots->lock);
258 addr = *(unsigned long *)handle;
259 zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
260 locked = z3fold_page_trylock(zhdr);
261 read_unlock(&slots->lock);
263 struct page *page = virt_to_page(zhdr);
265 if (!test_bit(PAGE_MIGRATED, &page->private))
267 z3fold_page_unlock(zhdr);
272 zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
278 static inline void put_z3fold_header(struct z3fold_header *zhdr)
280 struct page *page = virt_to_page(zhdr);
282 if (!test_bit(PAGE_HEADLESS, &page->private))
283 z3fold_page_unlock(zhdr);
286 static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
288 struct z3fold_buddy_slots *slots;
292 if (WARN_ON(*(unsigned long *)handle == 0))
295 slots = handle_to_slots(handle);
296 write_lock(&slots->lock);
297 *(unsigned long *)handle = 0;
299 if (test_bit(HANDLES_NOFREE, &slots->pool)) {
300 write_unlock(&slots->lock);
301 return; /* simple case, nothing else to do */
304 if (zhdr->slots != slots)
305 zhdr->foreign_handles--;
308 for (i = 0; i <= BUDDY_MASK; i++) {
309 if (slots->slot[i]) {
314 write_unlock(&slots->lock);
317 struct z3fold_pool *pool = slots_to_pool(slots);
319 if (zhdr->slots == slots)
321 kmem_cache_free(pool->c_handle, slots);
325 /* Initializes the z3fold header of a newly allocated z3fold page */
326 static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
327 struct z3fold_pool *pool, gfp_t gfp)
329 struct z3fold_header *zhdr = page_address(page);
330 struct z3fold_buddy_slots *slots;
332 INIT_LIST_HEAD(&page->lru);
333 clear_bit(PAGE_HEADLESS, &page->private);
334 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
335 clear_bit(NEEDS_COMPACTING, &page->private);
336 clear_bit(PAGE_STALE, &page->private);
337 clear_bit(PAGE_CLAIMED, &page->private);
338 clear_bit(PAGE_MIGRATED, &page->private);
342 slots = alloc_slots(pool, gfp);
346 memset(zhdr, 0, sizeof(*zhdr));
347 spin_lock_init(&zhdr->page_lock);
348 kref_init(&zhdr->refcount);
352 INIT_LIST_HEAD(&zhdr->buddy);
353 INIT_WORK(&zhdr->work, compact_page_work);
357 /* Resets the struct page fields and frees the page */
358 static void free_z3fold_page(struct page *page, bool headless)
362 __ClearPageMovable(page);
368 /* Helper function to build the index */
369 static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
371 return (bud + zhdr->first_num) & BUDDY_MASK;
375 * Encodes the handle of a particular buddy within a z3fold page
376 * Pool lock should be held as this function accesses first_num
378 static unsigned long __encode_handle(struct z3fold_header *zhdr,
379 struct z3fold_buddy_slots *slots,
382 unsigned long h = (unsigned long)zhdr;
386 * For a headless page, its handle is its pointer with the extra
387 * PAGE_HEADLESS bit set
390 return h | (1 << PAGE_HEADLESS);
392 /* otherwise, return pointer to encoded handle */
393 idx = __idx(zhdr, bud);
396 h |= (zhdr->last_chunks << BUDDY_SHIFT);
398 write_lock(&slots->lock);
399 slots->slot[idx] = h;
400 write_unlock(&slots->lock);
401 return (unsigned long)&slots->slot[idx];
404 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
406 return __encode_handle(zhdr, zhdr->slots, bud);
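/*
 * Illustrative summary of the handle encoding (derived from the code
 * above, not a normative layout description): a non-headless handle is
 * the address of slots->slot[idx], and the value stored in that slot is
 *
 *   slot[idx] = zhdr address (page aligned)
 *             | (last_chunks << BUDDY_SHIFT)  -- for the LAST buddy only
 *             | idx                           -- (bud + first_num) & BUDDY_MASK
 *
 * so handle_to_slots() masks the handle with ~(SLOTS_ALIGN - 1) to find
 * the slots structure, get_z3fold_header() masks the slot value with
 * PAGE_MASK to find the header, and handle_to_buddy()/handle_to_chunks()
 * recover the buddy index and the LAST buddy size from the low bits.
 */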
409 /* only for LAST bud, returns zero otherwise */
410 static unsigned short handle_to_chunks(unsigned long handle)
412 struct z3fold_buddy_slots *slots = handle_to_slots(handle);
415 read_lock(&slots->lock);
416 addr = *(unsigned long *)handle;
417 read_unlock(&slots->lock);
418 return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
422 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
423 * but that doesn't matter, because the masking will result in the
424 * correct buddy number.
426 static enum buddy handle_to_buddy(unsigned long handle)
428 struct z3fold_header *zhdr;
429 struct z3fold_buddy_slots *slots = handle_to_slots(handle);
432 read_lock(&slots->lock);
433 WARN_ON(handle & (1 << PAGE_HEADLESS));
434 addr = *(unsigned long *)handle;
435 read_unlock(&slots->lock);
436 zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
437 return (addr - zhdr->first_num) & BUDDY_MASK;
440 static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
445 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
447 struct page *page = virt_to_page(zhdr);
448 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
450 WARN_ON(!list_empty(&zhdr->buddy));
451 set_bit(PAGE_STALE, &page->private);
452 clear_bit(NEEDS_COMPACTING, &page->private);
453 spin_lock(&pool->lock);
454 if (!list_empty(&page->lru))
455 list_del_init(&page->lru);
456 spin_unlock(&pool->lock);
459 z3fold_page_unlock(zhdr);
461 spin_lock(&pool->stale_lock);
462 list_add(&zhdr->buddy, &pool->stale);
463 queue_work(pool->release_wq, &pool->work);
464 spin_unlock(&pool->stale_lock);
466 atomic64_dec(&pool->pages_nr);
469 static void release_z3fold_page_locked(struct kref *ref)
471 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
473 WARN_ON(z3fold_page_trylock(zhdr));
474 __release_z3fold_page(zhdr, true);
477 static void release_z3fold_page_locked_list(struct kref *ref)
479 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
481 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
483 spin_lock(&pool->lock);
484 list_del_init(&zhdr->buddy);
485 spin_unlock(&pool->lock);
487 WARN_ON(z3fold_page_trylock(zhdr));
488 __release_z3fold_page(zhdr, true);
491 static void free_pages_work(struct work_struct *w)
493 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
495 spin_lock(&pool->stale_lock);
496 while (!list_empty(&pool->stale)) {
497 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
498 struct z3fold_header, buddy);
499 struct page *page = virt_to_page(zhdr);
501 list_del(&zhdr->buddy);
502 if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
504 spin_unlock(&pool->stale_lock);
505 cancel_work_sync(&zhdr->work);
506 free_z3fold_page(page, false);
508 spin_lock(&pool->stale_lock);
510 spin_unlock(&pool->stale_lock);
514 * Returns the number of free chunks in a z3fold page.
515 * NB: can't be used with HEADLESS pages.
517 static int num_free_chunks(struct z3fold_header *zhdr)
521 * If there is a middle object, pick up the bigger free space
522 * either before or after it. Otherwise just subtract the number
523 * of chunks occupied by the first and the last objects.
525 if (zhdr->middle_chunks != 0) {
526 int nfree_before = zhdr->first_chunks ?
527 0 : zhdr->start_middle - ZHDR_CHUNKS;
528 int nfree_after = zhdr->last_chunks ?
530 (zhdr->start_middle + zhdr->middle_chunks);
531 nfree = max(nfree_before, nfree_after);
533 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
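/*
 * Example for num_free_chunks() (illustrative): with TOTAL_CHUNKS == 64,
 * first_chunks == 0, start_middle == 20, middle_chunks == 5 and
 * last_chunks == 8, nfree_before = 20 - ZHDR_CHUNKS and nfree_after = 0
 * (the last buddy is in use), so the function returns 20 - ZHDR_CHUNKS.
 */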
537 /* Add to the appropriate unbuddied list */
538 static inline void add_to_unbuddied(struct z3fold_pool *pool,
539 struct z3fold_header *zhdr)
541 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
542 zhdr->middle_chunks == 0) {
543 struct list_head *unbuddied;
544 int freechunks = num_free_chunks(zhdr);
547 unbuddied = this_cpu_ptr(pool->unbuddied);
548 spin_lock(&pool->lock);
549 list_add(&zhdr->buddy, &unbuddied[freechunks]);
550 spin_unlock(&pool->lock);
551 zhdr->cpu = smp_processor_id();
556 static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
558 enum buddy bud = HEADLESS;
560 if (zhdr->middle_chunks) {
561 if (!zhdr->first_chunks &&
562 chunks <= zhdr->start_middle - ZHDR_CHUNKS)
564 else if (!zhdr->last_chunks)
567 if (!zhdr->first_chunks)
569 else if (!zhdr->last_chunks)
578 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
579 unsigned short dst_chunk)
582 return memmove(beg + (dst_chunk << CHUNK_SHIFT),
583 beg + (zhdr->start_middle << CHUNK_SHIFT),
584 zhdr->middle_chunks << CHUNK_SHIFT);
587 static inline bool buddy_single(struct z3fold_header *zhdr)
589 return !((zhdr->first_chunks && zhdr->middle_chunks) ||
590 (zhdr->first_chunks && zhdr->last_chunks) ||
591 (zhdr->middle_chunks && zhdr->last_chunks));
594 static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
596 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
598 unsigned long old_handle = 0;
600 struct z3fold_header *new_zhdr = NULL;
601 int first_idx = __idx(zhdr, FIRST);
602 int middle_idx = __idx(zhdr, MIDDLE);
603 int last_idx = __idx(zhdr, LAST);
604 unsigned short *moved_chunks = NULL;
607 * No need to protect slots here -- all the slots are "local" and
608 * the page lock is already taken
610 if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
611 p += ZHDR_SIZE_ALIGNED;
612 sz = zhdr->first_chunks << CHUNK_SHIFT;
613 old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
614 moved_chunks = &zhdr->first_chunks;
615 } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
616 p += zhdr->start_middle << CHUNK_SHIFT;
617 sz = zhdr->middle_chunks << CHUNK_SHIFT;
618 old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
619 moved_chunks = &zhdr->middle_chunks;
620 } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
621 p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
622 sz = zhdr->last_chunks << CHUNK_SHIFT;
623 old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
624 moved_chunks = &zhdr->last_chunks;
628 enum buddy new_bud = HEADLESS;
629 short chunks = size_to_chunks(sz);
632 new_zhdr = __z3fold_alloc(pool, sz, false);
636 if (WARN_ON(new_zhdr == zhdr))
639 new_bud = get_free_buddy(new_zhdr, chunks);
643 new_zhdr->first_chunks = chunks;
644 q += ZHDR_SIZE_ALIGNED;
647 new_zhdr->middle_chunks = chunks;
648 new_zhdr->start_middle =
649 new_zhdr->first_chunks + ZHDR_CHUNKS;
650 q += new_zhdr->start_middle << CHUNK_SHIFT;
653 new_zhdr->last_chunks = chunks;
654 q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
659 new_zhdr->foreign_handles++;
661 write_lock(&zhdr->slots->lock);
662 *(unsigned long *)old_handle = (unsigned long)new_zhdr +
663 __idx(new_zhdr, new_bud);
665 *(unsigned long *)old_handle |=
666 (new_zhdr->last_chunks << BUDDY_SHIFT);
667 write_unlock(&zhdr->slots->lock);
668 add_to_unbuddied(pool, new_zhdr);
669 z3fold_page_unlock(new_zhdr);
677 if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
678 add_to_unbuddied(pool, new_zhdr);
679 z3fold_page_unlock(new_zhdr);
685 #define BIG_CHUNK_GAP 3
686 /* Has to be called with page lock held */
687 static int z3fold_compact_page(struct z3fold_header *zhdr)
689 struct page *page = virt_to_page(zhdr);
691 if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
692 return 0; /* can't move middle chunk, it's used */
694 if (unlikely(PageIsolated(page)))
697 if (zhdr->middle_chunks == 0)
698 return 0; /* nothing to compact */
700 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
701 /* move to the beginning */
702 mchunk_memmove(zhdr, ZHDR_CHUNKS);
703 zhdr->first_chunks = zhdr->middle_chunks;
704 zhdr->middle_chunks = 0;
705 zhdr->start_middle = 0;
711 * moving data is expensive, so let's only do that if
712 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
714 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
715 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
717 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
718 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
720 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
721 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
722 + zhdr->middle_chunks) >=
724 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
726 mchunk_memmove(zhdr, new_start);
727 zhdr->start_middle = new_start;
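/*
 * Example for z3fold_compact_page() above (illustrative, assuming
 * ZHDR_CHUNKS == 1): with first_chunks == 10, no last buddy, and a middle
 * buddy starting at chunk 20, the gap below the middle buddy is
 * 20 - (10 + 1) = 9 chunks >= BIG_CHUNK_GAP, so the middle buddy is moved
 * down to start right after the first one (start_middle becomes 11).
 */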
734 static void do_compact_page(struct z3fold_header *zhdr, bool locked)
736 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
739 page = virt_to_page(zhdr);
741 WARN_ON(z3fold_page_trylock(zhdr));
743 z3fold_page_lock(zhdr);
744 if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
745 z3fold_page_unlock(zhdr);
748 spin_lock(&pool->lock);
749 list_del_init(&zhdr->buddy);
750 spin_unlock(&pool->lock);
752 if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
755 if (test_bit(PAGE_STALE, &page->private) ||
756 test_and_set_bit(PAGE_CLAIMED, &page->private)) {
757 z3fold_page_unlock(zhdr);
761 if (!zhdr->foreign_handles && buddy_single(zhdr) &&
762 zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
763 if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
764 clear_bit(PAGE_CLAIMED, &page->private);
765 z3fold_page_unlock(zhdr);
770 z3fold_compact_page(zhdr);
771 add_to_unbuddied(pool, zhdr);
772 clear_bit(PAGE_CLAIMED, &page->private);
773 z3fold_page_unlock(zhdr);
776 static void compact_page_work(struct work_struct *w)
778 struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
781 do_compact_page(zhdr, false);
784 /* returns _locked_ z3fold page header or NULL */
785 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
786 size_t size, bool can_sleep)
788 struct z3fold_header *zhdr = NULL;
790 struct list_head *unbuddied;
791 int chunks = size_to_chunks(size), i;
795 /* First, try to find an unbuddied z3fold page. */
796 unbuddied = this_cpu_ptr(pool->unbuddied);
797 for_each_unbuddied_list(i, chunks) {
798 struct list_head *l = &unbuddied[i];
800 zhdr = list_first_entry_or_null(READ_ONCE(l),
801 struct z3fold_header, buddy);
806 /* Re-check under lock. */
807 spin_lock(&pool->lock);
808 if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
809 struct z3fold_header, buddy)) ||
810 !z3fold_page_trylock(zhdr)) {
811 spin_unlock(&pool->lock);
818 list_del_init(&zhdr->buddy);
820 spin_unlock(&pool->lock);
822 page = virt_to_page(zhdr);
823 if (test_bit(NEEDS_COMPACTING, &page->private) ||
824 test_bit(PAGE_CLAIMED, &page->private)) {
825 z3fold_page_unlock(zhdr);
834 * this page could not be removed from its unbuddied
835 * list while the pool lock was held, and we have since
836 * taken the page lock, so kref_put() could not have been
837 * called before we got here; it's safe to just call kref_get()
839 kref_get(&zhdr->refcount);
847 /* look for _exact_ match on other cpus' lists */
848 for_each_online_cpu(cpu) {
851 unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
852 spin_lock(&pool->lock);
853 l = &unbuddied[chunks];
855 zhdr = list_first_entry_or_null(READ_ONCE(l),
856 struct z3fold_header, buddy);
858 if (!zhdr || !z3fold_page_trylock(zhdr)) {
859 spin_unlock(&pool->lock);
863 list_del_init(&zhdr->buddy);
865 spin_unlock(&pool->lock);
867 page = virt_to_page(zhdr);
868 if (test_bit(NEEDS_COMPACTING, &page->private) ||
869 test_bit(PAGE_CLAIMED, &page->private)) {
870 z3fold_page_unlock(zhdr);
876 kref_get(&zhdr->refcount);
881 if (zhdr && !zhdr->slots) {
882 zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
889 if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
890 add_to_unbuddied(pool, zhdr);
891 z3fold_page_unlock(zhdr);
901 * z3fold_create_pool() - create a new z3fold pool
903 * @gfp: gfp flags when allocating the z3fold pool structure
905 * Return: pointer to the new z3fold pool or NULL if the metadata allocation failed.
908 static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
910 struct z3fold_pool *pool = NULL;
913 pool = kzalloc(sizeof(struct z3fold_pool), gfp);
916 pool->c_handle = kmem_cache_create("z3fold_handle",
917 sizeof(struct z3fold_buddy_slots),
918 SLOTS_ALIGN, 0, NULL);
921 spin_lock_init(&pool->lock);
922 spin_lock_init(&pool->stale_lock);
923 pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
924 __alignof__(struct list_head));
925 if (!pool->unbuddied)
927 for_each_possible_cpu(cpu) {
928 struct list_head *unbuddied =
929 per_cpu_ptr(pool->unbuddied, cpu);
930 for_each_unbuddied_list(i, 0)
931 INIT_LIST_HEAD(&unbuddied[i]);
933 INIT_LIST_HEAD(&pool->lru);
934 INIT_LIST_HEAD(&pool->stale);
935 atomic64_set(&pool->pages_nr, 0);
937 pool->compact_wq = create_singlethread_workqueue(pool->name);
938 if (!pool->compact_wq)
940 pool->release_wq = create_singlethread_workqueue(pool->name);
941 if (!pool->release_wq)
943 INIT_WORK(&pool->work, free_pages_work);
947 destroy_workqueue(pool->compact_wq);
949 free_percpu(pool->unbuddied);
951 kmem_cache_destroy(pool->c_handle);
959 * z3fold_destroy_pool() - destroys an existing z3fold pool
960 * @pool: the z3fold pool to be destroyed
962 * The pool should be emptied before this function is called.
964 static void z3fold_destroy_pool(struct z3fold_pool *pool)
966 kmem_cache_destroy(pool->c_handle);
969 * We need to destroy pool->compact_wq before pool->release_wq,
970 * as any pending work on pool->compact_wq will call
971 * queue_work(pool->release_wq, &pool->work).
973 * There are still outstanding pages until both workqueues are drained,
974 * so we cannot unregister migration until then.
977 destroy_workqueue(pool->compact_wq);
978 destroy_workqueue(pool->release_wq);
979 free_percpu(pool->unbuddied);
983 static const struct movable_operations z3fold_mops;
986 * z3fold_alloc() - allocates a region of a given size
987 * @pool: z3fold pool from which to allocate
988 * @size: size in bytes of the desired allocation
989 * @gfp: gfp flags used if the pool needs to grow
990 * @handle: handle of the new allocation
992 * This function will attempt to find a free region in the pool large enough to
993 * satisfy the allocation request. A search of the unbuddied lists is
994 * performed first. If no suitable free region is found, then a new page is
995 * allocated and added to the pool to satisfy the request.
997 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
998 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate a new page.
1001 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
1002 unsigned long *handle)
1004 int chunks = size_to_chunks(size);
1005 struct z3fold_header *zhdr = NULL;
1006 struct page *page = NULL;
1008 bool can_sleep = gfpflags_allow_blocking(gfp);
1010 if (!size || (gfp & __GFP_HIGHMEM))
1013 if (size > PAGE_SIZE)
1016 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
1020 zhdr = __z3fold_alloc(pool, size, can_sleep);
1022 bud = get_free_buddy(zhdr, chunks);
1023 if (bud == HEADLESS) {
1024 if (!kref_put(&zhdr->refcount,
1025 release_z3fold_page_locked))
1026 z3fold_page_unlock(zhdr);
1027 pr_err("No free chunks in unbuddied\n");
1031 page = virt_to_page(zhdr);
1037 page = alloc_page(gfp);
1041 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1046 atomic64_inc(&pool->pages_nr);
1048 if (bud == HEADLESS) {
1049 set_bit(PAGE_HEADLESS, &page->private);
1054 __SetPageMovable(page, &z3fold_mops);
1057 WARN_ON(!trylock_page(page));
1058 __SetPageMovable(page, &z3fold_mops);
1061 z3fold_page_lock(zhdr);
1065 zhdr->first_chunks = chunks;
1066 else if (bud == LAST)
1067 zhdr->last_chunks = chunks;
1069 zhdr->middle_chunks = chunks;
1070 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
1072 add_to_unbuddied(pool, zhdr);
1075 spin_lock(&pool->lock);
1076 /* Add/move z3fold page to beginning of LRU */
1077 if (!list_empty(&page->lru))
1078 list_del(&page->lru);
1080 list_add(&page->lru, &pool->lru);
1082 *handle = encode_handle(zhdr, bud);
1083 spin_unlock(&pool->lock);
1084 if (bud != HEADLESS)
1085 z3fold_page_unlock(zhdr);
1091 * z3fold_free() - frees the allocation associated with the given handle
1092 * @pool: pool in which the allocation resided
1093 * @handle: handle associated with the allocation returned by z3fold_alloc()
1095 * In the case that the z3fold page in which the allocation resides is under
1096 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
1097 * only sets the first|middle|last_chunks to 0. The page is actually freed
1098 * once all buddies are evicted (see z3fold_reclaim_page() below).
1100 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
1102 struct z3fold_header *zhdr;
1107 zhdr = get_z3fold_header(handle);
1108 page = virt_to_page(zhdr);
1109 page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
1111 if (test_bit(PAGE_HEADLESS, &page->private)) {
1112 /* if a headless page is under reclaim, just leave.
1113 * NB: we use test_and_set_bit for a reason: if the bit
1114 * has not been set before, we release this page
1115 * immediately so we don't care about its value any more.
1117 if (!page_claimed) {
1118 spin_lock(&pool->lock);
1119 list_del(&page->lru);
1120 spin_unlock(&pool->lock);
1121 put_z3fold_header(zhdr);
1122 free_z3fold_page(page, true);
1123 atomic64_dec(&pool->pages_nr);
1128 /* Non-headless case */
1129 bud = handle_to_buddy(handle);
1133 zhdr->first_chunks = 0;
1136 zhdr->middle_chunks = 0;
1139 zhdr->last_chunks = 0;
1142 pr_err("%s: unknown bud %d\n", __func__, bud);
1144 put_z3fold_header(zhdr);
1149 free_handle(handle, zhdr);
1150 if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
1153 /* the page has not been claimed by us */
1154 put_z3fold_header(zhdr);
1157 if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1158 clear_bit(PAGE_CLAIMED, &page->private);
1159 put_z3fold_header(zhdr);
1162 if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1164 kref_get(&zhdr->refcount);
1165 clear_bit(PAGE_CLAIMED, &page->private);
1166 do_compact_page(zhdr, true);
1169 kref_get(&zhdr->refcount);
1170 clear_bit(PAGE_CLAIMED, &page->private);
1171 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1172 put_z3fold_header(zhdr);
1176 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1177 * @pool: pool from which a page will attempt to be evicted
1178 * @retries: number of pages on the LRU list for which eviction will
1179 * be attempted before failing
1181 * z3fold reclaim is different from normal system reclaim in that it is done
1182 * from the bottom, up. This is because only the bottom layer, z3fold, has
1183 * information on how the allocations are organized within each z3fold page.
1184 * This has the potential to create interesting locking situations between
1185 * z3fold and the user, however.
1187 * To avoid these, this is how z3fold_reclaim_page() should be called:
1189 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
1190 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1191 * call the user-defined eviction handler with the pool and handle as
1194 * If the handle cannot be evicted, the eviction handler should return
1195 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
1196 * appropriate list and try the next z3fold page on the LRU up to
1197 * a user defined number of retries.
1199 * If the handle is successfully evicted, the eviction handler should
1200 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
1201 * contains logic to delay freeing the page if the page is under reclaim,
1202 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
1204 * If all buddies in the z3fold page are successfully evicted, then the
1205 * z3fold page can be freed.
1207 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
1208 * no pages to evict or an eviction handler is not registered, -EAGAIN if
1209 * the retry limit was hit.
1211 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
1214 struct z3fold_header *zhdr = NULL;
1215 struct page *page = NULL;
1216 struct list_head *pos;
1217 unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
1218 struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));
1220 rwlock_init(&slots.lock);
1221 slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);
1223 spin_lock(&pool->lock);
1224 for (i = 0; i < retries; i++) {
1225 if (list_empty(&pool->lru)) {
1226 spin_unlock(&pool->lock);
1229 list_for_each_prev(pos, &pool->lru) {
1230 page = list_entry(pos, struct page, lru);
1232 zhdr = page_address(page);
1233 if (test_bit(PAGE_HEADLESS, &page->private)) {
1235 * For non-headless pages, we wait to do this
1236 * until we have the page lock to avoid racing
1237 * with __z3fold_alloc(). Headless pages don't
1238 * have a lock (and __z3fold_alloc() will never
1239 * see them), but we still need to test and set
1240 * PAGE_CLAIMED to avoid racing with
1241 * z3fold_free(), so just do it now before
1244 if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1250 if (!z3fold_page_trylock(zhdr)) {
1252 continue; /* can't evict at this point */
1255 /* test_and_set_bit is of course atomic, but we still
1256 * need to do it under page lock, otherwise checking
1257 * that bit in __z3fold_alloc wouldn't make sense
1259 if (zhdr->foreign_handles ||
1260 test_and_set_bit(PAGE_CLAIMED, &page->private)) {
1261 z3fold_page_unlock(zhdr);
1263 continue; /* can't evict such page */
1265 list_del_init(&zhdr->buddy);
1267 /* See comment in __z3fold_alloc. */
1268 kref_get(&zhdr->refcount);
1275 list_del_init(&page->lru);
1276 spin_unlock(&pool->lock);
1278 if (!test_bit(PAGE_HEADLESS, &page->private)) {
1280 * We need to encode the handles before unlocking, and
1281 * use our local slots structure because z3fold_free
1282 * can zero out zhdr->slots and we can't do much about that
1288 memset(slots.slot, 0, sizeof(slots.slot));
1289 if (zhdr->first_chunks)
1290 first_handle = __encode_handle(zhdr, &slots,
1292 if (zhdr->middle_chunks)
1293 middle_handle = __encode_handle(zhdr, &slots,
1295 if (zhdr->last_chunks)
1296 last_handle = __encode_handle(zhdr, &slots,
1299 * it's safe to unlock here because we hold a
1300 * reference to this page
1302 z3fold_page_unlock(zhdr);
1304 first_handle = encode_handle(zhdr, HEADLESS);
1305 last_handle = middle_handle = 0;
1307 /* Issue the eviction callback(s) */
1308 if (middle_handle) {
1309 ret = pool->zpool_ops->evict(pool->zpool, middle_handle);
1314 ret = pool->zpool_ops->evict(pool->zpool, first_handle);
1319 ret = pool->zpool_ops->evict(pool->zpool, last_handle);
1324 if (test_bit(PAGE_HEADLESS, &page->private)) {
1326 free_z3fold_page(page, true);
1327 atomic64_dec(&pool->pages_nr);
1330 spin_lock(&pool->lock);
1331 list_add(&page->lru, &pool->lru);
1332 spin_unlock(&pool->lock);
1333 clear_bit(PAGE_CLAIMED, &page->private);
1335 struct z3fold_buddy_slots *slots = zhdr->slots;
1336 z3fold_page_lock(zhdr);
1337 if (kref_put(&zhdr->refcount,
1338 release_z3fold_page_locked)) {
1339 kmem_cache_free(pool->c_handle, slots);
1343 * if we are here, the page is still not completely
1344 * free. Take the global pool lock to be able
1345 * to add it back to the lru list
1347 spin_lock(&pool->lock);
1348 list_add(&page->lru, &pool->lru);
1349 spin_unlock(&pool->lock);
1350 if (list_empty(&zhdr->buddy))
1351 add_to_unbuddied(pool, zhdr);
1352 clear_bit(PAGE_CLAIMED, &page->private);
1353 z3fold_page_unlock(zhdr);
1356 /* We started off locked so we need to lock the pool back */
1357 spin_lock(&pool->lock);
1359 spin_unlock(&pool->lock);
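/*
 * Illustrative sketch only (not part of z3fold): a minimal eviction
 * callback of the kind z3fold_reclaim_page() invokes through
 * pool->zpool_ops->evict. The name example_evict is made up; a real
 * user such as zswap would first write the compressed object back to
 * its backing store, then free the handle as documented above.
 */
static int example_evict(struct zpool *zpool, unsigned long handle)
{
	/* ... write the object out to backing storage here ... */

	/* Freeing the handle lets z3fold release the page once all
	 * buddies in it have been evicted. */
	zpool_free(zpool, handle);
	return 0;
}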
1364 * z3fold_map() - maps the allocation associated with the given handle
1365 * @pool: pool in which the allocation resides
1366 * @handle: handle associated with the allocation to be mapped
1368 * Extracts the buddy number from handle and constructs the pointer to the
1369 * correct starting chunk within the page.
1371 * Returns: a pointer to the mapped allocation
1373 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1375 struct z3fold_header *zhdr;
1380 zhdr = get_z3fold_header(handle);
1382 page = virt_to_page(zhdr);
1384 if (test_bit(PAGE_HEADLESS, &page->private))
1387 buddy = handle_to_buddy(handle);
1390 addr += ZHDR_SIZE_ALIGNED;
1393 addr += zhdr->start_middle << CHUNK_SHIFT;
1394 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1397 addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1400 pr_err("unknown buddy id %d\n", buddy);
1407 zhdr->mapped_count++;
1409 put_z3fold_header(zhdr);
1414 * z3fold_unmap() - unmaps the allocation associated with the given handle
1415 * @pool: pool in which the allocation resides
1416 * @handle: handle associated with the allocation to be unmapped
1418 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1420 struct z3fold_header *zhdr;
1424 zhdr = get_z3fold_header(handle);
1425 page = virt_to_page(zhdr);
1427 if (test_bit(PAGE_HEADLESS, &page->private))
1430 buddy = handle_to_buddy(handle);
1431 if (buddy == MIDDLE)
1432 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1433 zhdr->mapped_count--;
1434 put_z3fold_header(zhdr);
1438 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1439 * @pool: pool whose size is being queried
1441 * Returns: size in pages of the given pool.
1443 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1445 return atomic64_read(&pool->pages_nr);
1448 static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1450 struct z3fold_header *zhdr;
1451 struct z3fold_pool *pool;
1453 VM_BUG_ON_PAGE(PageIsolated(page), page);
1455 if (test_bit(PAGE_HEADLESS, &page->private))
1458 zhdr = page_address(page);
1459 z3fold_page_lock(zhdr);
1460 if (test_bit(NEEDS_COMPACTING, &page->private) ||
1461 test_bit(PAGE_STALE, &page->private))
1464 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1467 if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1469 pool = zhdr_to_pool(zhdr);
1470 spin_lock(&pool->lock);
1471 if (!list_empty(&zhdr->buddy))
1472 list_del_init(&zhdr->buddy);
1473 if (!list_empty(&page->lru))
1474 list_del_init(&page->lru);
1475 spin_unlock(&pool->lock);
1477 kref_get(&zhdr->refcount);
1478 z3fold_page_unlock(zhdr);
1482 z3fold_page_unlock(zhdr);
1486 static int z3fold_page_migrate(struct page *newpage, struct page *page,
1487 enum migrate_mode mode)
1489 struct z3fold_header *zhdr, *new_zhdr;
1490 struct z3fold_pool *pool;
1492 VM_BUG_ON_PAGE(!PageIsolated(page), page);
1493 VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
1494 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1496 zhdr = page_address(page);
1497 pool = zhdr_to_pool(zhdr);
1499 if (!z3fold_page_trylock(zhdr))
1501 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1502 clear_bit(PAGE_CLAIMED, &page->private);
1503 z3fold_page_unlock(zhdr);
1506 if (work_pending(&zhdr->work)) {
1507 z3fold_page_unlock(zhdr);
1510 new_zhdr = page_address(newpage);
1511 memcpy(new_zhdr, zhdr, PAGE_SIZE);
1512 newpage->private = page->private;
1513 set_bit(PAGE_MIGRATED, &page->private);
1514 z3fold_page_unlock(zhdr);
1515 spin_lock_init(&new_zhdr->page_lock);
1516 INIT_WORK(&new_zhdr->work, compact_page_work);
1518 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1519 * so we only have to reinitialize it.
1521 INIT_LIST_HEAD(&new_zhdr->buddy);
1522 __ClearPageMovable(page);
1525 z3fold_page_lock(new_zhdr);
1526 if (new_zhdr->first_chunks)
1527 encode_handle(new_zhdr, FIRST);
1528 if (new_zhdr->last_chunks)
1529 encode_handle(new_zhdr, LAST);
1530 if (new_zhdr->middle_chunks)
1531 encode_handle(new_zhdr, MIDDLE);
1532 set_bit(NEEDS_COMPACTING, &newpage->private);
1533 new_zhdr->cpu = smp_processor_id();
1534 spin_lock(&pool->lock);
1535 list_add(&newpage->lru, &pool->lru);
1536 spin_unlock(&pool->lock);
1537 __SetPageMovable(newpage, &z3fold_mops);
1538 z3fold_page_unlock(new_zhdr);
1540 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1542 /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
1548 static void z3fold_page_putback(struct page *page)
1550 struct z3fold_header *zhdr;
1551 struct z3fold_pool *pool;
1553 zhdr = page_address(page);
1554 pool = zhdr_to_pool(zhdr);
1556 z3fold_page_lock(zhdr);
1557 if (!list_empty(&zhdr->buddy))
1558 list_del_init(&zhdr->buddy);
1559 INIT_LIST_HEAD(&page->lru);
1560 if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
1562 spin_lock(&pool->lock);
1563 list_add(&page->lru, &pool->lru);
1564 spin_unlock(&pool->lock);
1565 if (list_empty(&zhdr->buddy))
1566 add_to_unbuddied(pool, zhdr);
1567 clear_bit(PAGE_CLAIMED, &page->private);
1568 z3fold_page_unlock(zhdr);
1571 static const struct movable_operations z3fold_mops = {
1572 .isolate_page = z3fold_page_isolate,
1573 .migrate_page = z3fold_page_migrate,
1574 .putback_page = z3fold_page_putback,
1581 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
1582 const struct zpool_ops *zpool_ops,
1583 struct zpool *zpool)
1585 struct z3fold_pool *pool;
1587 pool = z3fold_create_pool(name, gfp);
1589 pool->zpool = zpool;
1590 pool->zpool_ops = zpool_ops;
1595 static void z3fold_zpool_destroy(void *pool)
1597 z3fold_destroy_pool(pool);
1600 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1601 unsigned long *handle)
1603 return z3fold_alloc(pool, size, gfp, handle);
1605 static void z3fold_zpool_free(void *pool, unsigned long handle)
1607 z3fold_free(pool, handle);
1610 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1611 unsigned int *reclaimed)
1613 unsigned int total = 0;
1616 while (total < pages) {
1617 ret = z3fold_reclaim_page(pool, 8);
1629 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1630 enum zpool_mapmode mm)
1632 return z3fold_map(pool, handle);
1634 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1636 z3fold_unmap(pool, handle);
1639 static u64 z3fold_zpool_total_size(void *pool)
1641 return z3fold_get_pool_size(pool) * PAGE_SIZE;
1644 static struct zpool_driver z3fold_zpool_driver = {
1646 .sleep_mapped = true,
1647 .owner = THIS_MODULE,
1648 .create = z3fold_zpool_create,
1649 .destroy = z3fold_zpool_destroy,
1650 .malloc = z3fold_zpool_malloc,
1651 .free = z3fold_zpool_free,
1652 .shrink = z3fold_zpool_shrink,
1653 .map = z3fold_zpool_map,
1654 .unmap = z3fold_zpool_unmap,
1655 .total_size = z3fold_zpool_total_size,
1658 MODULE_ALIAS("zpool-z3fold");
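/*
 * Illustrative usage sketch (not part of this driver): how a zpool user
 * would exercise the "z3fold" type registered above. The function name is
 * made up and error handling is trimmed for brevity.
 */
static int __maybe_unused z3fold_zpool_usage_example(void)
{
	struct zpool *zpool;
	unsigned long handle;
	void *buf;

	/* Creates a z3fold pool via z3fold_zpool_create() */
	zpool = zpool_create_pool("z3fold", "example", GFP_KERNEL, NULL);
	if (!zpool)
		return -ENOMEM;

	/* Ends up in z3fold_alloc(); the handle encodes page and buddy */
	if (zpool_malloc(zpool, 100, GFP_KERNEL, &handle) == 0) {
		buf = zpool_map_handle(zpool, handle, ZPOOL_MM_RW);
		memset(buf, 0, 100);		/* use the mapping */
		zpool_unmap_handle(zpool, handle);
		zpool_free(zpool, handle);
	}

	zpool_destroy_pool(zpool);
	return 0;
}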
1660 static int __init init_z3fold(void)
1663 * Make sure the z3fold header is not larger than the page size and
1664 * that there is space remaining for the buddies.
1666 BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
1667 zpool_register_driver(&z3fold_zpool_driver);
1672 static void __exit exit_z3fold(void)
1674 zpool_unregister_driver(&z3fold_zpool_driver);
1677 module_init(init_z3fold);
1678 module_exit(exit_z3fold);
1680 MODULE_LICENSE("GPL");
1681 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1682 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");