// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * in the beginning of an allocated page are occupied by z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * accordingly be 63 (or 62) freelists per pool.
 */

#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	(2)
#define SLOTS_ALIGN	(0x40)

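/*
 * Worked example (assuming PAGE_SIZE == 4096 with the defaults above):
 * CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE is 64 bytes and TOTAL_CHUNKS
 * is 64; a 33-byte object then occupies size_to_chunks(33) == 1 chunk,
 * while a 65-byte object occupies 2. NCHUNKS is TOTAL_CHUNKS minus the
 * ZHDR_CHUNKS chunks reserved for the header at the start of the page.
 */
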
/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};

#define HANDLE_FLAG_MASK	(0x03)

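/*
 * The slots' back link to the pool doubles as a small bitfield: pool
 * structures are more than 4-byte aligned, so the two low bits covered by
 * HANDLE_FLAG_MASK are always zero in the pointer itself and can carry
 * flags such as HANDLES_NOFREE (masked off again in slots_to_pool()).
 */
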
/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	the number of handles not local to this page
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_zalloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

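/*
 * A non-headless handle is a pointer into the slot[] array of a struct
 * z3fold_buddy_slots. The slots cache is created with SLOTS_ALIGN (0x40)
 * alignment, and the slot[] array lies within the first SLOTS_ALIGN bytes
 * of the structure, so masking the low bits of a handle (as above)
 * recovers the owning slots structure.
 */
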
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
							bool lock)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			if (lock)
				locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (lock);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, false);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, true);
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->foreign_handles = 0;
	zhdr->mapped_count = 0;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Pool lock should be held as this function accesses first_num.
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

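/*
 * A sketch of the stored value: the slot holds the z3fold page address
 * plus the rotated buddy index in the low bits; since the page address is
 * page-aligned, a LAST buddy can additionally stash its size in chunks
 * starting at bit BUDDY_SHIFT, which handle_to_chunks() reads back below.
 */
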
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

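/*
 * Example: with first_num == 1, encoding MIDDLE (2) stores idx
 * (2 + 1) & BUDDY_MASK == 3 in the handle's low bits; since the page
 * address is page-aligned, decoding computes (3 - 1) & BUDDY_MASK == 2,
 * i.e. MIDDLE again.
 */
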
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

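/*
 * E.g. if only the middle buddy is in use, taking 5 chunks starting at
 * start_middle == 20, the free space before it is 20 - ZHDR_CHUNKS chunks
 * and after it TOTAL_CHUNKS - 25 chunks; the larger of the two is what a
 * future FIRST or LAST allocation could actually use.
 */
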
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				    unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

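/* Returns true if no more than one buddy (FIRST, MIDDLE or LAST) is in use. */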
static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr) {
		if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			add_to_unbuddied(pool, new_zhdr);
			z3fold_page_unlock(new_zhdr);
		}
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		/* new_start: right after 1st chunk */
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

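/*
 * E.g. with the first buddy occupying 4 chunks and the middle buddy
 * starting at chunk ZHDR_CHUNKS + 9, the gap after the first buddy is
 * 9 - 4 = 5 chunks (>= BIG_CHUNK_GAP), so the middle buddy is moved down
 * to start right after the first one.
 */
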
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

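/*
 * Page lookup strategy: first scan this CPU's unbuddied lists, starting
 * at the list for the requested chunk count and accepting any page with
 * at least that much free space; if that fails, fall back to scanning
 * the other CPUs' lists for an _exact_ fit only, to keep pages "local"
 * to a CPU as much as possible.
 */
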
/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots)
		zhdr->slots = alloc_slots(pool,
					can_sleep ? GFP_NOIO : GFP_ATOMIC);
	return zhdr;
}

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */
	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (page_claimed) {
		/* the page has not been claimed by us */
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		put_z3fold_header(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
				zhdr = NULL;
				break;
			}
			if (!z3fold_page_trylock(zhdr)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/* test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}
		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		z3fold_page_unlock(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	clear_bit(PAGE_CLAIMED, &page->private);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 *****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

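/*
 * Usage note: with zswap built in, this backend can be selected e.g. via
 * the zswap.zpool=z3fold boot parameter or at runtime through
 * /sys/module/zswap/parameters/zpool.
 */
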
static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");