1 // SPDX-License-Identifier: GPL-2.0-only
5 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
6 * Copyright (C) 2016, Sony Mobile Communications Inc.
8 * This implementation is based on zbud written by Seth Jennings.
10 * z3fold is a special purpose allocator for storing compressed pages. It
11 * can store up to three compressed pages per page, which improves the
12 * compression ratio of zbud while retaining its main concepts (e.g. always
13 * storing an integral number of objects per page) and simplicity.
14 * It still has simple and deterministic reclaim properties that make it
15 * preferable to a higher density approach (with no requirement on an integral
16 * number of objects per page) when reclaim is used.
18 * As in zbud, pages are divided into "chunks". The size of the chunks is
19 * fixed at compile time and is determined by NCHUNKS_ORDER below.
21 * z3fold doesn't export any API and is meant to be used via zpool API.
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26 #include <linux/atomic.h>
27 #include <linux/sched.h>
28 #include <linux/cpumask.h>
29 #include <linux/list.h>
31 #include <linux/module.h>
32 #include <linux/page-flags.h>
33 #include <linux/migrate.h>
34 #include <linux/node.h>
35 #include <linux/compaction.h>
36 #include <linux/percpu.h>
37 #include <linux/preempt.h>
38 #include <linux/workqueue.h>
39 #include <linux/slab.h>
40 #include <linux/spinlock.h>
41 #include <linux/zpool.h>
42 #include <linux/kmemleak.h>
45 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
46 * adjusting internal fragmentation. It also determines the number of
47 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
48 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
49 * at the beginning of an allocated page are occupied by the z3fold header, so
50 * NCHUNKS will be calculated as 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
51 * which is the maximum number of free chunks in a z3fold page; there will
52 * also be 63 (or 62, respectively) freelists per pool.
54 #define NCHUNKS_ORDER 6
56 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
57 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
58 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
59 #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
60 #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
61 #define NCHUNKS (TOTAL_CHUNKS - ZHDR_CHUNKS)
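/*
 * Editor's note -- a worked example (not from the original source), assuming
 * PAGE_SIZE = 4096 and NCHUNKS_ORDER = 6:
 *
 *   CHUNK_SHIFT  = PAGE_SHIFT - NCHUNKS_ORDER = 12 - 6 = 6
 *   CHUNK_SIZE   = 1 << 6 = 64 bytes
 *   TOTAL_CHUNKS = 4096 >> 6 = 64
 *
 * With a header that fits in a single chunk (as the comment above assumes),
 * ZHDR_CHUNKS = 1 and NCHUNKS = 64 - 1 = 63, i.e. 63 freelists per pool and
 * at most 63 chunks of payload in one z3fold page.
 */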
63 #define BUDDY_MASK (0x3)
65 #define SLOTS_ALIGN (0x40)
80 struct z3fold_buddy_slots {
82 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
83 * be enough slots to hold all possible variants
85 unsigned long slot[BUDDY_MASK + 1];
86 unsigned long pool; /* back link */
89 #define HANDLE_FLAG_MASK (0x03)
92 * struct z3fold_header - z3fold page metadata occupying first chunks of each
93 * z3fold page, except for HEADLESS pages
94 * @buddy: links the z3fold page into the relevant list in the pool
96 * @page_lock: per-page lock
97 * @refcount: reference count for the z3fold page
98 * @work: work_struct for page layout optimization
99 * @slots: pointer to the structure holding buddy slots
100 * @pool: pointer to the containing pool
101 * @cpu: CPU which this page "belongs" to
102 * @first_chunks: the size of the first buddy in chunks, 0 if free
103 * @middle_chunks: the size of the middle buddy in chunks, 0 if free
104 * @last_chunks: the size of the last buddy in chunks, 0 if free
105 * @first_num: the starting number (for the first handle)
106 * @mapped_count: the number of objects currently mapped
108 struct z3fold_header {
109 struct list_head buddy;
110 spinlock_t page_lock;
111 struct kref refcount;
112 struct work_struct work;
113 struct z3fold_buddy_slots *slots;
114 struct z3fold_pool *pool;
116 unsigned short first_chunks;
117 unsigned short middle_chunks;
118 unsigned short last_chunks;
119 unsigned short start_middle;
120 unsigned short first_num:2;
121 unsigned short mapped_count:2;
122 unsigned short foreign_handles:2;
126 * struct z3fold_pool - stores metadata for each z3fold pool
128 * @lock: protects pool unbuddied lists
129 * @stale_lock: protects pool stale page list
130 * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2-
131 * buddies; the list each z3fold page is added to depends on
132 * the size of its free region.
133 * @stale: list of pages marked for freeing
134 * @pages_nr: number of z3fold pages in the pool.
135 * @c_handle: cache for z3fold_buddy_slots allocation
136 * @zpool: zpool driver
137 * @zpool_ops: zpool operations structure with an evict callback
138 * @compact_wq: workqueue for page layout background optimization
139 * @release_wq: workqueue for safe page release
140 * @work: work_struct for safe page release
142 * This structure is allocated at pool creation time and maintains metadata
143 * pertaining to a particular z3fold pool.
148 spinlock_t stale_lock;
149 struct list_head *unbuddied;
150 struct list_head stale;
152 struct kmem_cache *c_handle;
153 struct workqueue_struct *compact_wq;
154 struct workqueue_struct *release_wq;
155 struct work_struct work;
159 * Internal z3fold page flags
161 enum z3fold_page_flags {
166 PAGE_CLAIMED, /* by either reclaim or free */
167 PAGE_MIGRATED, /* page is migrated and soon to be released */
171 * handle flags, go under HANDLE_FLAG_MASK
173 enum z3fold_handle_flags {
178 * Forward declarations
180 static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
181 static void compact_page_work(struct work_struct *w);
187 /* Converts an allocation size in bytes to size in z3fold chunks */
188 static int size_to_chunks(size_t size)
190 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
193 #define for_each_unbuddied_list(_iter, _begin) \
194 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
196 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
199 struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
203 /* It will be freed separately in free_handle(). */
204 kmemleak_not_leak(slots);
205 slots->pool = (unsigned long)pool;
206 rwlock_init(&slots->lock);
212 static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
214 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
217 static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
219 return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
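/*
 * Editor's note -- illustrative only: a non-headless handle is the kernel
 * virtual address of one slot[] entry inside a struct z3fold_buddy_slots.
 * Because that structure comes from a SLOTS_ALIGN (64-byte) aligned cache,
 * clearing the low 6 bits of the handle recovers the address of the
 * containing slots structure, e.g. (addresses assumed for illustration):
 *
 *   slots          = 0xffff888012345640
 *   handle         = &slots->slot[2] = 0xffff888012345650
 *   handle & ~0x3f = 0xffff888012345640 == slots
 */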
222 /* Lock a z3fold page */
223 static inline void z3fold_page_lock(struct z3fold_header *zhdr)
225 spin_lock(&zhdr->page_lock);
228 /* Try to lock a z3fold page */
229 static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
231 return spin_trylock(&zhdr->page_lock);
234 /* Unlock a z3fold page */
235 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
237 spin_unlock(&zhdr->page_lock);
240 /* return locked z3fold page if it's not headless */
241 static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
243 struct z3fold_buddy_slots *slots;
244 struct z3fold_header *zhdr;
247 if (!(handle & (1 << PAGE_HEADLESS))) {
248 slots = handle_to_slots(handle);
252 read_lock(&slots->lock);
253 addr = *(unsigned long *)handle;
254 zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
255 locked = z3fold_page_trylock(zhdr);
256 read_unlock(&slots->lock);
258 struct page *page = virt_to_page(zhdr);
260 if (!test_bit(PAGE_MIGRATED, &page->private))
262 z3fold_page_unlock(zhdr);
267 zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
273 static inline void put_z3fold_header(struct z3fold_header *zhdr)
275 struct page *page = virt_to_page(zhdr);
277 if (!test_bit(PAGE_HEADLESS, &page->private))
278 z3fold_page_unlock(zhdr);
281 static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
283 struct z3fold_buddy_slots *slots;
287 if (WARN_ON(*(unsigned long *)handle == 0))
290 slots = handle_to_slots(handle);
291 write_lock(&slots->lock);
292 *(unsigned long *)handle = 0;
294 if (test_bit(HANDLES_NOFREE, &slots->pool)) {
295 write_unlock(&slots->lock);
296 return; /* simple case, nothing else to do */
299 if (zhdr->slots != slots)
300 zhdr->foreign_handles--;
303 for (i = 0; i <= BUDDY_MASK; i++) {
304 if (slots->slot[i]) {
309 write_unlock(&slots->lock);
312 struct z3fold_pool *pool = slots_to_pool(slots);
314 if (zhdr->slots == slots)
316 kmem_cache_free(pool->c_handle, slots);
320 /* Initializes the z3fold header of a newly allocated z3fold page */
321 static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
322 struct z3fold_pool *pool, gfp_t gfp)
324 struct z3fold_header *zhdr = page_address(page);
325 struct z3fold_buddy_slots *slots;
327 clear_bit(PAGE_HEADLESS, &page->private);
328 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
329 clear_bit(NEEDS_COMPACTING, &page->private);
330 clear_bit(PAGE_STALE, &page->private);
331 clear_bit(PAGE_CLAIMED, &page->private);
332 clear_bit(PAGE_MIGRATED, &page->private);
336 slots = alloc_slots(pool, gfp);
340 memset(zhdr, 0, sizeof(*zhdr));
341 spin_lock_init(&zhdr->page_lock);
342 kref_init(&zhdr->refcount);
346 INIT_LIST_HEAD(&zhdr->buddy);
347 INIT_WORK(&zhdr->work, compact_page_work);
351 /* Resets the struct page fields and frees the page */
352 static void free_z3fold_page(struct page *page, bool headless)
356 __ClearPageMovable(page);
362 /* Helper function to build the index */
363 static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
365 return (bud + zhdr->first_num) & BUDDY_MASK;
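/*
 * Editor's note -- worked example (illustrative, assuming the buddy enum
 * values FIRST = 1, MIDDLE = 2, LAST = 3 from the full source): with
 * first_num == 2, __idx() yields (1 + 2) & 0x3 = 3 for FIRST,
 * (2 + 2) & 0x3 = 0 for MIDDLE and (3 + 2) & 0x3 = 1 for LAST, so the three
 * buddies of a page always map to three distinct slot indices.
 */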
369 * Encodes the handle of a particular buddy within a z3fold page
370 * Pool lock should be held as this function accesses first_num
372 static unsigned long __encode_handle(struct z3fold_header *zhdr,
373 struct z3fold_buddy_slots *slots,
376 unsigned long h = (unsigned long)zhdr;
380 * For a headless page, its handle is its pointer with the extra
381 * PAGE_HEADLESS bit set
384 return h | (1 << PAGE_HEADLESS);
386 /* otherwise, return pointer to encoded handle */
387 idx = __idx(zhdr, bud);
390 h |= (zhdr->last_chunks << BUDDY_SHIFT);
392 write_lock(&slots->lock);
393 slots->slot[idx] = h;
394 write_unlock(&slots->lock);
395 return (unsigned long)&slots->slot[idx];
398 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
400 return __encode_handle(zhdr, zhdr->slots, bud);
403 /* only for LAST bud, returns zero otherwise */
404 static unsigned short handle_to_chunks(unsigned long handle)
406 struct z3fold_buddy_slots *slots = handle_to_slots(handle);
409 read_lock(&slots->lock);
410 addr = *(unsigned long *)handle;
411 read_unlock(&slots->lock);
412 return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
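/*
 * Editor's note -- illustrative example: for a LAST buddy, __encode_handle()
 * above stores the object size in the slot value as
 * (last_chunks << BUDDY_SHIFT), where BUDDY_SHIFT (defined in the full
 * source) equals NCHUNKS_ORDER. Assuming last_chunks = 10 and a 4K page, the
 * in-page bits of the stored value are 10 << 6 = 0x280, and
 * handle_to_chunks() recovers (0x280 & ~PAGE_MASK) >> 6 = 10.
 */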
416 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
417 * but that doesn't matter, because the masking will result in the
418 * correct buddy number.
420 static enum buddy handle_to_buddy(unsigned long handle)
422 struct z3fold_header *zhdr;
423 struct z3fold_buddy_slots *slots = handle_to_slots(handle);
426 read_lock(&slots->lock);
427 WARN_ON(handle & (1 << PAGE_HEADLESS));
428 addr = *(unsigned long *)handle;
429 read_unlock(&slots->lock);
430 zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
431 return (addr - zhdr->first_num) & BUDDY_MASK;
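/*
 * Editor's note -- worked example (illustrative): with first_num == 2 and a
 * LAST buddy (value 3 in the full source's buddy enum), encode_handle()
 * stores the index (3 + 2) & 0x3 = 1 in the low bits of the slot value, and
 * handle_to_buddy() recovers (1 - 2) & 0x3 = 3, i.e. LAST. This is why the
 * note above holds even when the stored index is smaller than first_num.
 */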
434 static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
439 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
441 struct page *page = virt_to_page(zhdr);
442 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
444 WARN_ON(!list_empty(&zhdr->buddy));
445 set_bit(PAGE_STALE, &page->private);
446 clear_bit(NEEDS_COMPACTING, &page->private);
447 spin_lock(&pool->lock);
448 spin_unlock(&pool->lock);
451 z3fold_page_unlock(zhdr);
453 spin_lock(&pool->stale_lock);
454 list_add(&zhdr->buddy, &pool->stale);
455 queue_work(pool->release_wq, &pool->work);
456 spin_unlock(&pool->stale_lock);
458 atomic64_dec(&pool->pages_nr);
461 static void release_z3fold_page_locked(struct kref *ref)
463 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
465 WARN_ON(z3fold_page_trylock(zhdr));
466 __release_z3fold_page(zhdr, true);
469 static void release_z3fold_page_locked_list(struct kref *ref)
471 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
473 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
475 spin_lock(&pool->lock);
476 list_del_init(&zhdr->buddy);
477 spin_unlock(&pool->lock);
479 WARN_ON(z3fold_page_trylock(zhdr));
480 __release_z3fold_page(zhdr, true);
483 static void free_pages_work(struct work_struct *w)
485 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
487 spin_lock(&pool->stale_lock);
488 while (!list_empty(&pool->stale)) {
489 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
490 struct z3fold_header, buddy);
491 struct page *page = virt_to_page(zhdr);
493 list_del(&zhdr->buddy);
494 if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
496 spin_unlock(&pool->stale_lock);
497 cancel_work_sync(&zhdr->work);
498 free_z3fold_page(page, false);
500 spin_lock(&pool->stale_lock);
502 spin_unlock(&pool->stale_lock);
506 * Returns the number of free chunks in a z3fold page.
507 * NB: can't be used with HEADLESS pages.
509 static int num_free_chunks(struct z3fold_header *zhdr)
513 * If there is a middle object, pick up the bigger free space
514 * either before or after it. Otherwise just subtract the number
515 * of chunks occupied by the first and the last objects.
517 if (zhdr->middle_chunks != 0) {
518 int nfree_before = zhdr->first_chunks ?
519 0 : zhdr->start_middle - ZHDR_CHUNKS;
520 int nfree_after = zhdr->last_chunks ?
521 0 : TOTAL_CHUNKS -
522 (zhdr->start_middle + zhdr->middle_chunks);
523 nfree = max(nfree_before, nfree_after);
525 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
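/*
 * Editor's note -- worked example (illustrative, assuming a 4K page, i.e.
 * TOTAL_CHUNKS = 64, and a one-chunk header): with first_chunks = 10,
 * start_middle = 20, middle_chunks = 5 and last_chunks = 0, the space before
 * the middle object is unusable (nfree_before = 0 since FIRST is occupied)
 * and nfree_after = 64 - (20 + 5) = 39, so num_free_chunks() reports 39.
 */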
529 /* Add to the appropriate unbuddied list */
530 static inline void add_to_unbuddied(struct z3fold_pool *pool,
531 struct z3fold_header *zhdr)
533 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
534 zhdr->middle_chunks == 0) {
535 struct list_head *unbuddied;
536 int freechunks = num_free_chunks(zhdr);
539 unbuddied = this_cpu_ptr(pool->unbuddied);
540 spin_lock(&pool->lock);
541 list_add(&zhdr->buddy, &unbuddied[freechunks]);
542 spin_unlock(&pool->lock);
543 zhdr->cpu = smp_processor_id();
548 static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
550 enum buddy bud = HEADLESS;
552 if (zhdr->middle_chunks) {
553 if (!zhdr->first_chunks &&
554 chunks <= zhdr->start_middle - ZHDR_CHUNKS)
556 else if (!zhdr->last_chunks)
559 if (!zhdr->first_chunks)
561 else if (!zhdr->last_chunks)
570 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
571 unsigned short dst_chunk)
574 return memmove(beg + (dst_chunk << CHUNK_SHIFT),
575 beg + (zhdr->start_middle << CHUNK_SHIFT),
576 zhdr->middle_chunks << CHUNK_SHIFT);
579 static inline bool buddy_single(struct z3fold_header *zhdr)
581 return !((zhdr->first_chunks && zhdr->middle_chunks) ||
582 (zhdr->first_chunks && zhdr->last_chunks) ||
583 (zhdr->middle_chunks && zhdr->last_chunks));
586 static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
588 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
590 unsigned long old_handle = 0;
592 struct z3fold_header *new_zhdr = NULL;
593 int first_idx = __idx(zhdr, FIRST);
594 int middle_idx = __idx(zhdr, MIDDLE);
595 int last_idx = __idx(zhdr, LAST);
596 unsigned short *moved_chunks = NULL;
599 * No need to protect slots here -- all the slots are "local" and
600 * the page lock is already taken
602 if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
603 p += ZHDR_SIZE_ALIGNED;
604 sz = zhdr->first_chunks << CHUNK_SHIFT;
605 old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
606 moved_chunks = &zhdr->first_chunks;
607 } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
608 p += zhdr->start_middle << CHUNK_SHIFT;
609 sz = zhdr->middle_chunks << CHUNK_SHIFT;
610 old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
611 moved_chunks = &zhdr->middle_chunks;
612 } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
613 p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
614 sz = zhdr->last_chunks << CHUNK_SHIFT;
615 old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
616 moved_chunks = &zhdr->last_chunks;
620 enum buddy new_bud = HEADLESS;
621 short chunks = size_to_chunks(sz);
624 new_zhdr = __z3fold_alloc(pool, sz, false);
628 if (WARN_ON(new_zhdr == zhdr))
631 new_bud = get_free_buddy(new_zhdr, chunks);
635 new_zhdr->first_chunks = chunks;
636 q += ZHDR_SIZE_ALIGNED;
639 new_zhdr->middle_chunks = chunks;
640 new_zhdr->start_middle =
641 new_zhdr->first_chunks + ZHDR_CHUNKS;
642 q += new_zhdr->start_middle << CHUNK_SHIFT;
645 new_zhdr->last_chunks = chunks;
646 q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
651 new_zhdr->foreign_handles++;
653 write_lock(&zhdr->slots->lock);
654 *(unsigned long *)old_handle = (unsigned long)new_zhdr +
655 __idx(new_zhdr, new_bud);
657 *(unsigned long *)old_handle |=
658 (new_zhdr->last_chunks << BUDDY_SHIFT);
659 write_unlock(&zhdr->slots->lock);
660 add_to_unbuddied(pool, new_zhdr);
661 z3fold_page_unlock(new_zhdr);
669 if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
670 add_to_unbuddied(pool, new_zhdr);
671 z3fold_page_unlock(new_zhdr);
677 #define BIG_CHUNK_GAP 3
678 /* Has to be called with lock held */
679 static int z3fold_compact_page(struct z3fold_header *zhdr)
681 struct page *page = virt_to_page(zhdr);
683 if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
684 return 0; /* can't move middle chunk, it's used */
686 if (unlikely(PageIsolated(page)))
689 if (zhdr->middle_chunks == 0)
690 return 0; /* nothing to compact */
692 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
693 /* move to the beginning */
694 mchunk_memmove(zhdr, ZHDR_CHUNKS);
695 zhdr->first_chunks = zhdr->middle_chunks;
696 zhdr->middle_chunks = 0;
697 zhdr->start_middle = 0;
703 * moving data is expensive, so let's only do that if
704 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
706 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
707 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
709 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
710 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
712 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
713 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
714 + zhdr->middle_chunks) >=
716 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
718 mchunk_memmove(zhdr, new_start);
719 zhdr->start_middle = new_start;
726 static void do_compact_page(struct z3fold_header *zhdr, bool locked)
728 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
731 page = virt_to_page(zhdr);
733 WARN_ON(z3fold_page_trylock(zhdr));
735 z3fold_page_lock(zhdr);
736 if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
737 z3fold_page_unlock(zhdr);
740 spin_lock(&pool->lock);
741 list_del_init(&zhdr->buddy);
742 spin_unlock(&pool->lock);
744 if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
747 if (test_bit(PAGE_STALE, &page->private) ||
748 test_and_set_bit(PAGE_CLAIMED, &page->private)) {
749 z3fold_page_unlock(zhdr);
753 if (!zhdr->foreign_handles && buddy_single(zhdr) &&
754 zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
755 if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
756 clear_bit(PAGE_CLAIMED, &page->private);
757 z3fold_page_unlock(zhdr);
762 z3fold_compact_page(zhdr);
763 add_to_unbuddied(pool, zhdr);
764 clear_bit(PAGE_CLAIMED, &page->private);
765 z3fold_page_unlock(zhdr);
768 static void compact_page_work(struct work_struct *w)
770 struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
773 do_compact_page(zhdr, false);
776 /* returns _locked_ z3fold page header or NULL */
777 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
778 size_t size, bool can_sleep)
780 struct z3fold_header *zhdr = NULL;
782 struct list_head *unbuddied;
783 int chunks = size_to_chunks(size), i;
787 /* First, try to find an unbuddied z3fold page. */
788 unbuddied = this_cpu_ptr(pool->unbuddied);
789 for_each_unbuddied_list(i, chunks) {
790 struct list_head *l = &unbuddied[i];
792 zhdr = list_first_entry_or_null(READ_ONCE(l),
793 struct z3fold_header, buddy);
798 /* Re-check under lock. */
799 spin_lock(&pool->lock);
800 if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
801 struct z3fold_header, buddy)) ||
802 !z3fold_page_trylock(zhdr)) {
803 spin_unlock(&pool->lock);
810 list_del_init(&zhdr->buddy);
812 spin_unlock(&pool->lock);
814 page = virt_to_page(zhdr);
815 if (test_bit(NEEDS_COMPACTING, &page->private) ||
816 test_bit(PAGE_CLAIMED, &page->private)) {
817 z3fold_page_unlock(zhdr);
826 * this page could not be removed from its unbuddied
827 * list while pool lock was held, and then we've taken
828 * page lock so kref_put could not be called before
829 * we got here, so it's safe to just call kref_get()
831 kref_get(&zhdr->refcount);
839 /* look for _exact_ match on other cpus' lists */
840 for_each_online_cpu(cpu) {
843 unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
844 spin_lock(&pool->lock);
845 l = &unbuddied[chunks];
847 zhdr = list_first_entry_or_null(READ_ONCE(l),
848 struct z3fold_header, buddy);
850 if (!zhdr || !z3fold_page_trylock(zhdr)) {
851 spin_unlock(&pool->lock);
855 list_del_init(&zhdr->buddy);
857 spin_unlock(&pool->lock);
859 page = virt_to_page(zhdr);
860 if (test_bit(NEEDS_COMPACTING, &page->private) ||
861 test_bit(PAGE_CLAIMED, &page->private)) {
862 z3fold_page_unlock(zhdr);
868 kref_get(&zhdr->refcount);
873 if (zhdr && !zhdr->slots) {
874 zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
881 if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
882 add_to_unbuddied(pool, zhdr);
883 z3fold_page_unlock(zhdr);
893 * z3fold_create_pool() - create a new z3fold pool
895 * @gfp: gfp flags when allocating the z3fold pool structure
897 * Return: pointer to the new z3fold pool or NULL if the metadata allocation failed.
900 static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
902 struct z3fold_pool *pool = NULL;
905 pool = kzalloc(sizeof(struct z3fold_pool), gfp);
908 pool->c_handle = kmem_cache_create("z3fold_handle",
909 sizeof(struct z3fold_buddy_slots),
910 SLOTS_ALIGN, 0, NULL);
913 spin_lock_init(&pool->lock);
914 spin_lock_init(&pool->stale_lock);
915 pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
916 __alignof__(struct list_head));
917 if (!pool->unbuddied)
919 for_each_possible_cpu(cpu) {
920 struct list_head *unbuddied =
921 per_cpu_ptr(pool->unbuddied, cpu);
922 for_each_unbuddied_list(i, 0)
923 INIT_LIST_HEAD(&unbuddied[i]);
925 INIT_LIST_HEAD(&pool->stale);
926 atomic64_set(&pool->pages_nr, 0);
928 pool->compact_wq = create_singlethread_workqueue(pool->name);
929 if (!pool->compact_wq)
931 pool->release_wq = create_singlethread_workqueue(pool->name);
932 if (!pool->release_wq)
934 INIT_WORK(&pool->work, free_pages_work);
938 destroy_workqueue(pool->compact_wq);
940 free_percpu(pool->unbuddied);
942 kmem_cache_destroy(pool->c_handle);
950 * z3fold_destroy_pool() - destroys an existing z3fold pool
951 * @pool: the z3fold pool to be destroyed
953 * The pool should be emptied before this function is called.
955 static void z3fold_destroy_pool(struct z3fold_pool *pool)
957 kmem_cache_destroy(pool->c_handle);
960 * We need to destroy pool->compact_wq before pool->release_wq,
961 * as any pending work on pool->compact_wq will call
962 * queue_work(pool->release_wq, &pool->work).
964 * There are still outstanding pages until both workqueues are drained,
965 * so we cannot unregister migration until then.
968 destroy_workqueue(pool->compact_wq);
969 destroy_workqueue(pool->release_wq);
970 free_percpu(pool->unbuddied);
974 static const struct movable_operations z3fold_mops;
977 * z3fold_alloc() - allocates a region of a given size
978 * @pool: z3fold pool from which to allocate
979 * @size: size in bytes of the desired allocation
980 * @gfp: gfp flags used if the pool needs to grow
981 * @handle: handle of the new allocation
983 * This function will attempt to find a free region in the pool large enough to
984 * satisfy the allocation request. A search of the unbuddied lists is
985 * performed first. If no suitable free region is found, then a new page is
986 * allocated and added to the pool to satisfy the request.
988 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
989 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate a new page.
992 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
993 unsigned long *handle)
995 int chunks = size_to_chunks(size);
996 struct z3fold_header *zhdr = NULL;
997 struct page *page = NULL;
999 bool can_sleep = gfpflags_allow_blocking(gfp);
1001 if (!size || (gfp & __GFP_HIGHMEM))
1004 if (size > PAGE_SIZE)
1007 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
1011 zhdr = __z3fold_alloc(pool, size, can_sleep);
1013 bud = get_free_buddy(zhdr, chunks);
1014 if (bud == HEADLESS) {
1015 if (!kref_put(&zhdr->refcount,
1016 release_z3fold_page_locked))
1017 z3fold_page_unlock(zhdr);
1018 pr_err("No free chunks in unbuddied\n");
1022 page = virt_to_page(zhdr);
1028 page = alloc_page(gfp);
1032 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1037 atomic64_inc(&pool->pages_nr);
1039 if (bud == HEADLESS) {
1040 set_bit(PAGE_HEADLESS, &page->private);
1045 __SetPageMovable(page, &z3fold_mops);
1048 WARN_ON(!trylock_page(page));
1049 __SetPageMovable(page, &z3fold_mops);
1052 z3fold_page_lock(zhdr);
1056 zhdr->first_chunks = chunks;
1057 else if (bud == LAST)
1058 zhdr->last_chunks = chunks;
1060 zhdr->middle_chunks = chunks;
1061 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
1063 add_to_unbuddied(pool, zhdr);
1066 spin_lock(&pool->lock);
1067 *handle = encode_handle(zhdr, bud);
1068 spin_unlock(&pool->lock);
1069 if (bud != HEADLESS)
1070 z3fold_page_unlock(zhdr);
1076 * z3fold_free() - frees the allocation associated with the given handle
1077 * @pool: pool in which the allocation resided
1078 * @handle: handle associated with the allocation returned by z3fold_alloc()
1080 * In the case that the z3fold page in which the allocation resides is under
1081 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
1082 * only sets the first|middle|last_chunks to 0. The page is actually freed
1083 * once all remaining buddies are freed.
1085 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
1087 struct z3fold_header *zhdr;
1092 zhdr = get_z3fold_header(handle);
1093 page = virt_to_page(zhdr);
1094 page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
1096 if (test_bit(PAGE_HEADLESS, &page->private)) {
1097 /* if a headless page is under reclaim, just leave.
1098 * NB: we use test_and_set_bit for a reason: if the bit
1099 * has not been set before, we release this page
1100 * immediately so we don't care about its value any more.
1102 if (!page_claimed) {
1103 put_z3fold_header(zhdr);
1104 free_z3fold_page(page, true);
1105 atomic64_dec(&pool->pages_nr);
1110 /* Non-headless case */
1111 bud = handle_to_buddy(handle);
1115 zhdr->first_chunks = 0;
1118 zhdr->middle_chunks = 0;
1121 zhdr->last_chunks = 0;
1124 pr_err("%s: unknown bud %d\n", __func__, bud);
1126 put_z3fold_header(zhdr);
1131 free_handle(handle, zhdr);
1132 if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
1135 /* the page has not been claimed by us */
1136 put_z3fold_header(zhdr);
1139 if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1140 clear_bit(PAGE_CLAIMED, &page->private);
1141 put_z3fold_header(zhdr);
1144 if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1146 kref_get(&zhdr->refcount);
1147 clear_bit(PAGE_CLAIMED, &page->private);
1148 do_compact_page(zhdr, true);
1151 kref_get(&zhdr->refcount);
1152 clear_bit(PAGE_CLAIMED, &page->private);
1153 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1154 put_z3fold_header(zhdr);
1158 * z3fold_map() - maps the allocation associated with the given handle
1159 * @pool: pool in which the allocation resides
1160 * @handle: handle associated with the allocation to be mapped
1162 * Extracts the buddy number from handle and constructs the pointer to the
1163 * correct starting chunk within the page.
1165 * Returns: a pointer to the mapped allocation
1167 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1169 struct z3fold_header *zhdr;
1174 zhdr = get_z3fold_header(handle);
1176 page = virt_to_page(zhdr);
1178 if (test_bit(PAGE_HEADLESS, &page->private))
1181 buddy = handle_to_buddy(handle);
1184 addr += ZHDR_SIZE_ALIGNED;
1187 addr += zhdr->start_middle << CHUNK_SHIFT;
1188 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1191 addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1194 pr_err("unknown buddy id %d\n", buddy);
1201 zhdr->mapped_count++;
1203 put_z3fold_header(zhdr);
1208 * z3fold_unmap() - unmaps the allocation associated with the given handle
1209 * @pool: pool in which the allocation resides
1210 * @handle: handle associated with the allocation to be unmapped
1212 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1214 struct z3fold_header *zhdr;
1218 zhdr = get_z3fold_header(handle);
1219 page = virt_to_page(zhdr);
1221 if (test_bit(PAGE_HEADLESS, &page->private))
1224 buddy = handle_to_buddy(handle);
1225 if (buddy == MIDDLE)
1226 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1227 zhdr->mapped_count--;
1228 put_z3fold_header(zhdr);
1232 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1233 * @pool: pool whose size is being queried
1235 * Returns: size in pages of the given pool.
1237 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1239 return atomic64_read(&pool->pages_nr);
1242 static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1244 struct z3fold_header *zhdr;
1245 struct z3fold_pool *pool;
1247 VM_BUG_ON_PAGE(PageIsolated(page), page);
1249 if (test_bit(PAGE_HEADLESS, &page->private))
1252 zhdr = page_address(page);
1253 z3fold_page_lock(zhdr);
1254 if (test_bit(NEEDS_COMPACTING, &page->private) ||
1255 test_bit(PAGE_STALE, &page->private))
1258 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1261 if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1263 pool = zhdr_to_pool(zhdr);
1264 spin_lock(&pool->lock);
1265 if (!list_empty(&zhdr->buddy))
1266 list_del_init(&zhdr->buddy);
1267 spin_unlock(&pool->lock);
1269 kref_get(&zhdr->refcount);
1270 z3fold_page_unlock(zhdr);
1274 z3fold_page_unlock(zhdr);
1278 static int z3fold_page_migrate(struct page *newpage, struct page *page,
1279 enum migrate_mode mode)
1281 struct z3fold_header *zhdr, *new_zhdr;
1282 struct z3fold_pool *pool;
1284 VM_BUG_ON_PAGE(!PageIsolated(page), page);
1285 VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
1286 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1288 zhdr = page_address(page);
1289 pool = zhdr_to_pool(zhdr);
1291 if (!z3fold_page_trylock(zhdr))
1293 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1294 clear_bit(PAGE_CLAIMED, &page->private);
1295 z3fold_page_unlock(zhdr);
1298 if (work_pending(&zhdr->work)) {
1299 z3fold_page_unlock(zhdr);
1302 new_zhdr = page_address(newpage);
1303 memcpy(new_zhdr, zhdr, PAGE_SIZE);
1304 newpage->private = page->private;
1305 set_bit(PAGE_MIGRATED, &page->private);
1306 z3fold_page_unlock(zhdr);
1307 spin_lock_init(&new_zhdr->page_lock);
1308 INIT_WORK(&new_zhdr->work, compact_page_work);
1310 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1311 * so we only have to reinitialize it.
1313 INIT_LIST_HEAD(&new_zhdr->buddy);
1314 __ClearPageMovable(page);
1317 z3fold_page_lock(new_zhdr);
1318 if (new_zhdr->first_chunks)
1319 encode_handle(new_zhdr, FIRST);
1320 if (new_zhdr->last_chunks)
1321 encode_handle(new_zhdr, LAST);
1322 if (new_zhdr->middle_chunks)
1323 encode_handle(new_zhdr, MIDDLE);
1324 set_bit(NEEDS_COMPACTING, &newpage->private);
1325 new_zhdr->cpu = smp_processor_id();
1326 __SetPageMovable(newpage, &z3fold_mops);
1327 z3fold_page_unlock(new_zhdr);
1329 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1331 /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
1337 static void z3fold_page_putback(struct page *page)
1339 struct z3fold_header *zhdr;
1340 struct z3fold_pool *pool;
1342 zhdr = page_address(page);
1343 pool = zhdr_to_pool(zhdr);
1345 z3fold_page_lock(zhdr);
1346 if (!list_empty(&zhdr->buddy))
1347 list_del_init(&zhdr->buddy);
1348 INIT_LIST_HEAD(&page->lru);
1349 if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
1351 if (list_empty(&zhdr->buddy))
1352 add_to_unbuddied(pool, zhdr);
1353 clear_bit(PAGE_CLAIMED, &page->private);
1354 z3fold_page_unlock(zhdr);
1357 static const struct movable_operations z3fold_mops = {
1358 .isolate_page = z3fold_page_isolate,
1359 .migrate_page = z3fold_page_migrate,
1360 .putback_page = z3fold_page_putback,
1367 static void *z3fold_zpool_create(const char *name, gfp_t gfp)
1369 return z3fold_create_pool(name, gfp);
1372 static void z3fold_zpool_destroy(void *pool)
1374 z3fold_destroy_pool(pool);
1377 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1378 unsigned long *handle)
1380 return z3fold_alloc(pool, size, gfp, handle);
1382 static void z3fold_zpool_free(void *pool, unsigned long handle)
1384 z3fold_free(pool, handle);
1387 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1388 enum zpool_mapmode mm)
1390 return z3fold_map(pool, handle);
1392 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1394 z3fold_unmap(pool, handle);
1397 static u64 z3fold_zpool_total_size(void *pool)
1399 return z3fold_get_pool_size(pool) * PAGE_SIZE;
1402 static struct zpool_driver z3fold_zpool_driver = {
1404 .sleep_mapped = true,
1405 .owner = THIS_MODULE,
1406 .create = z3fold_zpool_create,
1407 .destroy = z3fold_zpool_destroy,
1408 .malloc = z3fold_zpool_malloc,
1409 .free = z3fold_zpool_free,
1410 .map = z3fold_zpool_map,
1411 .unmap = z3fold_zpool_unmap,
1412 .total_size = z3fold_zpool_total_size,
1415 MODULE_ALIAS("zpool-z3fold");
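/*
 * Editor's note -- illustrative sketch only, not part of the driver: z3fold
 * does not export its own API (see the header comment), so a client such as
 * zswap reaches it through the zpool callbacks above. The disabled snippet
 * below shows the expected call sequence using the wrappers defined in this
 * file; the function name and error handling are invented for illustration.
 */
#if 0
static int z3fold_usage_sketch(void)
{
	unsigned long handle;
	void *pool, *obj;
	int ret;

	pool = z3fold_zpool_create("sketch", GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	/* allocate a 1 KiB object and get an opaque handle back */
	ret = z3fold_zpool_malloc(pool, 1024, GFP_KERNEL, &handle);
	if (!ret) {
		/* map the handle to a kernel pointer, use it, then unmap */
		obj = z3fold_zpool_map(pool, handle, ZPOOL_MM_RW);
		memset(obj, 0, 1024);
		z3fold_zpool_unmap(pool, handle);
		z3fold_zpool_free(pool, handle);
	}

	z3fold_zpool_destroy(pool);
	return ret;
}
#endif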
1417 static int __init init_z3fold(void)
1420 * Make sure the z3fold header is not larger than the page size and
1421 * that there is space remaining for its buddies.
1423 BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
1424 zpool_register_driver(&z3fold_zpool_driver);
1429 static void __exit exit_z3fold(void)
1431 zpool_unregister_driver(&z3fold_zpool_driver);
1434 module_init(init_z3fold);
1435 module_exit(exit_z3fold);
1437 MODULE_LICENSE("GPL");
1438 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1439 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");