// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/pagevec.h>
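/*
 * Grab a page from the caller's preallocated @pool if one is available;
 * otherwise fall back to a fresh allocation with @gfp. Pooled pages are
 * expected to be exclusively owned (refcount == 1).
 */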
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
        struct page *page;

        if (!list_empty(pool)) {
                page = lru_to_page(pool);
                DBG_BUGON(page_ref_count(page) != 1);
                list_del(&page->lru);
        } else {
                page = alloc_page(gfp);
        }
        return page;
}
#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;
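/*
 * Workgroup reference counting: refcount == 1 means only the XArray slot
 * still holds the workgroup, i.e. it's reclaimable and accounted in
 * erofs_global_shrink_cnt. A negative refcount means the workgroup has
 * been frozen by a reclaimer and must not be grabbed until unfrozen.
 */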
static int erofs_workgroup_get(struct erofs_workgroup *grp)
{
        int o;

repeat:
        o = erofs_wait_on_workgroup_freezed(grp);
        if (o <= 0)
                return -1;

        if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
                goto repeat;

        /* decrease refcount paired by erofs_workgroup_put */
        if (o == 1)
                atomic_long_dec(&erofs_global_shrink_cnt);
        return 0;
}
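/*
 * Look up a workgroup under RCU. If grabbing a reference fails (the
 * workgroup is frozen or about to be freed), drop the RCU read lock
 * and retry so the reclaimer can make progress.
 */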
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
                                             pgoff_t index)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_workgroup *grp;

repeat:
        rcu_read_lock();
        grp = xa_load(&sbi->managed_pslots, index);
        if (grp) {
                if (erofs_workgroup_get(grp)) {
                        /* prefer to relax the rcu read side */
                        rcu_read_unlock();
                        goto repeat;
                }

                DBG_BUGON(index != grp->index);
        }
        rcu_read_unlock();
        return grp;
}
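/*
 * Insert a workgroup into the managed XArray. If another instance with
 * the same index is already in the tree, a reference is taken on it and
 * it's returned instead, so callers always end up with the in-tree one.
 *
 * Typical caller pattern (a sketch; alloc_and_init_workgroup is a
 * hypothetical helper, not part of this file):
 *
 *	grp = erofs_find_workgroup(sb, index);
 *	if (!grp) {
 *		grp = alloc_and_init_workgroup(index);
 *		grp = erofs_insert_workgroup(sb, grp);
 *	}
 */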
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
                                               struct erofs_workgroup *grp)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        struct erofs_workgroup *pre;

        /*
         * Bump up the reference count before making this workgroup
         * visible to others in the XArray, in order to avoid a
         * potential UAF: lookups aren't serialized by xa_lock.
         */
        atomic_inc(&grp->refcount);

repeat:
        xa_lock(&sbi->managed_pslots);
        pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
                           NULL, grp, GFP_NOFS);
        if (pre) {
                if (xa_is_err(pre)) {
                        pre = ERR_PTR(xa_err(pre));
                } else if (erofs_workgroup_get(pre)) {
                        /* try to legitimize the current in-tree one */
                        xa_unlock(&sbi->managed_pslots);
                        cond_resched();
                        goto repeat;
                }
                atomic_dec(&grp->refcount);
                grp = pre;
        }
        xa_unlock(&sbi->managed_pslots);
        return grp;
}
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
        atomic_long_dec(&erofs_global_shrink_cnt);
        erofs_workgroup_free_rcu(grp);
}
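/*
 * Drop a reference. Hitting a refcount of 1 means only the XArray slot
 * is left, so the workgroup becomes reclaimable and is accounted in the
 * global shrink count; hitting 0 frees it (RCU-deferred).
 */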
int erofs_workgroup_put(struct erofs_workgroup *grp)
{
        int count = atomic_dec_return(&grp->refcount);

        if (count == 1)
                atomic_long_inc(&erofs_global_shrink_cnt);
        else if (!count)
                __erofs_workgroup_free(grp);
        return count;
}
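/*
 * Reclaim path: freeze the workgroup first so concurrent lookups wait in
 * erofs_workgroup_get(), then detach its cached pages and erase it from
 * the XArray before the final free.
 */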
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
                                           struct erofs_workgroup *grp)
{
        /*
         * If the managed cache is on, workgroup refcounts could be
         * < 0 (frozen). In other words, there is no guarantee that
         * all refcounts are > 0.
         */
        if (!erofs_workgroup_try_to_freeze(grp, 1))
                return false;

        /*
         * Note that all cached pages should be unattached before the
         * workgroup is deleted from the XArray. Otherwise some cached
         * pages could still be attached to the orphan old workgroup
         * when the new one is available in the tree.
         */
        if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
                erofs_workgroup_unfreeze(grp, 1);
                return false;
        }

        /*
         * It's impossible for this to fail after the workgroup is
         * frozen, but add a DBG_BUGON to catch unexpected race
         * conditions in advance.
         */
        DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);

        /* the last refcount should be connected with its managed pslot. */
        erofs_workgroup_unfreeze(grp, 0);
        __erofs_workgroup_free(grp);
        return true;
}
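/*
 * Walk every workgroup of a mount and try to release each one, stopping
 * once @nr_shrink workgroups have been freed. Workgroups that are busy
 * (grabbed, or with cached pages still pinned) are simply skipped.
 */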
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
                                              unsigned long nr_shrink)
{
        struct erofs_workgroup *grp;
        unsigned int freed = 0;
        unsigned long index;

        xa_for_each(&sbi->managed_pslots, index, grp) {
                /* try to shrink each valid workgroup */
                if (!erofs_try_to_release_workgroup(sbi, grp))
                        continue;

                ++freed;
                if (!--nr_shrink)
                        break;
        }
        return freed;
}
/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
void erofs_shrinker_register(struct super_block *sb)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);

        mutex_init(&sbi->umount_mutex);

        spin_lock(&erofs_sb_list_lock);
        list_add(&sbi->list, &erofs_sb_list);
        spin_unlock(&erofs_sb_list_lock);
}
void erofs_shrinker_unregister(struct super_block *sb)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);

        mutex_lock(&sbi->umount_mutex);
        /* clean up all remaining workgroups in memory */
        erofs_shrink_workstation(sbi, ~0UL);

        spin_lock(&erofs_sb_list_lock);
        list_del(&sbi->list);
        spin_unlock(&erofs_sb_list_lock);
        mutex_unlock(&sbi->umount_mutex);
}
static unsigned long erofs_shrink_count(struct shrinker *shrink,
                                        struct shrink_control *sc)
{
        return atomic_long_read(&erofs_global_shrink_cnt);
}
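/*
 * Each scan pass is tagged with a fresh nonzero shrinker_run_no, and a
 * superblock is rotated to the list tail once processed: the walk stops
 * when it meets a superblock already tagged with the current run number.
 * umount_mutex is only trylocked so the scan never deadlocks with a
 * concurrent unmount, which shrinks all of its workgroups itself anyway.
 */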
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct erofs_sb_info *sbi;
        struct list_head *p;

        unsigned long nr = sc->nr_to_scan;
        unsigned int run_no;
        unsigned long freed = 0;

        spin_lock(&erofs_sb_list_lock);
        do {
                run_no = ++shrinker_run_no;
        } while (run_no == 0);

        /* Iterate over all mounted superblocks and try to shrink them */
        p = erofs_sb_list.next;
        while (p != &erofs_sb_list) {
                sbi = list_entry(p, struct erofs_sb_info, list);

                /*
                 * We move the ones we do to the end of the list, so we stop
                 * when we see one we have already done.
                 */
                if (sbi->shrinker_run_no == run_no)
                        break;

                if (!mutex_trylock(&sbi->umount_mutex)) {
                        p = p->next;
                        continue;
                }

                spin_unlock(&erofs_sb_list_lock);
                sbi->shrinker_run_no = run_no;

                freed += erofs_shrink_workstation(sbi, nr - freed);

                spin_lock(&erofs_sb_list_lock);
                /* Get the next list element before we move this one */
                p = p->next;

                /*
                 * Move this one to the end of the list to provide some
                 * fairness.
                 */
                list_move_tail(&sbi->list, &erofs_sb_list);
                mutex_unlock(&sbi->umount_mutex);

                if (freed >= nr)
                        break;
        }
        spin_unlock(&erofs_sb_list_lock);
        return freed;
}
static struct shrinker erofs_shrinker_info = {
        .scan_objects = erofs_shrink_scan,
        .count_objects = erofs_shrink_count,
        .seeks = DEFAULT_SEEKS,
};
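/*
 * Registered once at module init: under memory pressure, the MM shrinker
 * core calls .count_objects to size the reclaim target, then .scan_objects
 * with sc->nr_to_scan set accordingly.
 */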
int __init erofs_init_shrinker(void)
{
        return register_shrinker(&erofs_shrinker_info);
}
void erofs_exit_shrinker(void)
{
        unregister_shrinker(&erofs_shrinker_info);
}
#endif  /* !CONFIG_EROFS_FS_ZIP */