// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
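
/*
 * Take one page from the caller's private page pool if it is non-empty,
 * otherwise fall back to alloc_page() with the given gfp mask.  Pooled
 * pages are singly chained through their page_private field.
 */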
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
	struct page *page = *pagepool;

	if (page) {
		DBG_BUGON(page_ref_count(page) != 1);
		*pagepool = (struct page *)page_private(page);
	} else {
		page = alloc_page(gfp);
	}
	return page;
}
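
/* Return every page chained on the private pool back to the page allocator. */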
void erofs_release_pages(struct page **pagepool)
{
	while (*pagepool) {
		struct page *page = *pagepool;

		*pagepool = (struct page *)page_private(page);
		put_page(page);
	}
}

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;
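
/*
 * Try to take an active reference on a workgroup.  The fast path uses
 * lockref_get_not_zero(); if that fails, retry under the lockref spinlock
 * so that a dead (already-removed) workgroup can be detected.  Taking the
 * first reference also removes the workgroup from the shrinkable count.
 */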
static bool erofs_workgroup_get(struct erofs_workgroup *grp)
{
	if (lockref_get_not_zero(&grp->lockref))
		return true;

	spin_lock(&grp->lockref.lock);
	if (__lockref_is_dead(&grp->lockref)) {
		spin_unlock(&grp->lockref.lock);
		return false;
	}

	if (!grp->lockref.count++)
		atomic_long_dec(&erofs_global_shrink_cnt);
	spin_unlock(&grp->lockref.lock);
	return true;
}
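
/*
 * Look up a workgroup by index under RCU and try to grab a reference on
 * it; retry the lookup if the found workgroup is dying concurrently.
 */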
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = xa_load(&sbi->managed_pslots, index);
	if (grp) {
		if (!erofs_workgroup_get(grp)) {
			/* prefer to relax rcu read side */
			rcu_read_unlock();
			goto repeat;
		}
		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}
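
/*
 * Insert a new workgroup into the managed XArray, or return the existing
 * one (with a reference held) if another thread has already inserted a
 * workgroup at the same index.
 */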
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct erofs_workgroup *pre;

	/*
	 * Bump up the reference count before making this visible to others
	 * in the XArray in order to avoid a potential UAF without being
	 * serialized by xa_lock.
	 */
	lockref_get(&grp->lockref);

repeat:
	xa_lock(&sbi->managed_pslots);
	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
			   NULL, grp, GFP_NOFS);
	if (pre) {
		if (xa_is_err(pre)) {
			pre = ERR_PTR(xa_err(pre));
		} else if (!erofs_workgroup_get(pre)) {
			/* try to legitimize the current in-tree one */
			xa_unlock(&sbi->managed_pslots);
			cond_resched();
			goto repeat;
		}
		lockref_put_return(&grp->lockref);
		grp = pre;
	}
	xa_unlock(&sbi->managed_pslots);
	return grp;
}
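
/* Free a dead workgroup via RCU and drop it from the global shrink count. */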
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}
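
/*
 * Drop one reference; when the last active reference goes away, the
 * workgroup becomes reclaimable again and the shrinkable count is bumped.
 */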
void erofs_workgroup_put(struct erofs_workgroup *grp)
{
	if (lockref_put_or_lock(&grp->lockref))
		return;

	DBG_BUGON(__lockref_is_dead(&grp->lockref));
	if (grp->lockref.count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	--grp->lockref.count;
	spin_unlock(&grp->lockref.lock);
}
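
/*
 * Attempt to reclaim an unreferenced workgroup: detach its cached pages,
 * erase it from the XArray, mark the lockref dead and free it.  Returns
 * false if the workgroup is still in use or its pages cannot be freed.
 */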
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp)
{
	bool free = false;

	spin_lock(&grp->lockref.lock);
	if (grp->lockref.count)
		goto out;

	/*
	 * Note that all cached pages should be detached before being deleted
	 * from the XArray. Otherwise some cached pages could still be
	 * attached to the orphan old workgroup when the new one is available
	 * in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp))
		goto out;

	/*
	 * It's impossible to fail after the workgroup is frozen, however in
	 * order to avoid some race conditions, add a DBG_BUGON to observe
	 * this in advance.
	 */
	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);

	lockref_mark_dead(&grp->lockref);
	free = true;
out:
	spin_unlock(&grp->lockref.lock);
	if (free)
		__erofs_workgroup_free(grp);
	return free;
}
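
/*
 * Walk the managed XArray of one mounted instance and reclaim up to
 * nr_shrink workgroups, returning the number actually freed.
 */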
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink)
{
	struct erofs_workgroup *grp;
	unsigned int freed = 0;
	unsigned long index;

	xa_lock(&sbi->managed_pslots);
	xa_for_each(&sbi->managed_pslots, index, grp) {
		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp))
			continue;
		xa_unlock(&sbi->managed_pslots);

		++freed;
		if (!--nr_shrink)
			return freed;
		xa_lock(&sbi->managed_pslots);
	}
	xa_unlock(&sbi->managed_pslots);
	return freed;
}

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
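
/* Add a newly mounted instance to the global list visited by the shrinker. */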
void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}
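
/*
 * Detach an instance at unmount time: reclaim every remaining workgroup
 * and remove the instance from the shrinker list.
 */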
void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	/* clean up all remaining workgroups in memory */
	erofs_shrink_workstation(sbi, ~0UL);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}
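
/* Report how many workgroups are currently reclaimable across all mounts. */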
static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}
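
/*
 * Scan callback of the memory shrinker: walk the mounted instances,
 * reclaim workgroups from each until sc->nr_to_scan is reached, and
 * rotate visited instances to the list tail for fairness.
 */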
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr - freed);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}

static struct shrinker erofs_shrinker_info = {
	.scan_objects = erofs_shrink_scan,
	.count_objects = erofs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

int __init erofs_init_shrinker(void)
{
	return register_shrinker(&erofs_shrinker_info, "erofs-shrinker");
}

void erofs_exit_shrinker(void)
{
	unregister_shrinker(&erofs_shrinker_info);
}
#endif	/* !CONFIG_EROFS_FS_ZIP */