1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h"

#include <linux/interval_tree_generic.h>
20 struct rb_node bn_rbnode;
22 /* First set bit of this interval and subtree. */
25 /* Last set bit of this interval. */
28 /* Last set bit of this subtree. Do not touch this. */
29 uint64_t __bn_subtree_last;
32 /* Define our own interval tree type with uint64_t parameters. */
34 #define START(node) ((node)->bn_start)
35 #define LAST(node) ((node)->bn_last)
38 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
39 * forward-declare them anyway for clarity.
42 xbitmap_tree_insert(struct xbitmap_node *node, struct rb_root_cached *root);
45 xbitmap_tree_remove(struct xbitmap_node *node, struct rb_root_cached *root);
47 static inline struct xbitmap_node *
48 xbitmap_tree_iter_first(struct rb_root_cached *root, uint64_t start,
51 static inline struct xbitmap_node *
52 xbitmap_tree_iter_next(struct xbitmap_node *node, uint64_t start,
55 INTERVAL_TREE_DEFINE(struct xbitmap_node, bn_rbnode, uint64_t,
56 __bn_subtree_last, START, LAST, static inline, xbitmap_tree)
/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap_node, bn_rbnode))
66 /* Clear a range of this bitmap. */
69 struct xbitmap *bitmap,
73 struct xbitmap_node *bn;
74 struct xbitmap_node *new_bn;
75 uint64_t last = start + len - 1;
77 while ((bn = xbitmap_tree_iter_first(&bitmap->xb_root, start, last))) {
78 if (bn->bn_start < start && bn->bn_last > last) {
79 uint64_t old_last = bn->bn_last;
81 /* overlaps with the entire clearing range */
82 xbitmap_tree_remove(bn, &bitmap->xb_root);
83 bn->bn_last = start - 1;
84 xbitmap_tree_insert(bn, &bitmap->xb_root);
87 new_bn = kmalloc(sizeof(struct xbitmap_node),
91 new_bn->bn_start = last + 1;
92 new_bn->bn_last = old_last;
93 xbitmap_tree_insert(new_bn, &bitmap->xb_root);
94 } else if (bn->bn_start < start) {
95 /* overlaps with the left side of the clearing range */
96 xbitmap_tree_remove(bn, &bitmap->xb_root);
97 bn->bn_last = start - 1;
98 xbitmap_tree_insert(bn, &bitmap->xb_root);
99 } else if (bn->bn_last > last) {
100 /* overlaps with the right side of the clearing range */
101 xbitmap_tree_remove(bn, &bitmap->xb_root);
102 bn->bn_start = last + 1;
103 xbitmap_tree_insert(bn, &bitmap->xb_root);
106 /* in the middle of the clearing range */
107 xbitmap_tree_remove(bn, &bitmap->xb_root);
115 /* Set a range of this bitmap. */
118 struct xbitmap *bitmap,
122 struct xbitmap_node *left;
123 struct xbitmap_node *right;
124 uint64_t last = start + len - 1;
127 /* Is this whole range already set? */
128 left = xbitmap_tree_iter_first(&bitmap->xb_root, start, last);
129 if (left && left->bn_start <= start && left->bn_last >= last)
132 /* Clear out everything in the range we want to set. */
133 error = xbitmap_clear(bitmap, start, len);
137 /* Do we have a left-adjacent extent? */
138 left = xbitmap_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
139 ASSERT(!left || left->bn_last + 1 == start);
141 /* Do we have a right-adjacent extent? */
142 right = xbitmap_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
143 ASSERT(!right || right->bn_start == last + 1);
146 /* combine left and right adjacent extent */
147 xbitmap_tree_remove(left, &bitmap->xb_root);
148 xbitmap_tree_remove(right, &bitmap->xb_root);
149 left->bn_last = right->bn_last;
150 xbitmap_tree_insert(left, &bitmap->xb_root);
153 /* combine with left extent */
154 xbitmap_tree_remove(left, &bitmap->xb_root);
155 left->bn_last = last;
156 xbitmap_tree_insert(left, &bitmap->xb_root);
158 /* combine with right extent */
159 xbitmap_tree_remove(right, &bitmap->xb_root);
160 right->bn_start = start;
161 xbitmap_tree_insert(right, &bitmap->xb_root);
164 left = kmalloc(sizeof(struct xbitmap_node), XCHK_GFP_FLAGS);
167 left->bn_start = start;
168 left->bn_last = last;
169 xbitmap_tree_insert(left, &bitmap->xb_root);
175 /* Free everything related to this bitmap. */
178 struct xbitmap *bitmap)
180 struct xbitmap_node *bn;
182 while ((bn = xbitmap_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) {
183 xbitmap_tree_remove(bn, &bitmap->xb_root);
188 /* Set up a per-AG block bitmap. */
191 struct xbitmap *bitmap)
193 bitmap->xb_root = RB_ROOT_CACHED;
197 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
199 * The intent is that callers will iterate the rmapbt for all of its records
200 * for a given owner to generate @bitmap; and iterate all the blocks of the
201 * metadata structures that are not being rebuilt and have the same rmapbt
202 * owner to generate @sub. This routine subtracts all the extents
203 * mentioned in sub from all the extents linked in @bitmap, which leaves
204 * @bitmap as the list of blocks that are not accounted for, which we assume
205 * are the dead blocks of the old metadata structure. The blocks mentioned in
206 * @bitmap can be reaped.
208 * This is the logical equivalent of bitmap &= ~sub.
212 struct xbitmap *bitmap,
215 struct xbitmap_node *bn;
218 if (xbitmap_empty(bitmap) || xbitmap_empty(sub))
221 for_each_xbitmap_extent(bn, sub) {
222 error = xbitmap_clear(bitmap, bn->bn_start,
223 bn->bn_last - bn->bn_start + 1);
/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree.  Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records.  For
 * the first btree record, we'll observe that bc_levels[0].ptr == 1, so we
 * record that we saw block 1.  Then we observe that bc_levels[1].ptr == 1, so
 * we record block 4.  The list is [1, 4].
 *
 * For the second btree record, we see that bc_levels[0].ptr == 2, so we exit
 * the loop.  The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2.  Now
 * bc_levels[0].ptr == 1 again, so we record that we saw block 2.  We see that
 * bc_levels[1].ptr == 2, so we exit the loop.  The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_levels[0].ptr == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3.
 * bc_levels[0].ptr == 1, so we add 3 to the list.  Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */
266 /* Mark a btree block to the agblock bitmap. */
268 xagb_bitmap_visit_btblock(
269 struct xfs_btree_cur *cur,
273 struct xagb_bitmap *bitmap = priv;
278 xfs_btree_get_block(cur, level, &bp);
282 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
283 agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
285 return xagb_bitmap_set(bitmap, agbno, 1);
288 /* Mark all (per-AG) btree blocks in the agblock bitmap. */
290 xagb_bitmap_set_btblocks(
291 struct xagb_bitmap *bitmap,
292 struct xfs_btree_cur *cur)
294 return xfs_btree_visit_blocks(cur, xagb_bitmap_visit_btblock,
295 XFS_BTREE_VISIT_ALL, bitmap);
299 * Record all the buffers pointed to by the btree cursor. Callers already
300 * engaged in a btree walk should call this function to capture the list of
301 * blocks going from the leaf towards the root.
304 xbitmap_set_btcur_path(
305 struct xbitmap *bitmap,
306 struct xfs_btree_cur *cur)
313 for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
314 xfs_btree_get_block(cur, i, &bp);
317 fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
318 error = xbitmap_set(bitmap, fsb, 1);
326 /* Collect a btree's block in the bitmap. */
328 xbitmap_collect_btblock(
329 struct xfs_btree_cur *cur,
333 struct xbitmap *bitmap = priv;
337 xfs_btree_get_block(cur, level, &bp);
341 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
342 return xbitmap_set(bitmap, fsbno, 1);
345 /* Walk the btree and mark the bitmap wherever a btree block is found. */
347 xbitmap_set_btblocks(
348 struct xbitmap *bitmap,
349 struct xfs_btree_cur *cur)
351 return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
352 XFS_BTREE_VISIT_ALL, bitmap);
355 /* How many bits are set in this bitmap? */
358 struct xbitmap *bitmap)
360 struct xbitmap_node *bn;
363 for_each_xbitmap_extent(bn, bitmap)
364 ret += bn->bn_last - bn->bn_start + 1;
369 /* Call a function for every run of set bits in this bitmap. */
372 struct xbitmap *bitmap,
376 struct xbitmap_node *bn;
379 for_each_xbitmap_extent(bn, bitmap) {
380 error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
388 struct xbitmap_walk_bits {
389 xbitmap_walk_bits_fn fn;
393 /* Walk all the bits in a run. */
395 xbitmap_walk_bits_in_run(
400 struct xbitmap_walk_bits *wb = priv;
404 for (i = start; i < start + len; i++) {
405 error = wb->fn(i, wb->priv);
413 /* Call a function for every set bit in this bitmap. */
416 struct xbitmap *bitmap,
417 xbitmap_walk_bits_fn fn,
420 struct xbitmap_walk_bits wb = {.fn = fn, .priv = priv};
422 return xbitmap_walk(bitmap, xbitmap_walk_bits_in_run, &wb);
425 /* Does this bitmap have no bits set at all? */
428 struct xbitmap *bitmap)
430 return bitmap->xb_root.rb_root.rb_node == NULL;
433 /* Is the start of the range set or clear? And for how long? */
436 struct xbitmap *bitmap,
440 struct xbitmap_node *bn;
441 uint64_t last = start + *len - 1;
443 bn = xbitmap_tree_iter_first(&bitmap->xb_root, start, last);
446 if (bn->bn_start <= start) {
447 if (bn->bn_last < last)
448 *len = bn->bn_last - start + 1;
451 *len = bn->bn_start - start;