// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_ctx;

/*
 * The following value only influences the performance.
 *
 * This determines how many stripes would be submitted in one go,
 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
 */
#define SCRUB_STRIPES_PER_GROUP         8

/*
 * How many groups we have for each sctx.
 *
 * This would be 8MiB per device, the same value as the old scrub in-flight
 * bios size limit.
 */
#define SCRUB_GROUPS_PER_SCTX           16

#define SCRUB_TOTAL_STRIPES             (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
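
/*
 * For illustration, assuming BTRFS_STRIPE_LEN is 64KiB:
 *
 *   one group = SCRUB_STRIPES_PER_GROUP * 64KiB  = 8 * 64KiB   = 512KiB
 *   one sctx  = SCRUB_GROUPS_PER_SCTX   * 512KiB = 16 * 512KiB = 8MiB
 *
 * which matches the old per-device in-flight bio size limit mentioned above.
 */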

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK     (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)

/* Represent one sector and its needed info to verify the content. */
struct scrub_sector_verification {
        bool is_metadata;

        union {
                /*
                 * Csum pointer for data csum verification.  Should point to a
                 * sector csum inside scrub_stripe::csums.
                 *
                 * NULL if this data sector has no csum.
                 */
                u8 *csum;

                /*
                 * Extra info for metadata verification.  All sectors inside a
                 * tree block share the same generation.
                 */
                u64 generation;
        };
};

enum scrub_stripe_flags {
        /* Set when @mirror_num, @dev, @physical and @logical are set. */
        SCRUB_STRIPE_FLAG_INITIALIZED,

        /* Set when the read-repair is finished. */
        SCRUB_STRIPE_FLAG_REPAIR_DONE,

        /*
         * Set for data stripes if the scrub is triggered from a P/Q stripe.
         * During such a scrub, we should not report errors in data stripes,
         * nor update the accounting.
         */
        SCRUB_STRIPE_FLAG_NO_REPORT,
};

#define SCRUB_STRIPE_PAGES              (BTRFS_STRIPE_LEN / PAGE_SIZE)

/*
 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 */
struct scrub_stripe {
        struct scrub_ctx *sctx;
        struct btrfs_block_group *bg;

        struct page *pages[SCRUB_STRIPE_PAGES];
        struct scrub_sector_verification *sectors;

        struct btrfs_device *dev;
        u64 logical;
        u64 physical;

        u16 mirror_num;

        /* Should be BTRFS_STRIPE_LEN / sectorsize. */
        u16 nr_sectors;

        /*
         * How many data/meta extents are in this stripe.  Only for scrub status
         * reporting purposes.
         */
        u16 nr_data_extents;
        u16 nr_meta_extents;

        atomic_t pending_io;
        wait_queue_head_t io_wait;
        wait_queue_head_t repair_wait;

        /*
         * Indicate the states of the stripe.  Bits are defined in
         * scrub_stripe_flags enum.
         */
        unsigned long state;

        /* Indicate which sectors are covered by extent items. */
        unsigned long extent_sector_bitmap;

        /*
         * The errors hit during the initial read of the stripe.
         *
         * Used for error reporting and repair.
         *
         * The remaining init_nr_* counters record the number of errors hit,
         * and are only used for error reporting.
         */
        unsigned long init_error_bitmap;
        unsigned int init_nr_io_errors;
        unsigned int init_nr_csum_errors;
        unsigned int init_nr_meta_errors;

        /*
         * The following error bitmaps are all for the current status.
         * Every time we submit a new read, these bitmaps may be updated.
         *
         * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
         *
         * IO and csum errors can happen for both metadata and data.
         */
        unsigned long error_bitmap;
        unsigned long io_error_bitmap;
        unsigned long csum_error_bitmap;
        unsigned long meta_error_bitmap;

        /* For writeback (repair or replace) error reporting. */
        unsigned long write_error_bitmap;

        /* Writeback can be concurrent, thus we need to protect the bitmap. */
        spinlock_t write_error_lock;

        /*
         * Checksum for the whole stripe if this stripe is inside a data block
         * group.
         */
        u8 *csums;

        struct work_struct work;
};

struct scrub_ctx {
        struct scrub_stripe     stripes[SCRUB_TOTAL_STRIPES];
        struct scrub_stripe     *raid56_data_stripes;
        struct btrfs_fs_info    *fs_info;
        struct btrfs_path       extent_path;
        struct btrfs_path       csum_path;
        int                     first_free;
        int                     cur_stripe;
        atomic_t                cancel_req;
        int                     readonly;
        int                     sectors_per_bio;

        /* State of IO submission throttling affecting the associated device */
        ktime_t                 throttle_deadline;
        u64                     throttle_sent;

        int                     is_dev_replace;
        u64                     write_pointer;

        struct mutex            wr_lock;
        struct btrfs_device     *wr_tgtdev;

        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;

        /*
         * Use a ref counter to avoid use-after-free issues. Scrub workers
         * decrement bios_in_flight and workers_pending and then do a wakeup
         * on the list_wait wait queue. We must ensure the main scrub task
         * doesn't free the scrub context before or while the workers are
         * doing the wakeup() call.
         */
        refcount_t              refs;
};

struct scrub_warning {
        struct btrfs_path       *path;
        u64                     extent_item_size;
        const char              *errstr;
        u64                     physical;
        u64                     logical;
        struct btrfs_device     *dev;
};

static void release_scrub_stripe(struct scrub_stripe *stripe)
{
        if (!stripe)
                return;

        for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
                if (stripe->pages[i])
                        __free_page(stripe->pages[i]);
                stripe->pages[i] = NULL;
        }
        kfree(stripe->sectors);
        kfree(stripe->csums);
        stripe->sectors = NULL;
        stripe->csums = NULL;
        stripe->sctx = NULL;
        stripe->state = 0;
}

static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
                             struct scrub_stripe *stripe)
{
        int ret;

        memset(stripe, 0, sizeof(*stripe));

        stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
        stripe->state = 0;

        init_waitqueue_head(&stripe->io_wait);
        init_waitqueue_head(&stripe->repair_wait);
        atomic_set(&stripe->pending_io, 0);
        spin_lock_init(&stripe->write_error_lock);

        ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages);
        if (ret < 0)
                goto error;

        stripe->sectors = kcalloc(stripe->nr_sectors,
                                  sizeof(struct scrub_sector_verification),
                                  GFP_KERNEL);
        if (!stripe->sectors)
                goto error;

        stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
                                fs_info->csum_size, GFP_KERNEL);
        if (!stripe->csums)
                goto error;
        return 0;
error:
        release_scrub_stripe(stripe);
        return -ENOMEM;
}

static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
{
        wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
}

static void scrub_put_ctx(struct scrub_ctx *sctx);

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        while (atomic_read(&fs_info->scrub_pause_req)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                   atomic_read(&fs_info->scrub_pause_req) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
        atomic_inc(&fs_info->scrubs_paused);
        wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
        mutex_lock(&fs_info->scrub_lock);
        __scrub_blocked_if_needed(fs_info);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);

        wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        scrub_pause_on(fs_info);
        scrub_pause_off(fs_info);
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
        int i;

        if (!sctx)
                return;

        for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
                release_scrub_stripe(&sctx->stripes[i]);

        kvfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
        if (refcount_dec_and_test(&sctx->refs))
                scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
                struct btrfs_fs_info *fs_info, int is_dev_replace)
{
        struct scrub_ctx *sctx;
        int             i;

        /*
         * Since sctx has 128 inline stripes, it can go beyond 64K easily.
         * Use kvzalloc().
         */
        sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
        if (!sctx)
                goto nomem;
        refcount_set(&sctx->refs, 1);
        sctx->is_dev_replace = is_dev_replace;
        sctx->fs_info = fs_info;
        sctx->extent_path.search_commit_root = 1;
        sctx->extent_path.skip_locking = 1;
        sctx->csum_path.search_commit_root = 1;
        sctx->csum_path.skip_locking = 1;
        for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
                int ret;

                ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
                if (ret < 0)
                        goto nomem;
                sctx->stripes[i].sctx = sctx;
        }
        sctx->first_free = 0;
        atomic_set(&sctx->cancel_req, 0);

        spin_lock_init(&sctx->stat_lock);
        sctx->throttle_deadline = 0;

        mutex_init(&sctx->wr_lock);
        if (is_dev_replace) {
                WARN_ON(!fs_info->dev_replace.tgtdev);
                sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
        }

        return sctx;

nomem:
        scrub_free_ctx(sctx);
        return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
                                     u64 root, void *warn_ctx)
{
        u32 nlink;
        int ret;
        int i;
        unsigned nofs_flag;
        struct extent_buffer *eb;
        struct btrfs_inode_item *inode_item;
        struct scrub_warning *swarn = warn_ctx;
        struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key key;

        local_root = btrfs_get_fs_root(fs_info, root, true);
        if (IS_ERR(local_root)) {
                ret = PTR_ERR(local_root);
                goto err;
        }

        /*
         * This makes the path point to (inum INODE_ITEM ioff).
         */
        key.objectid = inum;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
        if (ret) {
                btrfs_put_root(local_root);
                btrfs_release_path(swarn->path);
                goto err;
        }

        eb = swarn->path->nodes[0];
        inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
                                        struct btrfs_inode_item);
        nlink = btrfs_inode_nlink(eb, inode_item);
        btrfs_release_path(swarn->path);

        /*
         * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
         * uses GFP_NOFS in this context, so we keep it consistent but it does
         * not seem to be strictly necessary.
         */
        nofs_flag = memalloc_nofs_save();
        ipath = init_ipath(4096, local_root, swarn->path);
        memalloc_nofs_restore(nofs_flag);
        if (IS_ERR(ipath)) {
                btrfs_put_root(local_root);
                ret = PTR_ERR(ipath);
                ipath = NULL;
                goto err;
        }
        ret = paths_from_inode(inum, ipath);

        if (ret < 0)
                goto err;

        /*
         * We deliberately ignore the fact that ipath might have been too small
         * to hold all of the paths here.
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
                btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
                                  swarn->errstr, swarn->logical,
                                  btrfs_dev_name(swarn->dev),
                                  swarn->physical,
                                  root, inum, offset,
                                  fs_info->sectorsize, nlink,
                                  (char *)(unsigned long)ipath->fspath->val[i]);

        btrfs_put_root(local_root);
        free_ipath(ipath);
        return 0;

err:
        btrfs_warn_in_rcu(fs_info,
                          "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
                          swarn->errstr, swarn->logical,
                          btrfs_dev_name(swarn->dev),
                          swarn->physical,
                          root, inum, offset, ret);

        free_ipath(ipath);
        return 0;
}

static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
                                       bool is_super, u64 logical, u64 physical)
{
        struct btrfs_fs_info *fs_info = dev->fs_info;
        struct btrfs_path *path;
        struct btrfs_key found_key;
        struct extent_buffer *eb;
        struct btrfs_extent_item *ei;
        struct scrub_warning swarn;
        u64 flags = 0;
        u32 item_size;
        int ret;

        /* Super block error, no need to search extent tree. */
        if (is_super) {
                btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
                                  errstr, btrfs_dev_name(dev), physical);
                return;
        }
        path = btrfs_alloc_path();
        if (!path)
                return;

        swarn.physical = physical;
        swarn.logical = logical;
        swarn.errstr = errstr;
        swarn.dev = NULL;

        ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
                                  &flags);
        if (ret < 0)
                goto out;

        swarn.extent_item_size = found_key.offset;

        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size(eb, path->slots[0]);

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                unsigned long ptr = 0;
                u8 ref_level;
                u64 ref_root;

                while (true) {
                        ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
                                                      item_size, &ref_root,
                                                      &ref_level);
                        if (ret < 0) {
                                btrfs_warn(fs_info,
                                "failed to resolve tree backref for logical %llu: %d",
                                                  swarn.logical, ret);
                                break;
                        }
                        if (ret > 0)
                                break;
                        btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
                                errstr, swarn.logical, btrfs_dev_name(dev),
                                swarn.physical, (ref_level ? "node" : "leaf"),
                                ref_level, ref_root);
                }
                btrfs_release_path(path);
        } else {
                struct btrfs_backref_walk_ctx ctx = { 0 };

                btrfs_release_path(path);

                ctx.bytenr = found_key.objectid;
                ctx.extent_item_pos = swarn.logical - found_key.objectid;
                ctx.fs_info = fs_info;

                swarn.path = path;
                swarn.dev = dev;

                iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
        }

out:
        btrfs_free_path(path);
}

static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
        int ret = 0;
        u64 length;

        if (!btrfs_is_zoned(sctx->fs_info))
                return 0;

        if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
                return 0;

        if (sctx->write_pointer < physical) {
                length = physical - sctx->write_pointer;

                ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
                                                sctx->write_pointer, length);
                if (!ret)
                        sctx->write_pointer = physical;
        }
        return ret;
}
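
/*
 * For illustration: on a zoned target, if sctx->write_pointer is at
 * physical X and the next repair/replace write lands at X + 128KiB, the
 * 128KiB gap is filled with a zeroout first, so the zone's hardware write
 * pointer stays in sync (zones only accept sequential writes).
 */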

static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;

        return stripe->pages[page_index];
}

static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
                                                 int sector_nr)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;

        return offset_in_page(sector_nr << fs_info->sectorsize_bits);
}
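
/*
 * A worked example of the two helpers above, assuming 4KiB pages
 * (PAGE_SHIFT == 12) and 4KiB sectorsize: sector_nr 5 maps to byte offset
 * 5 << 12 = 20480, thus page_index 5 and in-page offset 0.
 * With a 64KiB page size instead, sectors 0-15 would all map to page 0 at
 * in-page offsets 0, 4096, 8192, ... (the subpage case).
 */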

static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
        const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
        const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
        const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        u8 on_disk_csum[BTRFS_CSUM_SIZE];
        u8 calculated_csum[BTRFS_CSUM_SIZE];
        struct btrfs_header *header;

        /*
         * Here we don't have a good way to attach the pages (and subpages)
         * to a dummy extent buffer, thus we have to directly grab the members
         * from pages.
         */
        header = (struct btrfs_header *)(page_address(first_page) + first_off);
        memcpy(on_disk_csum, header->csum, fs_info->csum_size);

        if (logical != btrfs_stack_header_bytenr(header)) {
                bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad bytenr, has %llu want %llu",
                              logical, stripe->mirror_num,
                              btrfs_stack_header_bytenr(header), logical);
                return;
        }
        if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
                   BTRFS_FSID_SIZE) != 0) {
                bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad fsid, has %pU want %pU",
                              logical, stripe->mirror_num,
                              header->fsid, fs_info->fs_devices->fsid);
                return;
        }
        if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
                   BTRFS_UUID_SIZE) != 0) {
                bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
                              logical, stripe->mirror_num,
                              header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
                return;
        }

        /* Now check tree block csum. */
        shash->tfm = fs_info->csum_shash;
        crypto_shash_init(shash);
        crypto_shash_update(shash, page_address(first_page) + first_off +
                            BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);

        for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
                struct page *page = scrub_stripe_get_page(stripe, i);
                unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);

                crypto_shash_update(shash, page_address(page) + page_off,
                                    fs_info->sectorsize);
        }

        crypto_shash_final(shash, calculated_csum);
        if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
                bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
                              logical, stripe->mirror_num,
                              CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
                              CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
                return;
        }
        if (stripe->sectors[sector_nr].generation !=
            btrfs_stack_header_generation(header)) {
                bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad generation, has %llu want %llu",
                              logical, stripe->mirror_num,
                              btrfs_stack_header_generation(header),
                              stripe->sectors[sector_nr].generation);
                return;
        }
        bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
        bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
        bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
}
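
/*
 * Note the tree block granularity above: e.g. with 16KiB nodesize and
 * 4KiB sectorsize, sectors_per_tree == 4, so one bad tree block sets
 * (or clears) four consecutive bits in the error bitmaps at once.
 */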

static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
        const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
        struct page *page = scrub_stripe_get_page(stripe, sector_nr);
        unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
        u8 csum_buf[BTRFS_CSUM_SIZE];
        int ret;

        ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);

        /* Sector not utilized, skip it. */
        if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
                return;

        /* IO error, no need to check. */
        if (test_bit(sector_nr, &stripe->io_error_bitmap))
                return;

        /* Metadata, verify the full tree block. */
        if (sector->is_metadata) {
                /*
                 * Check if the tree block crosses the stripe boundary.  If it
                 * crosses the boundary, we cannot verify it and can only give
                 * a warning.
                 *
                 * This can only happen on a very old filesystem where chunks
                 * are not ensured to be stripe aligned.
                 */
                if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
                        btrfs_warn_rl(fs_info,
                        "tree block at %llu crosses stripe boundary %llu",
                                      stripe->logical +
                                      (sector_nr << fs_info->sectorsize_bits),
                                      stripe->logical);
                        return;
                }
                scrub_verify_one_metadata(stripe, sector_nr);
                return;
        }

        /*
         * Data is easier, we just verify the data csum (if we have it).  For
         * cases without csum, we have no other choice but to trust it.
         */
        if (!sector->csum) {
                clear_bit(sector_nr, &stripe->error_bitmap);
                return;
        }

        ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
        if (ret < 0) {
                set_bit(sector_nr, &stripe->csum_error_bitmap);
                set_bit(sector_nr, &stripe->error_bitmap);
        } else {
                clear_bit(sector_nr, &stripe->csum_error_bitmap);
                clear_bit(sector_nr, &stripe->error_bitmap);
        }
}

/* Verify specified sectors of a stripe. */
static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
        int sector_nr;

        for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
                scrub_verify_one_sector(stripe, sector_nr);
                if (stripe->sectors[sector_nr].is_metadata)
                        sector_nr += sectors_per_tree - 1;
        }
}

static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
{
        int i;

        for (i = 0; i < stripe->nr_sectors; i++) {
                if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
                    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
                        break;
        }
        ASSERT(i < stripe->nr_sectors);
        return i;
}

/*
 * Repair read is different from the regular read:
 *
 * - Only reads the failed sectors
 * - May have extra blocksize limits
 */
static void scrub_repair_read_endio(struct btrfs_bio *bbio)
{
        struct scrub_stripe *stripe = bbio->private;
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct bio_vec *bvec;
        int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
        u32 bio_size = 0;
        int i;

        ASSERT(sector_nr < stripe->nr_sectors);

        bio_for_each_bvec_all(bvec, &bbio->bio, i)
                bio_size += bvec->bv_len;

        if (bbio->bio.bi_status) {
                bitmap_set(&stripe->io_error_bitmap, sector_nr,
                           bio_size >> fs_info->sectorsize_bits);
                bitmap_set(&stripe->error_bitmap, sector_nr,
                           bio_size >> fs_info->sectorsize_bits);
        } else {
                bitmap_clear(&stripe->io_error_bitmap, sector_nr,
                             bio_size >> fs_info->sectorsize_bits);
        }
        bio_put(&bbio->bio);
        if (atomic_dec_and_test(&stripe->pending_io))
                wake_up(&stripe->io_wait);
}

static int calc_next_mirror(int mirror, int num_copies)
{
        ASSERT(mirror <= num_copies);
        return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}
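
/*
 * E.g. with num_copies == 3 (RAID1C3), starting from mirror 2 the
 * calc_next_mirror() cycle is 2 -> 3 -> 1 -> 2, so every mirror is
 * visited exactly once before wrapping around to the starting one.
 */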

static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
                                            int mirror, int blocksize, bool wait)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct btrfs_bio *bbio = NULL;
        const unsigned long old_error_bitmap = stripe->error_bitmap;
        int i;

        ASSERT(stripe->mirror_num >= 1);
        ASSERT(atomic_read(&stripe->pending_io) == 0);

        for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
                struct page *page;
                int pgoff;
                int ret;

                page = scrub_stripe_get_page(stripe, i);
                pgoff = scrub_stripe_get_page_offset(stripe, i);

                /* The current sector cannot be merged, submit the bio. */
                if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
                             bbio->bio.bi_iter.bi_size >= blocksize)) {
                        ASSERT(bbio->bio.bi_iter.bi_size);
                        atomic_inc(&stripe->pending_io);
                        btrfs_submit_bio(bbio, mirror);
                        if (wait)
                                wait_scrub_stripe_io(stripe);
                        bbio = NULL;
                }

                if (!bbio) {
                        bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
                                fs_info, scrub_repair_read_endio, stripe);
                        bbio->bio.bi_iter.bi_sector = (stripe->logical +
                                (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
                }

                ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
                ASSERT(ret == fs_info->sectorsize);
        }
        if (bbio) {
                ASSERT(bbio->bio.bi_iter.bi_size);
                atomic_inc(&stripe->pending_io);
                btrfs_submit_bio(bbio, mirror);
                if (wait)
                        wait_scrub_stripe_io(stripe);
        }
}

static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
                                       struct scrub_stripe *stripe)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        struct btrfs_device *dev = NULL;
        u64 physical = 0;
        int nr_data_sectors = 0;
        int nr_meta_sectors = 0;
        int nr_nodatacsum_sectors = 0;
        int nr_repaired_sectors = 0;
        int sector_nr;

        if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
                return;

        /*
         * Initialize the info needed for error reporting.
         *
         * Although our scrub_stripe infrastructure is mostly based on
         * btrfs_submit_bio() and thus has no need for dev/physical, error
         * reporting still needs dev and physical.
         */
        if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
                u64 mapped_len = fs_info->sectorsize;
                struct btrfs_io_context *bioc = NULL;
                int stripe_index = stripe->mirror_num - 1;
                int ret;

                /* For scrub, our mirror_num should always start at 1. */
                ASSERT(stripe->mirror_num >= 1);
                ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
                                      stripe->logical, &mapped_len, &bioc,
                                      NULL, NULL, 1);
                /*
                 * If we failed, dev will be NULL, and later detailed reports
                 * will just be skipped.
                 */
                if (ret < 0)
                        goto skip;
                physical = bioc->stripes[stripe_index].physical;
                dev = bioc->stripes[stripe_index].dev;
                btrfs_put_bioc(bioc);
        }

skip:
        for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
                bool repaired = false;

                if (stripe->sectors[sector_nr].is_metadata) {
                        nr_meta_sectors++;
                } else {
                        nr_data_sectors++;
                        if (!stripe->sectors[sector_nr].csum)
                                nr_nodatacsum_sectors++;
                }

                if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
                    !test_bit(sector_nr, &stripe->error_bitmap)) {
                        nr_repaired_sectors++;
                        repaired = true;
                }

                /* Good sector from the beginning, nothing needs to be done. */
                if (!test_bit(sector_nr, &stripe->init_error_bitmap))
                        continue;

                /*
                 * Report errors for the corrupted sectors.  If repaired, just
                 * output a message saying the error has been fixed.
                 */
                if (repaired) {
                        if (dev) {
                                btrfs_err_rl_in_rcu(fs_info,
                        "fixed up error at logical %llu on dev %s physical %llu",
                                            stripe->logical, btrfs_dev_name(dev),
                                            physical);
                        } else {
                                btrfs_err_rl_in_rcu(fs_info,
                        "fixed up error at logical %llu on mirror %u",
                                            stripe->logical, stripe->mirror_num);
                        }
                        continue;
                }

                /* The remaining cases are all unrepaired sectors. */
                if (dev) {
                        btrfs_err_rl_in_rcu(fs_info,
        "unable to fixup (regular) error at logical %llu on dev %s physical %llu",
                                            stripe->logical, btrfs_dev_name(dev),
                                            physical);
                } else {
                        btrfs_err_rl_in_rcu(fs_info,
        "unable to fixup (regular) error at logical %llu on mirror %u",
                                            stripe->logical, stripe->mirror_num);
                }

                if (test_bit(sector_nr, &stripe->io_error_bitmap))
                        if (__ratelimit(&rs) && dev)
                                scrub_print_common_warning("i/o error", dev, false,
                                                     stripe->logical, physical);
                if (test_bit(sector_nr, &stripe->csum_error_bitmap))
                        if (__ratelimit(&rs) && dev)
                                scrub_print_common_warning("checksum error", dev, false,
                                                     stripe->logical, physical);
                if (test_bit(sector_nr, &stripe->meta_error_bitmap))
                        if (__ratelimit(&rs) && dev)
                                scrub_print_common_warning("header error", dev, false,
                                                     stripe->logical, physical);
        }

        spin_lock(&sctx->stat_lock);
        sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
        sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
        sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
        sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
        sctx->stat.no_csum += nr_nodatacsum_sectors;
        sctx->stat.read_errors += stripe->init_nr_io_errors;
        sctx->stat.csum_errors += stripe->init_nr_csum_errors;
        sctx->stat.verify_errors += stripe->init_nr_meta_errors;
        sctx->stat.uncorrectable_errors +=
                bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
        sctx->stat.corrected_errors += nr_repaired_sectors;
        spin_unlock(&sctx->stat_lock);
}

static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
                                unsigned long write_bitmap, bool dev_replace);

/*
 * The main entry point for all read related scrub work, including:
 *
 * - Wait for the initial read to finish
 * - Verify and locate any bad sectors
 * - Go through the remaining mirrors and try to read as large a blocksize as
 *   possible
 * - Go through all mirrors (including the failed mirror) sector-by-sector
 * - Submit writeback for repaired sectors
 *
 * Writeback for dev-replace does not happen here, it needs extra
 * synchronization for zoned devices.
 */
static void scrub_stripe_read_repair_worker(struct work_struct *work)
{
        struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
        struct scrub_ctx *sctx = stripe->sctx;
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
                                          stripe->bg->length);
        int mirror;
        int i;

        ASSERT(stripe->mirror_num > 0);

        wait_scrub_stripe_io(stripe);
        scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
        /* Save the initial failed bitmap for later repair and report usage. */
        stripe->init_error_bitmap = stripe->error_bitmap;
        stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
                                                  stripe->nr_sectors);
        stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
                                                    stripe->nr_sectors);
        stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
                                                    stripe->nr_sectors);

        if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
                goto out;

        /*
         * Try all remaining mirrors.
         *
         * Here we still try to read as large a block as possible, as this is
         * faster and we have extra safety nets to rely on.
         */
        for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
             mirror != stripe->mirror_num;
             mirror = calc_next_mirror(mirror, num_copies)) {
                const unsigned long old_error_bitmap = stripe->error_bitmap;

                scrub_stripe_submit_repair_read(stripe, mirror,
                                                BTRFS_STRIPE_LEN, false);
                wait_scrub_stripe_io(stripe);
                scrub_verify_one_stripe(stripe, old_error_bitmap);
                if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
                        goto out;
        }

        /*
         * Last safety net, try re-checking all mirrors, including the failed
         * one, sector-by-sector.
         *
         * If one sector fails the drive's internal csum, the whole read
         * containing the offending sector would be marked as an error.
         * Thus here we do sector-by-sector reads.
         *
         * This can be slow, thus we only try it as the last resort.
         */

        for (i = 0, mirror = stripe->mirror_num;
             i < num_copies;
             i++, mirror = calc_next_mirror(mirror, num_copies)) {
                const unsigned long old_error_bitmap = stripe->error_bitmap;

                scrub_stripe_submit_repair_read(stripe, mirror,
                                                fs_info->sectorsize, true);
                wait_scrub_stripe_io(stripe);
                scrub_verify_one_stripe(stripe, old_error_bitmap);
                if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
                        goto out;
        }
out:
        /*
         * Submit the repaired sectors.  For zoned case, we cannot do repair
         * in-place, but queue the bg to be relocated.
         */
        if (btrfs_is_zoned(fs_info)) {
                if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
                        btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
        } else if (!sctx->readonly) {
                unsigned long repaired;

                bitmap_andnot(&repaired, &stripe->init_error_bitmap,
                              &stripe->error_bitmap, stripe->nr_sectors);
                scrub_write_sectors(sctx, stripe, repaired, false);
                wait_scrub_stripe_io(stripe);
        }

        scrub_stripe_report_errors(sctx, stripe);
        set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
        wake_up(&stripe->repair_wait);
}

static void scrub_read_endio(struct btrfs_bio *bbio)
{
        struct scrub_stripe *stripe = bbio->private;
        struct bio_vec *bvec;
        int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
        int num_sectors;
        u32 bio_size = 0;
        int i;

        ASSERT(sector_nr < stripe->nr_sectors);
        bio_for_each_bvec_all(bvec, &bbio->bio, i)
                bio_size += bvec->bv_len;
        num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;

        if (bbio->bio.bi_status) {
                bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
                bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
        } else {
                bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
        }
        bio_put(&bbio->bio);
        if (atomic_dec_and_test(&stripe->pending_io)) {
                wake_up(&stripe->io_wait);
                INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
                queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
        }
}

static void scrub_write_endio(struct btrfs_bio *bbio)
{
        struct scrub_stripe *stripe = bbio->private;
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct bio_vec *bvec;
        int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
        u32 bio_size = 0;
        int i;

        bio_for_each_bvec_all(bvec, &bbio->bio, i)
                bio_size += bvec->bv_len;

        if (bbio->bio.bi_status) {
                unsigned long flags;

                spin_lock_irqsave(&stripe->write_error_lock, flags);
                bitmap_set(&stripe->write_error_bitmap, sector_nr,
                           bio_size >> fs_info->sectorsize_bits);
                spin_unlock_irqrestore(&stripe->write_error_lock, flags);
        }
        bio_put(&bbio->bio);

        if (atomic_dec_and_test(&stripe->pending_io))
                wake_up(&stripe->io_wait);
}

static void scrub_submit_write_bio(struct scrub_ctx *sctx,
                                   struct scrub_stripe *stripe,
                                   struct btrfs_bio *bbio, bool dev_replace)
{
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        u32 bio_len = bbio->bio.bi_iter.bi_size;
        u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
                      stripe->logical;

        fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
        atomic_inc(&stripe->pending_io);
        btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
        if (!btrfs_is_zoned(fs_info))
                return;
        /*
         * For zoned writeback, queue depth must be 1, thus we must wait for
         * the write to finish before the next write.
         */
        wait_scrub_stripe_io(stripe);

        /*
         * We also need to update the write pointer if the write finished
         * successfully.
         */
        if (!test_bit(bio_off >> fs_info->sectorsize_bits,
                      &stripe->write_error_bitmap))
                sctx->write_pointer += bio_len;
}

/*
 * Submit the write bio(s) for the sectors specified by @write_bitmap.
 *
 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
 *
 * - Only needs logical bytenr and mirror_num
 *   Just like the scrub read path
 *
 * - Would only result in writes to the specified mirror
 *   Unlike the regular writeback path, which would write back to all stripes
 *
 * - Handle dev-replace and read-repair writeback differently
 */
static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
                                unsigned long write_bitmap, bool dev_replace)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct btrfs_bio *bbio = NULL;
        int sector_nr;

        for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
                struct page *page = scrub_stripe_get_page(stripe, sector_nr);
                unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
                int ret;

                /* We should only writeback sectors covered by an extent. */
                ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));

                /* Cannot merge with previous sector, submit the current one. */
                if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
                        scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
                        bbio = NULL;
                }
                if (!bbio) {
                        bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
                                               fs_info, scrub_write_endio, stripe);
                        bbio->bio.bi_iter.bi_sector = (stripe->logical +
                                (sector_nr << fs_info->sectorsize_bits)) >>
                                SECTOR_SHIFT;
                }
                ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
                ASSERT(ret == fs_info->sectorsize);
        }
        if (bbio)
                scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second.  Limit can be set via
 * /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
 */
static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
                                  unsigned int bio_size)
{
        const int time_slice = 1000;
        s64 delta;
        ktime_t now;
        u32 div;
        u64 bwlimit;

        bwlimit = READ_ONCE(device->scrub_speed_max);
        if (bwlimit == 0)
                return;

        /*
         * The slice is divided into intervals when the IO is submitted,
         * adjusted by bwlimit with a maximum of 64 intervals.
         */
        div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
        div = min_t(u32, 64, div);

        /* Start new epoch, set deadline */
        now = ktime_get();
        if (sctx->throttle_deadline == 0) {
                sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
                sctx->throttle_sent = 0;
        }

        /* Still in the time to send? */
        if (ktime_before(now, sctx->throttle_deadline)) {
                /* If current bio is within the limit, send it */
                sctx->throttle_sent += bio_size;
                if (sctx->throttle_sent <= div_u64(bwlimit, div))
                        return;

                /* We're over the limit, sleep until the rest of the slice */
                delta = ktime_ms_delta(sctx->throttle_deadline, now);
        } else {
                /* New request after deadline, start new epoch */
                delta = 0;
        }

        if (delta) {
                long timeout;

                timeout = div_u64(delta * HZ, 1000);
                schedule_timeout_interruptible(timeout);
        }

        /* Next call will start the deadline period */
        sctx->throttle_deadline = 0;
}
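
/*
 * A worked example of the throttling math above, assuming scrub_speed_max
 * is set to 100MiB/s:
 *
 *   div   = min(64, max(1, 100MiB / 16MiB)) = 6
 *   epoch = 1000ms / 6 ~ 166ms
 *   quota = 100MiB / 6 ~ 16.6MiB per epoch
 *
 * Once throttle_sent exceeds the quota before the deadline, we sleep for
 * the remainder of the epoch and a new one starts on the next call.
 */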

/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the logical offset of the
 * left-most data stripe.
 *
 * Return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
                                   struct map_lookup *map, u64 *offset,
                                   u64 *stripe_start)
{
        int i;
        int j = 0;
        u64 last_offset;
        const int data_stripes = nr_data_stripes(map);

        last_offset = (physical - map->stripes[num].physical) * data_stripes;
        if (stripe_start)
                *stripe_start = last_offset;

        *offset = last_offset;
        for (i = 0; i < data_stripes; i++) {
                u32 stripe_nr;
                u32 stripe_index;
                u32 rot;

                *offset = last_offset + btrfs_stripe_nr_to_offset(i);

                stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;

                /* Work out the disk rotation on this stripe-set */
                rot = stripe_nr % map->num_stripes;
                /* Calculate which stripe this data is located on */
                rot += i;
                stripe_index = rot % map->num_stripes;
                if (stripe_index == num)
                        return 0;
                if (stripe_index < num)
                        j++;
        }
        *offset = last_offset + btrfs_stripe_nr_to_offset(j);
        return 1;
}
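
/*
 * A sketch of the rotation handled above, for RAID5 with 3 devices
 * (2 data stripes + parity), looking at device index num == 0:
 *
 *   full stripe 0:  dev0 = D0  dev1 = D1  dev2 = P
 *   full stripe 1:  dev0 = P   dev1 = D2  dev2 = D3   (rotated by one)
 *
 * For the second physical stripe on dev0 no data stripe rotates onto it
 * (it holds parity), so the function returns 1 with *offset pointing at
 * D2, the left-most data stripe of that full stripe.
 */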

/*
 * Return 0 if the extent item range covers any byte of the range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @search_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
                                     u64 search_start, u64 search_len)
{
        struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
        u64 len;
        struct btrfs_key key;

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
               key.type == BTRFS_METADATA_ITEM_KEY);
        if (key.type == BTRFS_METADATA_ITEM_KEY)
                len = fs_info->nodesize;
        else
                len = key.offset;

        if (key.objectid + len <= search_start)
                return -1;
        if (key.objectid >= search_start + search_len)
                return 1;
        return 0;
}
1355
1356 /*
1357  * Locate one extent item which covers any byte in range
1358  * [@search_start, @search_start + @search_length)
1359  *
1360  * If the path is not initialized, we will initialize the search by doing
1361  * a btrfs_search_slot().
1362  * If the path is already initialized, we will use the path as the initial
1363  * slot, to avoid duplicated btrfs_search_slot() calls.
1364  *
1365  * NOTE: If an extent item starts before @search_start, we will still
1366  * return the extent item. This is for data extent crossing stripe boundary.
1367  *
1368  * Return 0 if we found such extent item, and @path will point to the extent item.
1369  * Return >0 if no such extent item can be found, and @path will be released.
1370  * Return <0 if we hit a fatal error, and @path will be released.
1371  */
1372 static int find_first_extent_item(struct btrfs_root *extent_root,
1373                                   struct btrfs_path *path,
1374                                   u64 search_start, u64 search_len)
1375 {
1376         struct btrfs_fs_info *fs_info = extent_root->fs_info;
1377         struct btrfs_key key;
1378         int ret;
1379
1380         /* Continue using the existing path */
1381         if (path->nodes[0])
1382                 goto search_forward;
1383
1384         if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1385                 key.type = BTRFS_METADATA_ITEM_KEY;
1386         else
1387                 key.type = BTRFS_EXTENT_ITEM_KEY;
1388         key.objectid = search_start;
1389         key.offset = (u64)-1;
1390
1391         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1392         if (ret < 0)
1393                 return ret;
1394
1395         ASSERT(ret > 0);
1396         /*
1397          * Here we intentionally pass 0 as @min_objectid, as there could be
1398          * an extent item starting before @search_start.
1399          */
1400         ret = btrfs_previous_extent_item(extent_root, path, 0);
1401         if (ret < 0)
1402                 return ret;
1403         /*
1404          * No matter whether we have found an extent item, the next loop will
1405          * properly do every check on the key.
1406          */
1407 search_forward:
1408         while (true) {
1409                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1410                 if (key.objectid >= search_start + search_len)
1411                         break;
1412                 if (key.type != BTRFS_METADATA_ITEM_KEY &&
1413                     key.type != BTRFS_EXTENT_ITEM_KEY)
1414                         goto next;
1415
1416                 ret = compare_extent_item_range(path, search_start, search_len);
1417                 if (ret == 0)
1418                         return ret;
1419                 if (ret > 0)
1420                         break;
1421 next:
1422                 path->slots[0]++;
1423                 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
1424                         ret = btrfs_next_leaf(extent_root, path);
1425                         if (ret) {
1426                                 /* Either no more items or a fatal error */
1427                                 btrfs_release_path(path);
1428                                 return ret;
1429                         }
1430                 }
1431         }
1432         btrfs_release_path(path);
1433         return 1;
1434 }
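
/*
 * Typical caller pattern for find_first_extent_item(), as a sketch only
 * (it mirrors how scrub_find_fill_first_stripe() below drives it, and is
 * not additional kernel code):
 *
 *     struct btrfs_path path = { 0 };
 *
 *     path.search_commit_root = 1;
 *     path.skip_locking = 1;
 *     while (cur < end) {
 *             ret = find_first_extent_item(extent_root, &path, cur,
 *                                          end - cur);
 *             if (ret)
 *                     break;  (>0: no more extents, <0: error)
 *             get_extent_info(&path, &start, &len, &flags, &gen);
 *             cur = start + len;
 *     }
 *     btrfs_release_path(&path);
 *
 * Reusing @path across iterations skips one btrfs_search_slot() per call.
 */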
1435
1436 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
1437                             u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1438 {
1439         struct btrfs_key key;
1440         struct btrfs_extent_item *ei;
1441
1442         btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1443         ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1444                key.type == BTRFS_EXTENT_ITEM_KEY);
1445         *extent_start_ret = key.objectid;
1446         if (key.type == BTRFS_METADATA_ITEM_KEY)
1447                 *size_ret = path->nodes[0]->fs_info->nodesize;
1448         else
1449                 *size_ret = key.offset;
1450         ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1451         *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1452         *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1453 }
1454
1455 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1456                                         u64 physical, u64 physical_end)
1457 {
1458         struct btrfs_fs_info *fs_info = sctx->fs_info;
1459         int ret = 0;
1460
1461         if (!btrfs_is_zoned(fs_info))
1462                 return 0;
1463
1464         mutex_lock(&sctx->wr_lock);
1465         if (sctx->write_pointer < physical_end) {
1466                 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1467                                                     physical,
1468                                                     sctx->write_pointer);
1469                 if (ret)
1470                         btrfs_err(fs_info,
1471                                   "zoned: failed to recover write pointer");
1472         }
1473         mutex_unlock(&sctx->wr_lock);
1474         btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1475
1476         return ret;
1477 }
1478
1479 static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1480                                  struct scrub_stripe *stripe,
1481                                  u64 extent_start, u64 extent_len,
1482                                  u64 extent_flags, u64 extent_gen)
1483 {
1484         for (u64 cur_logical = max(stripe->logical, extent_start);
1485              cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1486                                extent_start + extent_len);
1487              cur_logical += fs_info->sectorsize) {
1488                 const int nr_sector = (cur_logical - stripe->logical) >>
1489                                       fs_info->sectorsize_bits;
1490                 struct scrub_sector_verification *sector =
1491                                                 &stripe->sectors[nr_sector];
1492
1493                 set_bit(nr_sector, &stripe->extent_sector_bitmap);
1494                 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1495                         sector->is_metadata = true;
1496                         sector->generation = extent_gen;
1497                 }
1498         }
1499 }
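
/*
 * Example of the sector bookkeeping above (4K sectorsize assumed): for a
 * stripe at logical 1M and an extent [1M + 8K, 1M + 20K), the loop visits
 * cur_logical = 1M + 8K, 1M + 12K and 1M + 16K, setting bits 2, 3 and 4
 * in extent_sector_bitmap, since nr_sector = (cur_logical - 1M) >> 12.
 */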
1500
1501 static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1502 {
1503         stripe->extent_sector_bitmap = 0;
1504         stripe->init_error_bitmap = 0;
1505         stripe->init_nr_io_errors = 0;
1506         stripe->init_nr_csum_errors = 0;
1507         stripe->init_nr_meta_errors = 0;
1508         stripe->error_bitmap = 0;
1509         stripe->io_error_bitmap = 0;
1510         stripe->csum_error_bitmap = 0;
1511         stripe->meta_error_bitmap = 0;
1512 }
1513
1514 /*
1515  * Locate one stripe which has at least one extent in its range.
1516  *
1517  * Return 0 if found such stripe, and store its info into @stripe.
1518  * Return >0 if there is no such stripe in the specified range.
1519  * Return <0 for error.
1520  */
1521 static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
1522                                         struct btrfs_path *extent_path,
1523                                         struct btrfs_path *csum_path,
1524                                         struct btrfs_device *dev, u64 physical,
1525                                         int mirror_num, u64 logical_start,
1526                                         u32 logical_len,
1527                                         struct scrub_stripe *stripe)
1528 {
1529         struct btrfs_fs_info *fs_info = bg->fs_info;
1530         struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1531         struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1532         const u64 logical_end = logical_start + logical_len;
1533         u64 cur_logical = logical_start;
1534         u64 stripe_end;
1535         u64 extent_start;
1536         u64 extent_len;
1537         u64 extent_flags;
1538         u64 extent_gen;
1539         int ret;
1540
1541         memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1542                                    stripe->nr_sectors);
1543         scrub_stripe_reset_bitmaps(stripe);
1544
1545         /* The range must be inside the bg. */
1546         ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1547
1548         ret = find_first_extent_item(extent_root, extent_path, logical_start,
1549                                      logical_len);
1550         /* Either error or not found. */
1551         if (ret)
1552                 goto out;
1553         get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
1554                         &extent_gen);
1555         if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1556                 stripe->nr_meta_extents++;
1557         if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1558                 stripe->nr_data_extents++;
1559         cur_logical = max(extent_start, cur_logical);
1560
1561         /*
1562          * Round down to stripe boundary.
1563          *
1564          * The extra calculation against bg->start is to handle block groups
1565          * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
1566          */
1567         stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1568                           bg->start;
1569         stripe->physical = physical + stripe->logical - logical_start;
1570         stripe->dev = dev;
1571         stripe->bg = bg;
1572         stripe->mirror_num = mirror_num;
1573         stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1574
1575         /* Fill the first extent info into stripe->sectors[] array. */
1576         fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1577                              extent_flags, extent_gen);
1578         cur_logical = extent_start + extent_len;
1579
1580         /* Fill the extent info for the remaining sectors. */
1581         while (cur_logical <= stripe_end) {
1582                 ret = find_first_extent_item(extent_root, extent_path, cur_logical,
1583                                              stripe_end - cur_logical + 1);
1584                 if (ret < 0)
1585                         goto out;
1586                 if (ret > 0) {
1587                         ret = 0;
1588                         break;
1589                 }
1590                 get_extent_info(extent_path, &extent_start, &extent_len,
1591                                 &extent_flags, &extent_gen);
1592                 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1593                         stripe->nr_meta_extents++;
1594                 if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1595                         stripe->nr_data_extents++;
1596                 fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1597                                      extent_flags, extent_gen);
1598                 cur_logical = extent_start + extent_len;
1599         }
1600
1601         /* Now fill the data csum. */
1602         if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1603                 int sector_nr;
1604                 unsigned long csum_bitmap = 0;
1605
1606                 /* Csum space should have already been allocated. */
1607                 ASSERT(stripe->csums);
1608
1609                 /*
1610                  * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
1611                  * should contain at most 16 sectors.
1612                  */
1613                 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1614
1615                 ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
1616                                                 stripe->logical, stripe_end,
1617                                                 stripe->csums, &csum_bitmap);
1618                 if (ret < 0)
1619                         goto out;
1620                 if (ret > 0)
1621                         ret = 0;
1622
1623                 for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1624                         stripe->sectors[sector_nr].csum = stripe->csums +
1625                                 sector_nr * fs_info->csum_size;
1626                 }
1627         }
1628         set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1629 out:
1630         return ret;
1631 }
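
/*
 * Example of the bg-relative round_down in the function above (values
 * assumed): for a block group starting at 1M + 32K (not 64K aligned) and
 * the first extent found at cur_logical = 1M + 200K:
 *
 *     round_down((1M + 200K) - (1M + 32K), 64K) + (1M + 32K)
 *         = 128K + 1M + 32K = 1M + 160K
 *
 * i.e. stripes are BTRFS_STRIPE_LEN aligned relative to the block group
 * start, not to the absolute logical address.
 */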
1632
1633 static void scrub_reset_stripe(struct scrub_stripe *stripe)
1634 {
1635         scrub_stripe_reset_bitmaps(stripe);
1636
1637         stripe->nr_meta_extents = 0;
1638         stripe->nr_data_extents = 0;
1639         stripe->state = 0;
1640
1641         for (int i = 0; i < stripe->nr_sectors; i++) {
1642                 stripe->sectors[i].is_metadata = false;
1643                 stripe->sectors[i].csum = NULL;
1644                 stripe->sectors[i].generation = 0;
1645         }
1646 }
1647
1648 static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1649                                       struct scrub_stripe *stripe)
1650 {
1651         struct btrfs_fs_info *fs_info = sctx->fs_info;
1652         struct btrfs_bio *bbio;
1653         unsigned int nr_sectors = min_t(u64, BTRFS_STRIPE_LEN, stripe->bg->start +
1654                                       stripe->bg->length - stripe->logical) >>
1655                                   fs_info->sectorsize_bits;
1656         int mirror = stripe->mirror_num;
1657
1658         ASSERT(stripe->bg);
1659         ASSERT(stripe->mirror_num > 0);
1660         ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1661
1662         bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1663                                scrub_read_endio, stripe);
1664
1665         bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1666         /* Read the whole range inside the chunk boundary. */
1667         for (unsigned int cur = 0; cur < nr_sectors; cur++) {
1668                 struct page *page = scrub_stripe_get_page(stripe, cur);
1669                 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
1670                 int ret;
1671
1672                 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1673                 /* We should have allocated enough bio vectors. */
1674                 ASSERT(ret == fs_info->sectorsize);
1675         }
1676         atomic_inc(&stripe->pending_io);
1677
1678         /*
1679          * For dev-replace, either the user asks to avoid the source dev, or
1680          * the device is missing; in both cases we try the next mirror instead.
1681          */
1682         if (sctx->is_dev_replace &&
1683             (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1684              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1685              !stripe->dev->bdev)) {
1686                 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1687                                                   stripe->bg->length);
1688
1689                 mirror = calc_next_mirror(mirror, num_copies);
1690         }
1691         btrfs_submit_bio(bbio, mirror);
1692 }
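
/*
 * Note on the mirror switch above: calc_next_mirror() (defined earlier in
 * this file) steps to the next copy and wraps back to mirror 1 after the
 * last one, e.g. with num_copies = 2, mirror 1 -> 2 and mirror 2 -> 1.
 * So a dev-replace that must avoid, or cannot read, the source device
 * simply reads another copy.
 */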
1693
1694 static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1695 {
1696         int i;
1697
1698         for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1699                 if (stripe->sectors[i].is_metadata) {
1700                         struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1701
1702                         btrfs_err(fs_info,
1703                         "stripe %llu has unrepaired metadata sector at %llu",
1704                                   stripe->logical,
1705                                   stripe->logical + (i << fs_info->sectorsize_bits));
1706                         return true;
1707                 }
1708         }
1709         return false;
1710 }
1711
1712 static void submit_initial_group_read(struct scrub_ctx *sctx,
1713                                       unsigned int first_slot,
1714                                       unsigned int nr_stripes)
1715 {
1716         struct blk_plug plug;
1717
1718         ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1719         ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
1720
1721         scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1722                               btrfs_stripe_nr_to_offset(nr_stripes));
1723         blk_start_plug(&plug);
1724         for (int i = 0; i < nr_stripes; i++) {
1725                 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1726
1727                 /* Those stripes should be initialized. */
1728                 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1729                 scrub_submit_initial_read(sctx, stripe);
1730         }
1731         blk_finish_plug(&plug);
1732 }
1733
1734 static int flush_scrub_stripes(struct scrub_ctx *sctx)
1735 {
1736         struct btrfs_fs_info *fs_info = sctx->fs_info;
1737         struct scrub_stripe *stripe;
1738         const int nr_stripes = sctx->cur_stripe;
1739         int ret = 0;
1740
1741         if (!nr_stripes)
1742                 return 0;
1743
1744         ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1745
1746         /* Submit the stripes which are populated but not submitted. */
1747         if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1748                 const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1749
1750                 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1751         }
1752
1753         for (int i = 0; i < nr_stripes; i++) {
1754                 stripe = &sctx->stripes[i];
1755
1756                 wait_event(stripe->repair_wait,
1757                            test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1758         }
1759
1760         /* Submit for dev-replace. */
1761         if (sctx->is_dev_replace) {
1762                 /*
1763                  * For dev-replace, if we know there is something wrong with
1764                  * metadata, we should immediately abort.
1765                  */
1766                 for (int i = 0; i < nr_stripes; i++) {
1767                         if (stripe_has_metadata_error(&sctx->stripes[i])) {
1768                                 ret = -EIO;
1769                                 goto out;
1770                         }
1771                 }
1772                 for (int i = 0; i < nr_stripes; i++) {
1773                         unsigned long good;
1774
1775                         stripe = &sctx->stripes[i];
1776
1777                         ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1778
1779                         bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1780                                       &stripe->error_bitmap, stripe->nr_sectors);
1781                         scrub_write_sectors(sctx, stripe, good, true);
1782                 }
1783         }
1784
1785         /* Wait for the above writebacks to finish. */
1786         for (int i = 0; i < nr_stripes; i++) {
1787                 stripe = &sctx->stripes[i];
1788
1789                 wait_scrub_stripe_io(stripe);
1790                 scrub_reset_stripe(stripe);
1791         }
1792 out:
1793         sctx->cur_stripe = 0;
1794         return ret;
1795 }
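
/*
 * Example of the partial-group submission above: with
 * SCRUB_STRIPES_PER_GROUP = 8 and nr_stripes = 13, the first group
 * (slots [0, 7]) was already submitted when it filled up, so
 * first_slot = round_down(13, 8) = 8 and only the 5 trailing stripes in
 * slots [8, 12] still need their initial read.
 */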
1796
1797 static void raid56_scrub_wait_endio(struct bio *bio)
1798 {
1799         complete(bio->bi_private);
1800 }
1801
1802 static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1803                               struct btrfs_device *dev, int mirror_num,
1804                               u64 logical, u32 length, u64 physical,
1805                               u64 *found_logical_ret)
1806 {
1807         struct scrub_stripe *stripe;
1808         int ret;
1809
1810         /*
1811          * There should always be one slot left, as the caller filling the last
1812          * slot should flush them all.
1813          */
1814         ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1815
1816         /* @found_logical_ret must be specified. */
1817         ASSERT(found_logical_ret);
1818
1819         stripe = &sctx->stripes[sctx->cur_stripe];
1820         scrub_reset_stripe(stripe);
1821         ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1822                                            &sctx->csum_path, dev, physical,
1823                                            mirror_num, logical, length, stripe);
1824         /* Either >0 as no more extents or <0 for error. */
1825         if (ret)
1826                 return ret;
1827         *found_logical_ret = stripe->logical;
1828         sctx->cur_stripe++;
1829
1830         /* We filled one group, submit it. */
1831         if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1832                 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1833
1834                 submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1835         }
1836
1837         /* Last slot used, flush them all. */
1838         if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1839                 return flush_scrub_stripes(sctx);
1840         return 0;
1841 }
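
/*
 * Example of the grouping above: as stripes are queued one by one,
 * cur_stripe reaching 8 submits slots [0, 7] as one group, reaching 16
 * submits [8, 15], and so on; once cur_stripe hits SCRUB_TOTAL_STRIPES
 * the whole array is flushed and the slots are recycled.
 */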
1842
1843 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1844                                       struct btrfs_device *scrub_dev,
1845                                       struct btrfs_block_group *bg,
1846                                       struct map_lookup *map,
1847                                       u64 full_stripe_start)
1848 {
1849         DECLARE_COMPLETION_ONSTACK(io_done);
1850         struct btrfs_fs_info *fs_info = sctx->fs_info;
1851         struct btrfs_raid_bio *rbio;
1852         struct btrfs_io_context *bioc = NULL;
1853         struct btrfs_path extent_path = { 0 };
1854         struct btrfs_path csum_path = { 0 };
1855         struct bio *bio;
1856         struct scrub_stripe *stripe;
1857         bool all_empty = true;
1858         const int data_stripes = nr_data_stripes(map);
1859         unsigned long extent_bitmap = 0;
1860         u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1861         int ret;
1862
1863         ASSERT(sctx->raid56_data_stripes);
1864
1865         /*
1866          * For data stripe search, we cannot re-use the same extent/csum paths,
1867          * as the data stripe bytenr may be smaller than the previous extent.  Thus
1868          * we have to use our own extent/csum paths.
1869          */
1870         extent_path.search_commit_root = 1;
1871         extent_path.skip_locking = 1;
1872         csum_path.search_commit_root = 1;
1873         csum_path.skip_locking = 1;
1874
1875         for (int i = 0; i < data_stripes; i++) {
1876                 int stripe_index;
1877                 int rot;
1878                 u64 physical;
1879
1880                 stripe = &sctx->raid56_data_stripes[i];
1881                 rot = div_u64(full_stripe_start - bg->start,
1882                               data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1883                 stripe_index = (i + rot) % map->num_stripes;
1884                 physical = map->stripes[stripe_index].physical +
1885                            btrfs_stripe_nr_to_offset(rot);
1886
1887                 scrub_reset_stripe(stripe);
1888                 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1889                 ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1890                                 map->stripes[stripe_index].dev, physical, 1,
1891                                 full_stripe_start + btrfs_stripe_nr_to_offset(i),
1892                                 BTRFS_STRIPE_LEN, stripe);
1893                 if (ret < 0)
1894                         goto out;
1895                 /*
1896                  * No extent in this data stripe, need to manually mark the
1897                  * stripe initialized to make later read submission happy.
1898                  */
1899                 if (ret > 0) {
1900                         stripe->logical = full_stripe_start +
1901                                           btrfs_stripe_nr_to_offset(i);
1902                         stripe->dev = map->stripes[stripe_index].dev;
1903                         stripe->mirror_num = 1;
1904                         set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1905                 }
1906         }
1907
1908         /* Check if all data stripes are empty. */
1909         for (int i = 0; i < data_stripes; i++) {
1910                 stripe = &sctx->raid56_data_stripes[i];
1911                 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1912                         all_empty = false;
1913                         break;
1914                 }
1915         }
1916         if (all_empty) {
1917                 ret = 0;
1918                 goto out;
1919         }
1920
1921         for (int i = 0; i < data_stripes; i++) {
1922                 stripe = &sctx->raid56_data_stripes[i];
1923                 scrub_submit_initial_read(sctx, stripe);
1924         }
1925         for (int i = 0; i < data_stripes; i++) {
1926                 stripe = &sctx->raid56_data_stripes[i];
1927
1928                 wait_event(stripe->repair_wait,
1929                            test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1930         }
1931         /* For now, no zoned support for RAID56. */
1932         ASSERT(!btrfs_is_zoned(sctx->fs_info));
1933
1934         /*
1935          * Now all data stripes are properly verified. Check if we have any
1936          * unrepaired sectors. If so, abort immediately, or we could further
1937          * corrupt the P/Q stripes.
1938          *
1939          * During the loop, also populate extent_bitmap.
1940          */
1941         for (int i = 0; i < data_stripes; i++) {
1942                 unsigned long error;
1943
1944                 stripe = &sctx->raid56_data_stripes[i];
1945
1946                 /*
1947                  * We should only check the errors where there is an extent,
1948                  * as we may hit an empty data stripe whose device is missing.
1949                  */
1950                 bitmap_and(&error, &stripe->error_bitmap,
1951                            &stripe->extent_sector_bitmap, stripe->nr_sectors);
1952                 if (!bitmap_empty(&error, stripe->nr_sectors)) {
1953                         btrfs_err(fs_info,
1954 "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
1955                                   full_stripe_start, i, stripe->nr_sectors,
1956                                   &error);
1957                         ret = -EIO;
1958                         goto out;
1959                 }
1960                 bitmap_or(&extent_bitmap, &extent_bitmap,
1961                           &stripe->extent_sector_bitmap, stripe->nr_sectors);
1962         }
1963
1964         /* Now we can check and regenerate the P/Q stripe. */
1965         bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
1966         bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
1967         bio->bi_private = &io_done;
1968         bio->bi_end_io = raid56_scrub_wait_endio;
1969
1970         btrfs_bio_counter_inc_blocked(fs_info);
1971         ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
1972                               &length, &bioc, NULL, NULL, 1);
1973         if (ret < 0) {
1974                 btrfs_put_bioc(bioc);
1975                 btrfs_bio_counter_dec(fs_info);
1976                 goto out;
1977         }
1978         rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
1979                                 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1980         btrfs_put_bioc(bioc);
1981         if (!rbio) {
1982                 ret = -ENOMEM;
1983                 btrfs_bio_counter_dec(fs_info);
1984                 goto out;
1985         }
1986         /* Use the recovered stripes as cache to avoid reading them from disk again. */
1987         for (int i = 0; i < data_stripes; i++) {
1988                 stripe = &sctx->raid56_data_stripes[i];
1989
1990                 raid56_parity_cache_data_pages(rbio, stripe->pages,
1991                                 full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
1992         }
1993         raid56_parity_submit_scrub_rbio(rbio);
1994         wait_for_completion_io(&io_done);
1995         ret = blk_status_to_errno(bio->bi_status);
1996         bio_put(bio);
1997         btrfs_bio_counter_dec(fs_info);
1998
1999         btrfs_release_path(&extent_path);
2000         btrfs_release_path(&csum_path);
2001 out:
2002         return ret;
2003 }
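
/*
 * Example of the rotation math at the top of this function (assumed
 * layout): data_stripes = 2, map->num_stripes = 3 and full_stripe_start =
 * bg->start + 256K gives rot = (256K / 2) / 64K = 2, so data stripe 0
 * sits on map->stripes[2] and data stripe 1 on map->stripes[0], each at
 * physical = map->stripes[stripe_index].physical + 2 * 64K.
 */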
2004
2005 /*
2006  * Scrub one range which can only have a simple mirror based profile.
2007  * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
2008  *  RAID0/RAID10).
2009  *
2010  * Since we may need to handle a subset of a block group, we need the
2011  * @logical_start and @logical_length parameters.
2012  */
2013 static int scrub_simple_mirror(struct scrub_ctx *sctx,
2014                                struct btrfs_block_group *bg,
2015                                struct map_lookup *map,
2016                                u64 logical_start, u64 logical_length,
2017                                struct btrfs_device *device,
2018                                u64 physical, int mirror_num)
2019 {
2020         struct btrfs_fs_info *fs_info = sctx->fs_info;
2021         const u64 logical_end = logical_start + logical_length;
2022         u64 cur_logical = logical_start;
2023         int ret;
2024
2025         /* The range must be inside the bg */
2026         ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
2027
2028         /* Go through each extent item inside the logical range */
2029         while (cur_logical < logical_end) {
2030                 u64 found_logical = U64_MAX;
2031                 u64 cur_physical = physical + cur_logical - logical_start;
2032
2033                 /* Canceled? */
2034                 if (atomic_read(&fs_info->scrub_cancel_req) ||
2035                     atomic_read(&sctx->cancel_req)) {
2036                         ret = -ECANCELED;
2037                         break;
2038                 }
2039                 /* Paused? */
2040                 if (atomic_read(&fs_info->scrub_pause_req)) {
2041                         /* Push queued extents */
2042                         scrub_blocked_if_needed(fs_info);
2043                 }
2044                 /* Block group removed? */
2045                 spin_lock(&bg->lock);
2046                 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
2047                         spin_unlock(&bg->lock);
2048                         ret = 0;
2049                         break;
2050                 }
2051                 spin_unlock(&bg->lock);
2052
2053                 ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2054                                          cur_logical, logical_end - cur_logical,
2055                                          cur_physical, &found_logical);
2056                 if (ret > 0) {
2057                         /* No more extents, just update the accounting */
2058                         sctx->stat.last_physical = physical + logical_length;
2059                         ret = 0;
2060                         break;
2061                 }
2062                 if (ret < 0)
2063                         break;
2064
2065                 /* queue_scrub_stripe() returned 0, @found_logical must be updated. */
2066                 ASSERT(found_logical != U64_MAX);
2067                 cur_logical = found_logical + BTRFS_STRIPE_LEN;
2068
2069                 /* Don't hold the CPU for too long */
2070                 cond_resched();
2071         }
2072         return ret;
2073 }
2074
2075 /* Calculate the full stripe length for simple stripe based profiles */
2076 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
2077 {
2078         ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2079                             BTRFS_BLOCK_GROUP_RAID10));
2080
2081         return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2082 }
2083
2084 /* Get the logical bytenr for the stripe */
2085 static u64 simple_stripe_get_logical(struct map_lookup *map,
2086                                      struct btrfs_block_group *bg,
2087                                      int stripe_index)
2088 {
2089         ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2090                             BTRFS_BLOCK_GROUP_RAID10));
2091         ASSERT(stripe_index < map->num_stripes);
2092
2093         /*
2094          * (stripe_index / sub_stripes) gives how many data stripes we need to
2095          * skip.
2096          */
2097         return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2098                bg->start;
2099 }
2100
2101 /* Get the mirror number for the stripe */
2102 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
2103 {
2104         ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2105                             BTRFS_BLOCK_GROUP_RAID10));
2106         ASSERT(stripe_index < map->num_stripes);
2107
2108         /* For RAID0, it's fixed to 1, for RAID10 it's 0,1,0,1... */
2109         return stripe_index % map->sub_stripes + 1;
2110 }
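
/*
 * A worked example for the three helpers above (hypothetical RAID10
 * chunk): num_stripes = 4, sub_stripes = 2, bg->start = 1G.
 *
 *     full stripe len = btrfs_stripe_nr_to_offset(4 / 2) = 128K
 *     stripe_index 2: logical = 1G + btrfs_stripe_nr_to_offset(2 / 2)
 *                             = 1G + 64K, mirror_num = 2 % 2 + 1 = 1
 *     stripe_index 3: logical = 1G + 64K, mirror_num = 3 % 2 + 1 = 2
 *
 * i.e. stripes 2 and 3 are the two mirrors of the chunk's second 64K
 * slice.
 */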
2111
2112 static int scrub_simple_stripe(struct scrub_ctx *sctx,
2113                                struct btrfs_block_group *bg,
2114                                struct map_lookup *map,
2115                                struct btrfs_device *device,
2116                                int stripe_index)
2117 {
2118         const u64 logical_increment = simple_stripe_full_stripe_len(map);
2119         const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2120         const u64 orig_physical = map->stripes[stripe_index].physical;
2121         const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2122         u64 cur_logical = orig_logical;
2123         u64 cur_physical = orig_physical;
2124         int ret = 0;
2125
2126         while (cur_logical < bg->start + bg->length) {
2127                 /*
2128                  * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2129                  * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2130                  * this stripe.
2131                  */
2132                 ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2133                                           BTRFS_STRIPE_LEN, device, cur_physical,
2134                                           mirror_num);
2135                 if (ret)
2136                         return ret;
2137                 /* Skip to next stripe which belongs to the target device */
2138                 cur_logical += logical_increment;
2139                 /* For physical offset, we just go to next stripe */
2140                 cur_physical += BTRFS_STRIPE_LEN;
2141         }
2142         return ret;
2143 }
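
/*
 * Continuing the hypothetical RAID10 example above: with
 * logical_increment = 128K, stripe_index 2 scrubs the 64K ranges at
 * bg->start + 64K, + 192K, + 320K, ..., while its physical offset only
 * advances by 64K per iteration, as the other half of each full stripe
 * lives on different devices.
 */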
2144
2145 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2146                                            struct btrfs_block_group *bg,
2147                                            struct extent_map *em,
2148                                            struct btrfs_device *scrub_dev,
2149                                            int stripe_index)
2150 {
2151         struct btrfs_fs_info *fs_info = sctx->fs_info;
2152         struct map_lookup *map = em->map_lookup;
2153         const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2154         const u64 chunk_logical = bg->start;
2155         int ret;
2156         int ret2;
2157         u64 physical = map->stripes[stripe_index].physical;
2158         const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
2159         const u64 physical_end = physical + dev_stripe_len;
2160         u64 logical;
2161         u64 logic_end;
2162         /* The logical increment after finishing one stripe */
2163         u64 increment;
2164         /* Offset inside the chunk */
2165         u64 offset;
2166         u64 stripe_logical;
2167         int stop_loop = 0;
2168
2169         /* Extent_path should be released by now. */
2170         ASSERT(sctx->extent_path.nodes[0] == NULL);
2171
2172         scrub_blocked_if_needed(fs_info);
2173
2174         if (sctx->is_dev_replace &&
2175             btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2176                 mutex_lock(&sctx->wr_lock);
2177                 sctx->write_pointer = physical;
2178                 mutex_unlock(&sctx->wr_lock);
2179         }
2180
2181         /* Prepare the extra data stripes used by RAID56. */
2182         if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2183                 ASSERT(sctx->raid56_data_stripes == NULL);
2184
2185                 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2186                                                     sizeof(struct scrub_stripe),
2187                                                     GFP_KERNEL);
2188                 if (!sctx->raid56_data_stripes) {
2189                         ret = -ENOMEM;
2190                         goto out;
2191                 }
2192                 for (int i = 0; i < nr_data_stripes(map); i++) {
2193                         ret = init_scrub_stripe(fs_info,
2194                                                 &sctx->raid56_data_stripes[i]);
2195                         if (ret < 0)
2196                                 goto out;
2197                         sctx->raid56_data_stripes[i].bg = bg;
2198                         sctx->raid56_data_stripes[i].sctx = sctx;
2199                 }
2200         }
2201         /*
2202          * There used to be a big double loop to handle all profiles using the
2203          * same routine, which grew larger and messier over time.
2204          *
2205          * So here we handle each profile separately, so that simpler profiles
2206          * have a simpler scrubbing function.
2207          */
2208         if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2209                          BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2210                 /*
2211                  * The above check rules out all complex profiles; the remaining
2212                  * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2213                  * mirrored duplication without striping.
2214                  *
2215                  * Only @physical and @mirror_num need to be calculated using
2216                  * @stripe_index.
2217                  */
2218                 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2219                                 scrub_dev, map->stripes[stripe_index].physical,
2220                                 stripe_index + 1);
2221                 offset = 0;
2222                 goto out;
2223         }
2224         if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2225                 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2226                 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2227                 goto out;
2228         }
2229
2230         /* Only RAID56 goes through the old code */
2231         ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2232         ret = 0;
2233
2234         /* Calculate the logical end of the stripe */
2235         get_raid56_logic_offset(physical_end, stripe_index,
2236                                 map, &logic_end, NULL);
2237         logic_end += chunk_logical;
2238
2239         /* Initialize @offset in case we need to go to out: label */
2240         get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2241         increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
2242
2243         /*
2244          * Due to the rotation, for RAID56 it's better to iterate each stripe
2245          * using its physical offset.
2246          */
2247         while (physical < physical_end) {
2248                 ret = get_raid56_logic_offset(physical, stripe_index, map,
2249                                               &logical, &stripe_logical);
2250                 logical += chunk_logical;
2251                 if (ret) {
2252                         /* It is a parity stripe */
2253                         stripe_logical += chunk_logical;
2254                         ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2255                                                          map, stripe_logical);
2256                         if (ret)
2257                                 goto out;
2258                         goto next;
2259                 }
2260
2261                 /*
2262                  * Now we're at a data stripe, scrub each extents in the range.
2263                  *
2264                  * At this stage, if we ignore the repair part, inside each data
2265                  * stripe it is no different than SINGLE profile.
2266                  * We can reuse scrub_simple_mirror() here, as the repair part
2267                  * is still based on @mirror_num.
2268                  */
2269                 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2270                                           scrub_dev, physical, 1);
2271                 if (ret < 0)
2272                         goto out;
2273 next:
2274                 logical += increment;
2275                 physical += BTRFS_STRIPE_LEN;
2276                 spin_lock(&sctx->stat_lock);
2277                 if (stop_loop)
2278                         sctx->stat.last_physical =
2279                                 map->stripes[stripe_index].physical + dev_stripe_len;
2280                 else
2281                         sctx->stat.last_physical = physical;
2282                 spin_unlock(&sctx->stat_lock);
2283                 if (stop_loop)
2284                         break;
2285         }
2286 out:
2287         ret2 = flush_scrub_stripes(sctx);
2288         if (!ret)
2289                 ret = ret2;
2290         btrfs_release_path(&sctx->extent_path);
2291         btrfs_release_path(&sctx->csum_path);
2292
2293         if (sctx->raid56_data_stripes) {
2294                 for (int i = 0; i < nr_data_stripes(map); i++)
2295                         release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2296                 kfree(sctx->raid56_data_stripes);
2297                 sctx->raid56_data_stripes = NULL;
2298         }
2299
2300         if (sctx->is_dev_replace && ret >= 0) {
2301                 int ret2;
2302
2303                 ret2 = sync_write_pointer_for_zoned(sctx,
2304                                 chunk_logical + offset,
2305                                 map->stripes[stripe_index].physical,
2306                                 physical_end);
2307                 if (ret2)
2308                         ret = ret2;
2309         }
2310
2311         return ret < 0 ? ret : 0;
2312 }
2313
2314 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2315                                           struct btrfs_block_group *bg,
2316                                           struct btrfs_device *scrub_dev,
2317                                           u64 dev_offset,
2318                                           u64 dev_extent_len)
2319 {
2320         struct btrfs_fs_info *fs_info = sctx->fs_info;
2321         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2322         struct map_lookup *map;
2323         struct extent_map *em;
2324         int i;
2325         int ret = 0;
2326
2327         read_lock(&map_tree->lock);
2328         em = lookup_extent_mapping(map_tree, bg->start, bg->length);
2329         read_unlock(&map_tree->lock);
2330
2331         if (!em) {
2332                 /*
2333                  * Might have been an unused block group deleted by the cleaner
2334                  * kthread or relocation.
2335                  */
2336                 spin_lock(&bg->lock);
2337                 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2338                         ret = -EINVAL;
2339                 spin_unlock(&bg->lock);
2340
2341                 return ret;
2342         }
2343         if (em->start != bg->start)
2344                 goto out;
2345         if (em->len < dev_extent_len)
2346                 goto out;
2347
2348         map = em->map_lookup;
2349         for (i = 0; i < map->num_stripes; ++i) {
2350                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2351                     map->stripes[i].physical == dev_offset) {
2352                         ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
2353                         if (ret)
2354                                 goto out;
2355                 }
2356         }
2357 out:
2358         free_extent_map(em);
2359
2360         return ret;
2361 }
2362
2363 static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2364                                           struct btrfs_block_group *cache)
2365 {
2366         struct btrfs_fs_info *fs_info = cache->fs_info;
2367         struct btrfs_trans_handle *trans;
2368
2369         if (!btrfs_is_zoned(fs_info))
2370                 return 0;
2371
2372         btrfs_wait_block_group_reservations(cache);
2373         btrfs_wait_nocow_writers(cache);
2374         btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2375
2376         trans = btrfs_join_transaction(root);
2377         if (IS_ERR(trans))
2378                 return PTR_ERR(trans);
2379         return btrfs_commit_transaction(trans);
2380 }
2381
2382 static noinline_for_stack
2383 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2384                            struct btrfs_device *scrub_dev, u64 start, u64 end)
2385 {
2386         struct btrfs_dev_extent *dev_extent = NULL;
2387         struct btrfs_path *path;
2388         struct btrfs_fs_info *fs_info = sctx->fs_info;
2389         struct btrfs_root *root = fs_info->dev_root;
2390         u64 chunk_offset;
2391         int ret = 0;
2392         int ro_set;
2393         int slot;
2394         struct extent_buffer *l;
2395         struct btrfs_key key;
2396         struct btrfs_key found_key;
2397         struct btrfs_block_group *cache;
2398         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2399
2400         path = btrfs_alloc_path();
2401         if (!path)
2402                 return -ENOMEM;
2403
2404         path->reada = READA_FORWARD;
2405         path->search_commit_root = 1;
2406         path->skip_locking = 1;
2407
2408         key.objectid = scrub_dev->devid;
2409         key.offset = 0ull;
2410         key.type = BTRFS_DEV_EXTENT_KEY;
2411
2412         while (1) {
2413                 u64 dev_extent_len;
2414
2415                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2416                 if (ret < 0)
2417                         break;
2418                 if (ret > 0) {
2419                         if (path->slots[0] >=
2420                             btrfs_header_nritems(path->nodes[0])) {
2421                                 ret = btrfs_next_leaf(root, path);
2422                                 if (ret < 0)
2423                                         break;
2424                                 if (ret > 0) {
2425                                         ret = 0;
2426                                         break;
2427                                 }
2428                         } else {
2429                                 ret = 0;
2430                         }
2431                 }
2432
2433                 l = path->nodes[0];
2434                 slot = path->slots[0];
2435
2436                 btrfs_item_key_to_cpu(l, &found_key, slot);
2437
2438                 if (found_key.objectid != scrub_dev->devid)
2439                         break;
2440
2441                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2442                         break;
2443
2444                 if (found_key.offset >= end)
2445                         break;
2446
2447                 if (found_key.offset < key.offset)
2448                         break;
2449
2450                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2451                 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2452
2453                 if (found_key.offset + dev_extent_len <= start)
2454                         goto skip;
2455
2456                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2457
2458                 /*
2459                  * Get a reference on the corresponding block group to prevent
2460                  * the chunk from going away while we scrub it.
2461                  */
2462                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2463
2464                 /* Some chunks are removed but not committed to disk yet,
2465                  * continue scrubbing. */
2466                 if (!cache)
2467                         goto skip;
2468
2469                 ASSERT(cache->start <= chunk_offset);
2470                 /*
2471                  * We are using the commit root to search for device extents, so
2472                  * that means we could have found a device extent item from a
2473                  * block group that was deleted in the current transaction. The
2474                  * logical start offset of the deleted block group, stored at
2475                  * @chunk_offset, might be part of the logical address range of
2476                  * a new block group (which uses different physical extents).
2477                  * In this case btrfs_lookup_block_group() has returned the new
2478                  * block group, and its start address is less than @chunk_offset.
2479                  *
2480                  * We skip such new block groups, because it's pointless to
2481                  * process them, as we won't find their extents because we search
2482                  * for them using the commit root of the extent tree. For a device
2483                  * replace it's also fine to skip it, we won't miss copying them
2484                  * to the target device because we have the write duplication
2485                  * setup through the regular write path (by btrfs_map_block()),
2486                  * and we have committed a transaction when we started the device
2487                  * replace, right after setting up the device replace state.
2488                  */
2489                 if (cache->start < chunk_offset) {
2490                         btrfs_put_block_group(cache);
2491                         goto skip;
2492                 }
2493
2494                 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2495                         if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2496                                 btrfs_put_block_group(cache);
2497                                 goto skip;
2498                         }
2499                 }
2500
2501                 /*
2502                  * Make sure that while we are scrubbing the corresponding block
2503                  * group doesn't get its logical address and its device extents
2504                  * reused for another block group, which can possibly be of a
2505                  * different type and different profile. We do this to prevent
2506                  * false error detections and crashes due to bogus attempts to
2507                  * repair extents.
2508                  */
2509                 spin_lock(&cache->lock);
2510                 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2511                         spin_unlock(&cache->lock);
2512                         btrfs_put_block_group(cache);
2513                         goto skip;
2514                 }
2515                 btrfs_freeze_block_group(cache);
2516                 spin_unlock(&cache->lock);
2517
2518                 /*
2519                  * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
2520                  * to avoid deadlock caused by:
2521                  * btrfs_inc_block_group_ro()
2522                  * -> btrfs_wait_for_commit()
2523                  * -> btrfs_commit_transaction()
2524                  * -> btrfs_scrub_pause()
2525                  */
2526                 scrub_pause_on(fs_info);
2527
2528                 /*
2529                  * Don't do chunk preallocation for scrub.
2530                  *
2531                  * This is especially important for SYSTEM bgs, or we can hit
2532                  * -EFBIG from btrfs_finish_chunk_alloc() like:
2533                  * 1. The only SYSTEM bg is marked RO.
2534                  *    Since SYSTEM bg is small, that's pretty common.
2535                  * 2. New SYSTEM bg will be allocated
2536                  *    Because the regular version will allocate a new chunk.
2537                  * 3. New SYSTEM bg is empty and will get cleaned up
2538                  *    Before cleanup really happens, it's marked RO again.
2539                  * 4. Empty SYSTEM bg gets scrubbed
2540                  *    We go back to 2.
2541                  *
2542                  * This can easily boost the number of SYSTEM chunks if the cleaner
2543                  * thread can't be triggered fast enough, using up all the space
2544                  * of btrfs_super_block::sys_chunk_array.
2545                  *
2546                  * While for dev replace, we need to try our best to mark block
2547                  * group RO, to prevent race between:
2548                  * - Write duplication
2549                  *   Contains latest data
2550                  * - Scrub copy
2551                  *   Contains data from commit tree
2552                  *
2553                  * If target block group is not marked RO, nocow writes can
2554                  * be overwritten by scrub copy, causing data corruption.
2555                  * So for dev-replace, it's not allowed to continue if a block
2556                  * group is not RO.
2557                  */
2558                 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2559                 if (!ret && sctx->is_dev_replace) {
2560                         ret = finish_extent_writes_for_zoned(root, cache);
2561                         if (ret) {
2562                                 btrfs_dec_block_group_ro(cache);
2563                                 scrub_pause_off(fs_info);
2564                                 btrfs_put_block_group(cache);
2565                                 break;
2566                         }
2567                 }
2568
2569                 if (ret == 0) {
2570                         ro_set = 1;
2571                 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2572                            !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2573                         /*
2574                          * btrfs_inc_block_group_ro returns -ENOSPC when it
2575                          * fails to create a new chunk for metadata.
2576                          * That is not a problem for scrub, because
2577                          * metadata is always cowed, and our scrub pauses
2578                          * commit transactions.
2579                          *
2580                          * For RAID56 chunks, we have to mark them read-only
2581                          * for scrub, as later we would use our own cache
2582                          * outside of the RAID56 realm.
2583                          * Thus we want the RAID56 bg to be marked RO to
2584                          * prevent RMW from screwing up our cache.
2585                          */
2586                         ro_set = 0;
2587                 } else if (ret == -ETXTBSY) {
2588                         btrfs_warn(fs_info,
2589                    "skipping scrub of block group %llu due to active swapfile",
2590                                    cache->start);
2591                         scrub_pause_off(fs_info);
2592                         ret = 0;
2593                         goto skip_unfreeze;
2594                 } else {
2595                         btrfs_warn(fs_info,
2596                                    "failed setting block group ro: %d", ret);
2597                         btrfs_unfreeze_block_group(cache);
2598                         btrfs_put_block_group(cache);
2599                         scrub_pause_off(fs_info);
2600                         break;
2601                 }
2602
2603                 /*
2604                  * Now the target block is marked RO, wait for nocow writes to
2605                  * finish before dev-replace.
2606                  * COW is fine, as COW never overwrites extents in commit tree.
2607                  */
2608                 if (sctx->is_dev_replace) {
2609                         btrfs_wait_nocow_writers(cache);
2610                         btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2611                                         cache->length);
2612                 }
2613
2614                 scrub_pause_off(fs_info);
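                /*
                 * Publish the range of this dev extent as the dev-replace
                 * cursor window, so the current progress is persisted via
                 * the dev-replace item.
                 */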
2615                 down_write(&dev_replace->rwsem);
2616                 dev_replace->cursor_right = found_key.offset + dev_extent_len;
2617                 dev_replace->cursor_left = found_key.offset;
2618                 dev_replace->item_needs_writeback = 1;
2619                 up_write(&dev_replace->rwsem);
2620
2621                 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2622                                   dev_extent_len);
2623                 if (sctx->is_dev_replace &&
2624                     !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2625                                                       cache, found_key.offset))
2626                         ro_set = 0;
2627
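                /* The chunk is scrubbed; advance cursor_left to mark it done. */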
2628                 down_write(&dev_replace->rwsem);
2629                 dev_replace->cursor_left = dev_replace->cursor_right;
2630                 dev_replace->item_needs_writeback = 1;
2631                 up_write(&dev_replace->rwsem);
2632
2633                 if (ro_set)
2634                         btrfs_dec_block_group_ro(cache);
2635
2636                 /*
2637                  * We might have prevented the cleaner kthread from deleting
2638                  * this block group if it was already unused because we raced
2639                  * and set it to RO mode first. So add it back to the unused
2640                  * list, otherwise it might not ever be deleted unless a manual
2641                  * balance is triggered or it becomes used and unused again.
2642                  */
2643                 spin_lock(&cache->lock);
2644                 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2645                     !cache->ro && cache->reserved == 0 && cache->used == 0) {
2646                         spin_unlock(&cache->lock);
2647                         if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2648                                 btrfs_discard_queue_work(&fs_info->discard_ctl,
2649                                                          cache);
2650                         else
2651                                 btrfs_mark_bg_unused(cache);
2652                 } else {
2653                         spin_unlock(&cache->lock);
2654                 }
2655 skip_unfreeze:
2656                 btrfs_unfreeze_block_group(cache);
2657                 btrfs_put_block_group(cache);
2658                 if (ret)
2659                         break;
2660                 if (sctx->is_dev_replace &&
2661                     atomic64_read(&dev_replace->num_write_errors) > 0) {
2662                         ret = -EIO;
2663                         break;
2664                 }
2665                 if (sctx->stat.malloc_errors > 0) {
2666                         ret = -ENOMEM;
2667                         break;
2668                 }
2669 skip:
2670                 key.offset = found_key.offset + dev_extent_len;
2671                 btrfs_release_path(path);
2672         }
2673
2674         btrfs_free_path(path);
2675
2676         return ret;
2677 }
2678
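/*
 * Read and verify one super block copy of @dev at @physical.
 *
 * The copy is read into @page, its checksum is verified, its generation is
 * compared against @generation, and finally btrfs_validate_super() runs the
 * full sanity checks.  Returns 0 on success, a negative errno on read
 * failure, -EIO on a bad checksum, or -EUCLEAN on a generation mismatch.
 */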
2679 static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2680                            struct page *page, u64 physical, u64 generation)
2681 {
2682         struct btrfs_fs_info *fs_info = sctx->fs_info;
2683         struct bio_vec bvec;
2684         struct bio bio;
2685         struct btrfs_super_block *sb = page_address(page);
2686         int ret;
2687
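        /* A short on-stack bio is enough for this synchronous one-page read. */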
2688         bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2689         bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2690         __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2691         ret = submit_bio_wait(&bio);
2692         bio_uninit(&bio);
2693
2694         if (ret < 0)
2695                 return ret;
2696         ret = btrfs_check_super_csum(fs_info, sb);
2697         if (ret != 0) {
2698                 btrfs_err_rl(fs_info,
2699                         "super block at physical %llu devid %llu has bad csum",
2700                         physical, dev->devid);
2701                 return -EIO;
2702         }
2703         if (btrfs_super_generation(sb) != generation) {
2704                 btrfs_err_rl(fs_info,
2705 "super block at physical %llu devid %llu has bad generation %llu expected %llu",
2706                              physical, dev->devid,
2707                              btrfs_super_generation(sb), generation);
2708                 return -EUCLEAN;
2709         }
2710
2711         return btrfs_validate_super(fs_info, sb, -1);
2712 }
2713
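/*
 * Scrub all super block copies on @scrub_dev.
 *
 * Super block errors are only accounted in the scrub statistics here; the
 * actual repair is left to a later transaction commit, which rewrites all
 * super block copies.
 */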
2714 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2715                                            struct btrfs_device *scrub_dev)
2716 {
2717         int     i;
2718         u64     bytenr;
2719         u64     gen;
2720         int ret = 0;
2721         struct page *page;
2722         struct btrfs_fs_info *fs_info = sctx->fs_info;
2723
2724         if (BTRFS_FS_ERROR(fs_info))
2725                 return -EROFS;
2726
2727         page = alloc_page(GFP_KERNEL);
2728         if (!page) {
2729                 spin_lock(&sctx->stat_lock);
2730                 sctx->stat.malloc_errors++;
2731                 spin_unlock(&sctx->stat_lock);
2732                 return -ENOMEM;
2733         }
2734
2735         /* Seed devices of a new filesystem have their own generation. */
2736         if (scrub_dev->fs_devices != fs_info->fs_devices)
2737                 gen = scrub_dev->generation;
2738         else
2739                 gen = fs_info->last_trans_committed;
2740
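        /*
         * Walk all super block mirrors that fit on this device, skipping
         * locations that must not be read directly (e.g. on zoned devices).
         */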
2741         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2742                 bytenr = btrfs_sb_offset(i);
2743                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
2744                     scrub_dev->commit_total_bytes)
2745                         break;
2746                 if (!btrfs_check_super_location(scrub_dev, bytenr))
2747                         continue;
2748
2749                 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2750                 if (ret) {
2751                         spin_lock(&sctx->stat_lock);
2752                         sctx->stat.super_errors++;
2753                         spin_unlock(&sctx->stat_lock);
2754                 }
2755         }
2756         __free_page(page);
2757         return 0;
2758 }
2759
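/*
 * Drop one reference on the scrub workqueue and destroy it once the last
 * scrub context is done with it.
 */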
2760 static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2761 {
2762         if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2763                                         &fs_info->scrub_lock)) {
2764                 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2765
2766                 fs_info->scrub_workers = NULL;
2767                 mutex_unlock(&fs_info->scrub_lock);
2768
2769                 if (scrub_workers)
2770                         destroy_workqueue(scrub_workers);
2771         }
2772 }
2773
2774 /*
2775  * Get a reference count on fs_info->scrub_workers.  Start the workqueue if necessary.
2776  */
2777 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
2778 {
2779         struct workqueue_struct *scrub_workers = NULL;
2780         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2781         int max_active = fs_info->thread_pool_size;
2783
2784         if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2785                 return 0;
2786
2787         scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2788         if (!scrub_workers)
2789                 return -ENOMEM;
2790
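        /*
         * The workqueue was allocated optimistically without holding
         * scrub_lock; recheck the refcount under the lock to see whether we
         * won the race to install it.
         */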
2791         mutex_lock(&fs_info->scrub_lock);
2792         if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2793                 ASSERT(fs_info->scrub_workers == NULL);
2794                 fs_info->scrub_workers = scrub_workers;
2795                 refcount_set(&fs_info->scrub_workers_refcnt, 1);
2796                 mutex_unlock(&fs_info->scrub_lock);
2797                 return 0;
2798         }
2799         /* Another thread raced in and created the workers for us. */
2800         refcount_inc(&fs_info->scrub_workers_refcnt);
2801         mutex_unlock(&fs_info->scrub_lock);
2802
2803         destroy_workqueue(scrub_workers);
2804         return 0;
2807 }
2808
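/*
 * Entry point for both scrub and dev-replace: scrub the byte range
 * [@start, @end] of the device with id @devid, optionally filling
 * @progress with the accumulated statistics.
 *
 * A hedged sketch of how the scrub ioctl handler is expected to call this
 * (argument names are illustrative, not copied from ioctl.c):
 *
 *	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
 *			      &sa->progress,
 *			      !!(sa->flags & BTRFS_SCRUB_READONLY), 0);
 */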
2809 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2810                     u64 end, struct btrfs_scrub_progress *progress,
2811                     int readonly, int is_dev_replace)
2812 {
2813         struct btrfs_dev_lookup_args args = { .devid = devid };
2814         struct scrub_ctx *sctx;
2815         int ret;
2816         struct btrfs_device *dev;
2817         unsigned int nofs_flag;
2818         bool need_commit = false;
2819
2820         if (btrfs_fs_closing(fs_info))
2821                 return -EAGAIN;
2822
2823         /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2824         ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2825
2826         /*
2827          * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2828          * value (max nodesize / min sectorsize), thus nodesize should always
2829          * be fine.
2830          */
2831         ASSERT(fs_info->nodesize <=
2832                SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2833
2834         /* Allocate outside of device_list_mutex */
2835         sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2836         if (IS_ERR(sctx))
2837                 return PTR_ERR(sctx);
2838
2839         ret = scrub_workers_get(fs_info);
2840         if (ret)
2841                 goto out_free_ctx;
2842
2843         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2844         dev = btrfs_find_device(fs_info->fs_devices, &args);
2845         if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2846                      !is_dev_replace)) {
2847                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2848                 ret = -ENODEV;
2849                 goto out;
2850         }
2851
2852         if (!is_dev_replace && !readonly &&
2853             !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2854                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2855                 btrfs_err_in_rcu(fs_info,
2856                         "scrub on devid %llu: filesystem on %s is not writable",
2857                                  devid, btrfs_dev_name(dev));
2858                 ret = -EROFS;
2859                 goto out;
2860         }
2861
2862         mutex_lock(&fs_info->scrub_lock);
2863         if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2864             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2865                 mutex_unlock(&fs_info->scrub_lock);
2866                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2867                 ret = -EIO;
2868                 goto out;
2869         }
2870
2871         down_read(&fs_info->dev_replace.rwsem);
2872         if (dev->scrub_ctx ||
2873             (!is_dev_replace &&
2874              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2875                 up_read(&fs_info->dev_replace.rwsem);
2876                 mutex_unlock(&fs_info->scrub_lock);
2877                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2878                 ret = -EINPROGRESS;
2879                 goto out;
2880         }
2881         up_read(&fs_info->dev_replace.rwsem);
2882
2883         sctx->readonly = readonly;
2884         dev->scrub_ctx = sctx;
2885         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2886
2887         /*
2888          * By checking @scrub_pause_req here, we can avoid the race between
2889          * committing a transaction and scrubbing.
2890          */
2891         __scrub_blocked_if_needed(fs_info);
2892         atomic_inc(&fs_info->scrubs_running);
2893         mutex_unlock(&fs_info->scrub_lock);
2894
2895         /*
2896          * In order to avoid a deadlock with reclaim when there is a
2897          * transaction trying to pause scrub, make sure we use GFP_NOFS for
2898          * all the allocations done by btrfs_scrub_sectors() and
2899          * scrub_sectors_for_parity() invoked by our callees.  The pause
2900          * request is made when the transaction commit starts, and it blocks
2901          * the transaction until scrub is paused (done at specific points in
2902          * scrub_stripe(), or right before incrementing
2903          * fs_info->scrubs_running above).
2904          */
2904         nofs_flag = memalloc_nofs_save();
2905         if (!is_dev_replace) {
2906                 u64 old_super_errors;
2907
2908                 spin_lock(&sctx->stat_lock);
2909                 old_super_errors = sctx->stat.super_errors;
2910                 spin_unlock(&sctx->stat_lock);
2911
2912                 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2913                 /*
2914                  * By holding the device list mutex, we avoid racing with
2915                  * the super block writes kicked off by a log tree sync.
2916                  */
2917                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2918                 ret = scrub_supers(sctx, dev);
2919                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2920
2921                 spin_lock(&sctx->stat_lock);
2922                 /*
2923                  * Super block errors found, but we cannot commit a transaction
2924                  * in the current context, since btrfs_commit_transaction()
2925                  * needs to pause the currently running scrub (held by
2926                  * ourselves).
2927                  */
2927                 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2928                         need_commit = true;
2929                 spin_unlock(&sctx->stat_lock);
2930         }
2931
2932         if (!ret)
2933                 ret = scrub_enumerate_chunks(sctx, dev, start, end);
2934         memalloc_nofs_restore(nofs_flag);
2935
2936         atomic_dec(&fs_info->scrubs_running);
2937         wake_up(&fs_info->scrub_pause_wait);
2938
2939         if (progress)
2940                 memcpy(progress, &sctx->stat, sizeof(*progress));
2941
2942         if (!is_dev_replace)
2943                 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
2944                         ret ? "not finished" : "finished", devid, ret);
2945
2946         mutex_lock(&fs_info->scrub_lock);
2947         dev->scrub_ctx = NULL;
2948         mutex_unlock(&fs_info->scrub_lock);
2949
2950         scrub_workers_put(fs_info);
2951         scrub_put_ctx(sctx);
2952
2953         /*
2954          * We found some super block errors earlier; now that scrub has
2955          * finished, try to force a transaction commit, which rewrites the
2956          * super blocks and thus repairs the bad copies.
2957          */
2957         if (need_commit) {
2958                 struct btrfs_trans_handle *trans;
2959
2960                 trans = btrfs_start_transaction(fs_info->tree_root, 0);
2961                 if (IS_ERR(trans)) {
2962                         ret = PTR_ERR(trans);
2963                         btrfs_err(fs_info,
2964         "scrub: failed to start transaction to fix super block errors: %d", ret);
2965                         return ret;
2966                 }
2967                 ret = btrfs_commit_transaction(trans);
2968                 if (ret < 0)
2969                         btrfs_err(fs_info,
2970         "scrub: failed to commit transaction to fix super block errors: %d", ret);
2971         }
2972         return ret;
2973 out:
2974         scrub_workers_put(fs_info);
2975 out_free_ctx:
2976         scrub_free_ctx(sctx);
2977
2978         return ret;
2979 }
2980
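/*
 * Ask all running scrubs to pause and wait until every one of them has
 * reached a pause point.  Called e.g. around transaction commit.
 */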
2981 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
2982 {
2983         mutex_lock(&fs_info->scrub_lock);
2984         atomic_inc(&fs_info->scrub_pause_req);
2985         while (atomic_read(&fs_info->scrubs_paused) !=
2986                atomic_read(&fs_info->scrubs_running)) {
2987                 mutex_unlock(&fs_info->scrub_lock);
2988                 wait_event(fs_info->scrub_pause_wait,
2989                            atomic_read(&fs_info->scrubs_paused) ==
2990                            atomic_read(&fs_info->scrubs_running));
2991                 mutex_lock(&fs_info->scrub_lock);
2992         }
2993         mutex_unlock(&fs_info->scrub_lock);
2994 }
2995
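/* Let paused scrubs resume; pairs with btrfs_scrub_pause(). */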
2996 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
2997 {
2998         atomic_dec(&fs_info->scrub_pause_req);
2999         wake_up(&fs_info->scrub_pause_wait);
3000 }
3001
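/*
 * Cancel all running scrubs and wait for them to finish.  Returns
 * -ENOTCONN if no scrub was running.
 */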
3002 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3003 {
3004         mutex_lock(&fs_info->scrub_lock);
3005         if (!atomic_read(&fs_info->scrubs_running)) {
3006                 mutex_unlock(&fs_info->scrub_lock);
3007                 return -ENOTCONN;
3008         }
3009
3010         atomic_inc(&fs_info->scrub_cancel_req);
3011         while (atomic_read(&fs_info->scrubs_running)) {
3012                 mutex_unlock(&fs_info->scrub_lock);
3013                 wait_event(fs_info->scrub_pause_wait,
3014                            atomic_read(&fs_info->scrubs_running) == 0);
3015                 mutex_lock(&fs_info->scrub_lock);
3016         }
3017         atomic_dec(&fs_info->scrub_cancel_req);
3018         mutex_unlock(&fs_info->scrub_lock);
3019
3020         return 0;
3021 }
3022
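/*
 * Cancel a running scrub on @dev and wait until its context is torn down.
 * Returns -ENOTCONN if no scrub is running on the device.
 */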
3023 int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
3024 {
3025         struct btrfs_fs_info *fs_info = dev->fs_info;
3026         struct scrub_ctx *sctx;
3027
3028         mutex_lock(&fs_info->scrub_lock);
3029         sctx = dev->scrub_ctx;
3030         if (!sctx) {
3031                 mutex_unlock(&fs_info->scrub_lock);
3032                 return -ENOTCONN;
3033         }
3034         atomic_inc(&sctx->cancel_req);
3035         while (dev->scrub_ctx) {
3036                 mutex_unlock(&fs_info->scrub_lock);
3037                 wait_event(fs_info->scrub_pause_wait,
3038                            dev->scrub_ctx == NULL);
3039                 mutex_lock(&fs_info->scrub_lock);
3040         }
3041         mutex_unlock(&fs_info->scrub_lock);
3042
3043         return 0;
3044 }
3045
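/*
 * Copy the current scrub statistics for @devid into @progress.  Returns
 * -ENODEV if the device does not exist and -ENOTCONN if the device has no
 * running scrub.
 */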
3046 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3047                          struct btrfs_scrub_progress *progress)
3048 {
3049         struct btrfs_dev_lookup_args args = { .devid = devid };
3050         struct btrfs_device *dev;
3051         struct scrub_ctx *sctx = NULL;
3052
3053         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3054         dev = btrfs_find_device(fs_info->fs_devices, &args);
3055         if (dev)
3056                 sctx = dev->scrub_ctx;
3057         if (sctx)
3058                 memcpy(progress, &sctx->stat, sizeof(*progress));
3059         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3060
3061         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3062 }