// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "transaction.h"
#include "dev-replace.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "bio.h"
/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES   4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV ((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL ((u64)-2)
/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock: 0B (zone 0)
 * - first copy: 512G (zone starting at that offset)
 * - second copy: 4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

#define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
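
/*
 * Illustrative note, not from the original source: with a 256MiB zone size
 * (zone size shift == 28), sb_zone_number() below maps mirror 1 to zone
 * 1ULL << (39 - 28) == 2048 (2048 * 256MiB == 512GiB) and mirror 2 to zone
 * 1ULL << (42 - 28) == 16384 (16384 * 256MiB == 4TiB).
 */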
/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES 2

/*
 * Minimum number of active zones we need:
 *
 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
 * - 1 zone for tree-log dedicated block group
 * - 1 zone for relocation
 */
#define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)
/*
 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
 * We do not expect the zone size to become larger than 8GiB or smaller than
 * 4MiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G
#define BTRFS_MIN_ZONE_SIZE		SZ_4M

#define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
	return (zone->cond == BLK_ZONE_COND_FULL) ||
	       (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
}
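
/*
 * Worked example (illustrative, not from the original source):
 * BTRFS_SUPER_INFO_SIZE is 4KiB, so SUPER_INFO_SECTORS == 8. A zone with
 * capacity 65536 sectors and wp == start + 65530 is considered full above,
 * because 65530 + 8 overshoots start + capacity and another superblock
 * copy can no longer fit.
 */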
static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;
	int i;

	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
		full[i] = sb_zone_is_full(&zones[i]);
	}

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          0          1
	 * In use[1]        x          x          1
	 * Full[1]          0          0          C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
			u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
						BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (btrfs_super_generation(super[0]) >
		    btrfs_super_generation(super[1]))
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}
	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}
/*
 * Get the first zone number of the superblock mirror
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
	u64 zone = U64_MAX;

	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
	switch (mirror) {
	case 0: zone = 0; break;
	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
	}

	ASSERT(zone <= U32_MAX);

	return (u32)zone;
}
static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
}

static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	return (u64)zone_number << zone_info->zone_size_shift;
}
/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into fixed-size chunks and fakes a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}
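
/*
 * Illustrative example (not part of the original source): for a 16GiB
 * non-zoned device with fs_info->zone_size == 256MiB, emulate_report_zones()
 * above fakes 64 conventional zones whose write pointers sit at the zone
 * end, so none of them is ever treated as empty or sequential.
 */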
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	/* Check cache */
	if (zinfo->zone_cache) {
		unsigned int i;
		u32 zno;

		ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
		zno = pos >> zinfo->zone_size_shift;
		/*
		 * We cannot report zones beyond the zone end. So, it is OK to
		 * cap *nr_zones at the zone end.
		 */
		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);

		for (i = 0; i < *nr_zones; i++) {
			struct blk_zone *zone_info;

			zone_info = &zinfo->zone_cache[zno + i];
			if (!zone_info->len)
				break;
		}

		if (i == *nr_zones) {
			/* Cache hit on all the zones */
			memcpy(zones, zinfo->zone_cache + zno,
			       sizeof(*zinfo->zone_cache) * *nr_zones);
			return 0;
		}
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (!ret)
		return -EIO;

	/* Populate cache */
	if (zinfo->zone_cache) {
		u32 zno = pos >> zinfo->zone_size_shift;

		memcpy(zinfo->zone_cache + zno, zones,
		       sizeof(*zinfo->zone_cache) * *nr_zones);
	}

	return 0;
}
/* The emulated zone size is determined from the size of device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading of zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device, true);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	unsigned int max_active_zones;
	unsigned int nactive;
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	sector_t zone_sectors;
	char *model, *emulated;
	int ret;

	/*
	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
	 * yet be set.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	if (device->zone_info)
		return 0;

	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return -ENOMEM;

	device->zone_info = zone_info;

	if (!bdev_is_zoned(bdev)) {
		if (!fs_info->zone_size) {
			ret = calculate_emulated_zone_size(fs_info);
			if (ret)
				goto out;
		}

		ASSERT(fs_info->zone_size);
		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
	} else {
		zone_sectors = bdev_zone_sectors(bdev);
	}

	ASSERT(is_power_of_two_u64(zone_sectors));
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
	/* We reject devices with a zone size larger than 8GB */
	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu larger than supported maximum %llu",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu smaller than supported minimum %u",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	nr_sectors = bdev_nr_sectors(bdev);
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;
	max_active_zones = bdev_max_active_zones(bdev);
	if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
		btrfs_err_in_rcu(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
				 rcu_str_deref(device->name), max_active_zones,
				 BTRFS_MIN_ACTIVE_ZONES);
		ret = -EINVAL;
		goto out;
	}
	zone_info->max_active_zones = max_active_zones;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Enable zone cache only for a zoned device. On a non-zoned device, we
	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
	 * use the cache.
	 */
	if (populate_cache && bdev_is_zoned(device->bdev)) {
		zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
						zone_info->nr_zones);
		if (!zone_info->zone_cache) {
			btrfs_err_in_rcu(device->fs_info,
				"zoned: failed to allocate zone cache for %s",
				rcu_str_deref(device->name));
			ret = -ENOMEM;
			goto out;
		}
	}
	/* Get zone types */
	nactive = 0;
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			switch (zones[i].cond) {
			case BLK_ZONE_COND_EMPTY:
				__set_bit(nreported, zone_info->empty_zones);
				break;
			case BLK_ZONE_COND_IMP_OPEN:
			case BLK_ZONE_COND_EXP_OPEN:
			case BLK_ZONE_COND_CLOSED:
				__set_bit(nreported, zone_info->active_zones);
				nactive++;
				break;
			}
			nreported++;
		}
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

	if (nreported != zone_info->nr_zones) {
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	if (max_active_zones) {
		if (nactive > max_active_zones) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: %u active zones on %s exceeds max_active_zones %u",
					 nactive, rcu_str_deref(device->name),
					 max_active_zones);
			ret = -EIO;
			goto out;
		}
		atomic_set(&zone_info->active_zones_left,
			   max_active_zones - nactive);
		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
	}
	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EIO;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kvfree(zones);
	switch (bdev_zoned_model(bdev)) {
	case BLK_ZONED_HM:
		model = "host-managed zoned";
		emulated = "";
		break;
	case BLK_ZONED_HA:
		model = "host-aware zoned";
		emulated = "";
		break;
	case BLK_ZONED_NONE:
		model = "regular";
		emulated = "emulated ";
		break;
	default:
		/* Just in case */
		btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
				 bdev_zoned_model(bdev),
				 rcu_str_deref(device->name));
		ret = -EOPNOTSUPP;
		goto out_free_zone_info;
	}

	btrfs_info_in_rcu(fs_info,
		"%s block device %s, %u %szones of %llu bytes",
		model, rcu_str_deref(device->name), zone_info->nr_zones,
		emulated, zone_info->zone_size);

	return 0;

out:
	kvfree(zones);
out_free_zone_info:
	btrfs_destroy_dev_zone_info(device);

	return ret;
}
void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	vfree(zone_info->zone_cache);
	kfree(zone_info);
	device->zone_info = NULL;
}
struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
{
	struct btrfs_zoned_device_info *zone_info;

	zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return NULL;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones)
		goto out;

	bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
		    zone_info->nr_zones);

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones)
		goto out;

	bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
		    zone_info->nr_zones);

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones)
		goto out;

	bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
		    zone_info->nr_zones);
	zone_info->zone_cache = NULL;

	return zone_info;

out:
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	bitmap_free(zone_info->active_zones);
	kfree(zone_info);
	return NULL;
}
int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}
static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		if (device->bdev &&
		    bdev_zoned_model(device->bdev) == BLK_ZONED_HM) {
			btrfs_err(fs_info,
				"zoned: mode not enabled but zoned device found: %pg",
				device->bdev);
			return -EINVAL;
		}
	}

	return 0;
}
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct queue_limits *lim = &fs_info->limits;
	struct btrfs_device *device;
	u64 zone_size = 0;
	int ret;

	/*
	 * Host-Managed devices can't be used without the ZONED flag. With the
	 * ZONED flag, all devices can be used, using zone emulation if required.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return btrfs_check_for_zoned_device(fs_info);

	blk_set_stacking_limits(lim);

	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		struct btrfs_zoned_device_info *zone_info = device->zone_info;

		if (!device->bdev)
			continue;

		if (!zone_size) {
			zone_size = zone_info->zone_size;
		} else if (zone_info->zone_size != zone_size) {
			btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
				  zone_info->zone_size, zone_size);
			return -EINVAL;
		}

		/*
		 * With the zoned emulation, we can have a non-zoned device in
		 * zoned mode. In this case, we don't have a valid max zone
		 * append size.
		 */
		if (bdev_is_zoned(device->bdev)) {
			blk_stack_limits(lim,
					 &bdev_get_queue(device->bdev)->limits,
					 0);
		}
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		return -EINVAL;
	}

	fs_info->zone_size = zone_size;
	/*
	 * Also limit max_zone_append_size by max_segments * PAGE_SIZE.
	 * Technically, we can have multiple pages per segment. But, since
	 * we add the pages one by one to a bio, and cannot increase the
	 * metadata reservation even if it increases the number of extents, it
	 * is safe to stick with the limit.
	 */
	fs_info->max_zone_append_size = ALIGN_DOWN(
		min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
		     (u64)lim->max_sectors << SECTOR_SHIFT,
		     (u64)lim->max_segments << PAGE_SHIFT),
		fs_info->sectorsize);
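	/*
	 * Worked example (illustrative, hypothetical limits): with
	 * max_zone_append_sectors == 1024 (512KiB), max_sectors == 2560
	 * (1280KiB) and max_segments == 128 (512KiB with 4KiB pages),
	 * min3() yields 512KiB, which is already aligned to a 4KiB
	 * sectorsize, so max_zone_append_size becomes 512KiB.
	 */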
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
	if (fs_info->max_zone_append_size < fs_info->max_extent_size)
		fs_info->max_extent_size = fs_info->max_zone_append_size;

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info);
	if (ret)
		return ret;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
	return 0;
}
int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_test_opt(info, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_test_opt(info, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	btrfs_clear_and_info(info, DISCARD_ASYNC,
			"zoned: async discard ignored and disabled for zoned mode");

	return 0;
}
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(sb_zone_is_full(reset));

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/*
		 * For READ, we want the previous one. Move write pointer to
		 * the end of a zone, if it is at the head of a zone.
		 */
		u64 zone_end = 0;

		if (wp == zones[0].start << SECTOR_SHIFT)
			zone_end = zones[1].start + zones[1].capacity;
		else if (wp == zones[1].start << SECTOR_SHIFT)
			zone_end = zones[0].start + zones[0].capacity;

		if (zone_end)
			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
					BTRFS_SUPER_INFO_SIZE);

		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;

	return 0;
}
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
	if (ret < 0)
		return ret;
	if (ret != BTRFS_NR_SB_LOG_ZONES)
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as a regular filesystem. Doing so, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}
static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{
	u32 zone_num;

	if (!zinfo)
		return false;

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return false;

	if (!test_bit(zone_num, zinfo->seq_zones))
		return false;

	return true;
}
int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;
	int i;

	if (!is_sb_log_zone(zinfo, mirror))
		return 0;

	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance the next zone */
		if (zone->cond == BLK_ZONE_COND_FULL) {
			zone++;
			continue;
		}

		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += SUPER_INFO_SECTORS;

		if (sb_zone_is_full(zone)) {
			/*
			 * No room left to write new superblock. Since
			 * superblock is written with REQ_SYNC, it is safe to
			 * finish the zone now.
			 *
			 * If the write pointer is exactly at the capacity,
			 * explicit ZONE_FINISH is not necessary.
			 */
			if (zone->wp != zone->start + zone->capacity) {
				int ret;

				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len, GFP_NOFS);
				if (ret)
					return ret;
			}

			zone->wp = zone->start + zone->len;
			zone->cond = BLK_ZONE_COND_FULL;
		}
		return 0;
	}

	/* All the zones are FULL. Should not reach here. */
	ASSERT(0);
	return -EIO;
}
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	sector_t zone_sectors;
	sector_t nr_sectors;
	u8 zone_sectors_shift;
	u32 sb_zone;
	u32 nr_zones;

	zone_sectors = bdev_zone_sectors(bdev);
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
				zone_start_sector(sb_zone, bdev),
				zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}
/*
 * Find allocatable zones within a given region.
 *
 * @device:	the device to allocate a region on
 * @hole_start: the position of the hole to allocate the region
 * @num_bytes:	size of wanted region
 * @hole_end:	the end of the hole
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}
static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return true;

	if (!test_bit(zno, zone_info->active_zones)) {
		/* Active zone left? */
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/* Someone already set the bit */
			atomic_inc(&zone_info->active_zones_left);
		}
	}

	return true;
}

static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return;

	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
}
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	int ret;

	*bytes = 0;
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
			       GFP_NOFS);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		btrfs_dev_set_zone_empty(device, physical);
		btrfs_dev_clear_active_zone(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long nbits = size >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (begin + nbits > zinfo->nr_zones)
		return -ERANGE;

	/* All the zones are conventional */
	if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
		return 0;

	/* All the zones are sequential and empty */
	if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
	    bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest addressed extent in the block group as an allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	/*
	 * Avoid tree lookups for a new block group, there's no use for it.
	 * It must always be 0.
	 *
	 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
	 * For a new block group, this function is called from
	 * btrfs_make_block_group() which is already taking the chunk mutex.
	 * Thus, we cannot call calculate_alloc_pointer() which takes extent
	 * buffer locks to avoid deadlock.
	 */
	if (new) {
		*offset_ret = 0;
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	root = btrfs_extent_root(fs_info, key.objectid);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 logical = cache->start;
	u64 length = cache->length;
	u64 last_alloc = 0;
	int ret = 0;
	int i;
	unsigned int nofs_flag;
	u64 *alloc_offsets = NULL;
	u64 *caps = NULL;
	u64 *physical = NULL;
	unsigned long *active = NULL;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	/* Get the chunk mapping */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em)
		return -EINVAL;

	map = em->map_lookup;

	cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
	if (!cache->physical_map) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
	if (!alloc_offsets) {
		ret = -ENOMEM;
		goto out;
	}

	caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
	if (!caps) {
		ret = -ENOMEM;
		goto out;
	}

	physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
	if (!physical) {
		ret = -ENOMEM;
		goto out;
	}

	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
	if (!active) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < map->num_stripes; i++) {
		bool is_sequential;
		struct blk_zone zone;
		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
		int dev_replace_is_ongoing = 0;

		device = map->stripes[i].dev;
		physical[i] = map->stripes[i].physical;

		if (device->bdev == NULL) {
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		}

		is_sequential = btrfs_dev_is_sequential(device, physical[i]);
		if (is_sequential)
			num_sequential++;
		else
			num_conventional++;

		/*
		 * Consider a zone as active if we can allow any number of
		 * active zones.
		 */
		if (!device->zone_info->max_active_zones)
			__set_bit(i, active);

		if (!is_sequential) {
			alloc_offsets[i] = WP_CONVENTIONAL;
			continue;
		}

		/*
		 * This zone will be used for allocation, so mark this zone
		 * non-empty.
		 */
		btrfs_dev_clear_zone_empty(device, physical[i]);

		down_read(&dev_replace->rwsem);
		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
		up_read(&dev_replace->rwsem);

		/*
		 * The group is mapped to a sequential zone. Get the zone write
		 * pointer to determine the allocation offset within the zone.
		 */
		WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
		nofs_flag = memalloc_nofs_save();
		ret = btrfs_get_dev_zone(device, physical[i], &zone);
		memalloc_nofs_restore(nofs_flag);
		if (ret == -EIO || ret == -EOPNOTSUPP) {
			ret = 0;
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		} else if (ret) {
			goto out;
		}

		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
			btrfs_err_in_rcu(fs_info,
	"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
				zone.start << SECTOR_SHIFT,
				rcu_str_deref(device->name), device->devid);
			ret = -EIO;
			goto out;
		}

		caps[i] = (zone.capacity << SECTOR_SHIFT);

		switch (zone.cond) {
		case BLK_ZONE_COND_OFFLINE:
		case BLK_ZONE_COND_READONLY:
			btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
				  physical[i] >> device->zone_info->zone_size_shift,
				  rcu_str_deref(device->name), device->devid);
			alloc_offsets[i] = WP_MISSING_DEV;
			break;
		case BLK_ZONE_COND_EMPTY:
			alloc_offsets[i] = 0;
			break;
		case BLK_ZONE_COND_FULL:
			alloc_offsets[i] = caps[i];
			break;
		default:
			/* Partially used zone */
			alloc_offsets[i] =
					((zone.wp - zone.start) << SECTOR_SHIFT);
			__set_bit(i, active);
			break;
		}
	}
	if (num_sequential > 0)
		set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);

	if (num_conventional > 0) {
		/* Zone capacity is always zone size in emulation */
		cache->zone_capacity = cache->length;
		ret = calculate_alloc_pointer(cache, &last_alloc, new);
		if (ret) {
			btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
				  cache->start);
			goto out;
		} else if (map->num_stripes == num_conventional) {
			cache->alloc_offset = last_alloc;
			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
				&cache->runtime_flags);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[0]);
			ret = -EIO;
			goto out;
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = caps[0];
		if (test_bit(0, active))
			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
		if (map->type & BTRFS_BLOCK_GROUP_DATA) {
			btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
			ret = -EINVAL;
			goto out;
		}
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[0]);
			ret = -EIO;
			goto out;
		}
		if (alloc_offsets[1] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[1]);
			ret = -EIO;
			goto out;
		}
		if (alloc_offsets[0] != alloc_offsets[1]) {
			btrfs_err(fs_info,
			"zoned: write pointer offset mismatch of zones in DUP profile");
			ret = -EIO;
			goto out;
		}
		if (test_bit(0, active) != test_bit(1, active)) {
			if (!btrfs_zone_activate(cache)) {
				ret = -EIO;
				goto out;
			}
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
					&cache->runtime_flags);
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = min(caps[0], caps[1]);
		break;
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID0:
	case BTRFS_BLOCK_GROUP_RAID10:
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		/* non-single profiles are not supported yet */
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}
out:
	if (cache->alloc_offset > fs_info->zone_size) {
		btrfs_err(fs_info,
			"zoned: invalid write pointer %llu in block group %llu",
			cache->alloc_offset, cache->start);
		ret = -EIO;
	}

	if (cache->alloc_offset > cache->zone_capacity) {
		btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
			  cache->alloc_offset, cache->zone_capacity,
			  cache->start);
		ret = -EIO;
	}

	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret) {
		cache->meta_write_pointer = cache->alloc_offset + cache->start;
		if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
			btrfs_get_block_group(cache);
			spin_lock(&fs_info->zone_active_bgs_lock);
			list_add_tail(&cache->active_bg_list,
				      &fs_info->zone_active_bgs);
			spin_unlock(&fs_info->zone_active_bgs_lock);
		}
	} else {
		kfree(cache->physical_map);
		cache->physical_map = NULL;
	}
	bitmap_free(active);
	kfree(physical);
	kfree(caps);
	kfree(alloc_offsets);
	free_extent_map(em);

	return ret;
}
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);

	/* Check for block groups that were never activated */
	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
	    cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
	    !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
	    cache->alloc_offset == 0) {
		unusable = cache->length;
		free = 0;
	} else {
		unusable = (cache->alloc_offset - cache->used) +
			   (cache->length - cache->zone_capacity);
		free = cache->zone_capacity - cache->alloc_offset;
	}
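	/*
	 * Worked example (illustrative, not from the original source): for a
	 * 256MiB block group with zone_capacity == 224MiB, alloc_offset ==
	 * 192MiB and used == 128MiB, unusable == (192 - 128) + (256 - 224)
	 * == 96MiB and free == 224 - 192 == 32MiB.
	 */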
	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;
}
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb)
{
	if (!btrfs_is_zoned(eb->fs_info) ||
	    btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN))
		return;

	ASSERT(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));

	memzero_extent_buffer(eb, 0, eb->len);
	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
	set_extent_buffer_dirty(eb);
	set_extent_bit(&trans->dirty_pages, eb->start, eb->start + eb->len - 1,
		       EXTENT_DIRTY | EXTENT_NOWAIT, NULL);
}
bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
	u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct btrfs_block_group *cache;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!inode || !is_data_inode(&inode->vfs_inode))
		return false;

	if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
		return false;

	/*
	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
	 * extent layout the relocation code has.
	 * Furthermore we have set aside our own block group from which only the
	 * relocation "process" can allocate and make sure only one process at a
	 * time can add pages to an extent that gets relocated, so it's safe to
	 * use regular REQ_OP_WRITE for this special case.
	 */
	if (btrfs_is_data_reloc_root(inode->root))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
	btrfs_put_block_group(cache);

	return ret;
}
void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
	const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	struct btrfs_ordered_sum *sum = bbio->sums;

	if (physical < bbio->orig_physical)
		sum->logical -= bbio->orig_physical - physical;
	else
		sum->logical += physical - bbio->orig_physical;
}
static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
					u64 logical)
{
	struct extent_map_tree *em_tree = &BTRFS_I(ordered->inode)->extent_tree;
	struct extent_map *em;

	ordered->disk_bytenr = logical;

	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	em->block_start = logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);
}
static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
				      u64 logical, u64 len)
{
	struct btrfs_ordered_extent *new;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
	    split_extent_map(BTRFS_I(ordered->inode), ordered->file_offset,
			     ordered->num_bytes, len, logical))
		return false;

	new = btrfs_split_ordered_extent(ordered, len);
	if (IS_ERR(new))
		return false;
	new->disk_bytenr = logical;
	btrfs_finish_one_ordered(new);
	return true;
}
void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_sum *sum =
		list_first_entry(&ordered->list, typeof(*sum), list);
	u64 logical = sum->logical;
	u64 len = sum->len;

	while (len < ordered->disk_num_bytes) {
		sum = list_next_entry(sum, list);
		if (sum->logical == logical + len) {
			len += sum->len;
			continue;
		}
		if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
			set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
			btrfs_err(fs_info, "failed to split ordered extent");
			goto out;
		}
		logical = sum->logical;
		len = sum->len;
	}

	if (ordered->disk_bytenr != logical)
		btrfs_rewrite_logical_zoned(ordered, logical);

out:
	/*
	 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
	 * were allocated by btrfs_alloc_dummy_sum only to record the logical
	 * addresses and don't contain actual checksums. We thus must free them
	 * here so that we don't attempt to log the csums later.
	 */
	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) {
		while ((sum = list_first_entry_or_null(&ordered->list,
						       typeof(*sum), list))) {
			list_del(&sum->list);
			kfree(sum);
		}
	}
}
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret)
{
	struct btrfs_block_group *cache;
	bool ret = true;

	if (!btrfs_is_zoned(fs_info))
		return true;

	cache = btrfs_lookup_block_group(fs_info, eb->start);
	if (!cache)
		return true;

	if (cache->meta_write_pointer != eb->start) {
		btrfs_put_block_group(cache);
		cache = NULL;
		ret = false;
	} else {
		cache->meta_write_pointer = eb->start + eb->len;
	}

	*cache_ret = cache;

	return ret;
}

void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb)
{
	if (!btrfs_is_zoned(eb->fs_info) || !cache)
		return;

	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
	cache->meta_write_pointer = eb->start;
}
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
}
static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_io_context *bioc = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &mapped_length, &bioc, NULL, NULL, 1);
	if (ret || !bioc || mapped_length < PAGE_SIZE) {
		ret = -EIO;
		goto out_put_bioc;
	}

	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EINVAL;
		goto out_put_bioc;
	}

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bioc->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bioc->stripes[i].physical;
		struct btrfs_device *dev = bioc->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);
out_put_bioc:
	btrfs_put_bioc(bioc);
	return ret;
}
/*
 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
 * filling zeros between @physical_pos and the write pointer of the
 * dev-replace source device.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos)
{
	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
	struct blk_zone zone;
	u64 length;
	u64 wp;
	int ret;

	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
		return 0;

	ret = read_zone_info(fs_info, logical, &zone);
	if (ret)
		return ret;

	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);

	if (physical_pos == wp)
		return 0;

	if (physical_pos > wp)
		return -EUCLEAN;

	length = wp - physical_pos;
	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}
/*
 * Activate block group and underlying device zones
 *
 * @block_group: the block group to activate
 *
 * Return: true on success, false otherwise
 */
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_space_info *space_info = block_group->space_info;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;
	bool ret;
	int i;

	if (!btrfs_is_zoned(block_group->fs_info))
		return true;

	map = block_group->physical_map;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
		ret = true;
		goto out_unlock;
	}

	/* No space left */
	if (btrfs_zoned_bg_is_full(block_group)) {
		ret = false;
		goto out_unlock;
	}

	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;

		if (device->zone_info->max_active_zones == 0)
			continue;

		if (!btrfs_dev_set_active_zone(device, physical)) {
			/* Cannot activate the zone */
			ret = false;
			goto out_unlock;
		}
	}

	/* Successfully activated all the zones */
	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
	WARN_ON(block_group->alloc_offset != 0);
	if (block_group->zone_unusable == block_group->length) {
		block_group->zone_unusable = block_group->length - block_group->zone_capacity;
		space_info->bytes_zone_unusable -= block_group->zone_capacity;
	}
	spin_unlock(&block_group->lock);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);

	/* For the active block group list */
	btrfs_get_block_group(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	return true;

out_unlock:
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
static void wait_eb_writebacks(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	const u64 end = block_group->start + block_group->length;
	struct radix_tree_iter iter;
	struct extent_buffer *eb;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
				 block_group->start >> fs_info->sectorsize_bits) {
		eb = radix_tree_deref_slot(slot);
		if (!eb)
			continue;
		if (radix_tree_deref_retry(eb)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (eb->start < block_group->start)
			continue;
		if (eb->start >= end)
			break;

		slot = radix_tree_iter_resume(slot, &iter);
		rcu_read_unlock();
		wait_on_extent_buffer_writeback(eb);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct map_lookup *map;
	const bool is_metadata = (block_group->flags &
			(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
	int ret = 0;
	int i;

	spin_lock(&block_group->lock);
	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	/* Check if we have unwritten allocated space */
	if (is_metadata &&
	    block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
		spin_unlock(&block_group->lock);
		return -EAGAIN;
	}

	/*
	 * If we are sure that the block group is full (= no more room left for
	 * new allocation) and the IO for the last usable block is completed, we
	 * don't need to wait for the other IOs. This holds because we ensure
	 * the sequential IO submissions using the ZONE_APPEND command for data
	 * and block_group->meta_write_pointer for metadata.
	 */
	if (!fully_written) {
		spin_unlock(&block_group->lock);

		ret = btrfs_inc_block_group_ro(block_group, false);
		if (ret)
			return ret;

		/* Ensure all writes in this block group finish */
		btrfs_wait_block_group_reservations(block_group);
		/* No need to wait for NOCOW writers. Zoned mode does not allow that */
		btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
					 block_group->length);
		/* Wait for extent buffers to be written. */
		if (is_metadata)
			wait_eb_writebacks(block_group);

		spin_lock(&block_group->lock);

		/*
		 * Bail out if someone already deactivated the block group, or
		 * allocated space is left in the block group.
		 */
		if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
			      &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			btrfs_dec_block_group_ro(block_group);
			return 0;
		}

		if (block_group->reserved) {
			spin_unlock(&block_group->lock);
			btrfs_dec_block_group_ro(block_group);
			return -EAGAIN;
		}
	}

	clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
	block_group->alloc_offset = block_group->zone_capacity;
	block_group->free_space_ctl->free_space = 0;
	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);
	spin_unlock(&block_group->lock);

	map = block_group->physical_map;
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		const u64 physical = map->stripes[i].physical;

		if (device->zone_info->max_active_zones == 0)
			continue;

		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
				       physical >> SECTOR_SHIFT,
				       device->zone_info->zone_size >> SECTOR_SHIFT,
				       GFP_NOFS);
		if (ret)
			return ret;

		btrfs_dev_clear_active_zone(device, physical);
	}

	if (!fully_written)
		btrfs_dec_block_group_ro(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(!list_empty(&block_group->active_bg_list));
	list_del_init(&block_group->active_bg_list);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	/* For active_bg_list */
	btrfs_put_block_group(block_group);

	clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);

	return 0;
}
int btrfs_zone_finish(struct btrfs_block_group *block_group)
{
	if (!btrfs_is_zoned(block_group->fs_info))
		return 0;

	return do_zone_finish(block_group, false);
}
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
{
	struct btrfs_fs_info *fs_info = fs_devices->fs_info;
	struct btrfs_device *device;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return true;

	/* Check if there is a device with active zones left */
	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		struct btrfs_zoned_device_info *zinfo = device->zone_info;

		if (!device->bdev)
			continue;

		if (!zinfo->max_active_zones) {
			ret = true;
			break;
		}

		switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		case 0: /* single */
			ret = (atomic_read(&zinfo->active_zones_left) >= 1);
			break;
		case BTRFS_BLOCK_GROUP_DUP:
			ret = (atomic_read(&zinfo->active_zones_left) >= 2);
			break;
		}
		if (ret)
			break;
	}
	mutex_unlock(&fs_info->chunk_mutex);

	if (!ret)
		set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);

	return ret;
}
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
	struct btrfs_block_group *block_group;
	u64 min_alloc_bytes;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	ASSERT(block_group);

	/* No MIXED_BG on zoned btrfs. */
	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
		min_alloc_bytes = fs_info->sectorsize;
	else
		min_alloc_bytes = fs_info->nodesize;

	/* Bail out if we can allocate more data from this block group. */
	if (logical + length + min_alloc_bytes <=
	    block_group->start + block_group->zone_capacity)
		goto out;

	do_zone_finish(block_group, true);

out:
	btrfs_put_block_group(block_group);
}
static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
{
	struct btrfs_block_group *bg =
		container_of(work, struct btrfs_block_group, zone_finish_work);

	wait_on_extent_buffer_writeback(bg->last_eb);
	free_extent_buffer(bg->last_eb);
	btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
	btrfs_put_block_group(bg);
}

void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
				   struct extent_buffer *eb)
{
	if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
		return;

	if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
		btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
			  bg->start);
		return;
	}

	/* For the work */
	btrfs_get_block_group(bg);
	atomic_inc(&eb->refs);
	bg->last_eb = eb;
	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
	queue_work(system_unbound_wq, &bg->zone_finish_work);
}
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->relocation_bg_lock);
	if (fs_info->data_reloc_bg == bg->start)
		fs_info->data_reloc_bg = 0;
	spin_unlock(&fs_info->relocation_bg_lock);
}
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	if (!btrfs_is_zoned(fs_info))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->zone_info) {
			vfree(device->zone_info->zone_cache);
			device->zone_info->zone_cache = NULL;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}
bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 used = 0;
	u64 total = 0;
	u64 factor;

	ASSERT(btrfs_is_zoned(fs_info));

	if (fs_info->bg_reclaim_threshold == 0)
		return false;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		total += device->disk_total_bytes;
		used += device->bytes_used;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	factor = div64_u64(used * 100, total);
	return factor >= fs_info->bg_reclaim_threshold;
}
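
/*
 * Illustrative example (not from the original source): with 900GiB used
 * out of 1024GiB of total device space, the factor above is
 * 900 * 100 / 1024 == 87, so reclaim triggers for any
 * bg_reclaim_threshold of 87 or less.
 */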
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
				       u64 length)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	/* It should be called on a previous data relocation block group. */
	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));

	spin_lock(&block_group->lock);
	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
		goto out;

	/* All relocation extents are written. */
	if (block_group->start + block_group->alloc_offset == logical + length) {
		/* Now, release this block group for further allocations. */
		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
			  &block_group->runtime_flags);
	}

out:
	spin_unlock(&block_group->lock);
	btrfs_put_block_group(block_group);
}
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_block_group *min_bg = NULL;
	u64 min_avail = U64_MAX;
	int ret;

	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(block_group, &fs_info->zone_active_bgs,
			    active_bg_list) {
		u64 avail;

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->alloc_offset == 0 ||
		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
			spin_unlock(&block_group->lock);
			continue;
		}

		avail = block_group->zone_capacity - block_group->alloc_offset;
		if (min_avail > avail) {
			if (min_bg)
				btrfs_put_block_group(min_bg);
			min_bg = block_group;
			min_avail = avail;
			btrfs_get_block_group(min_bg);
		}
		spin_unlock(&block_group->lock);
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);

	if (!min_bg)
		return 0;

	ret = btrfs_zone_finish(min_bg);
	btrfs_put_block_group(min_bg);

	return ret < 0 ? ret : 1;
}
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				bool do_finish)
{
	struct btrfs_block_group *bg;
	int index;

	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	for (;;) {
		int ret;
		bool need_finish = false;

		down_read(&space_info->groups_sem);
		for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
			list_for_each_entry(bg, &space_info->block_groups[index],
					    list) {
				if (!spin_trylock(&bg->lock))
					continue;
				if (btrfs_zoned_bg_is_full(bg) ||
				    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
					     &bg->runtime_flags)) {
					spin_unlock(&bg->lock);
					continue;
				}
				spin_unlock(&bg->lock);

				if (btrfs_zone_activate(bg)) {
					up_read(&space_info->groups_sem);
					return 1;
				}

				need_finish = true;
			}
		}
		up_read(&space_info->groups_sem);

		if (!do_finish || !need_finish)
			break;

		ret = btrfs_zone_finish_one_bg(fs_info);
		if (ret == 0)
			break;
		if (ret < 0)
			return ret;
	}

	return 0;
}