+struct map_lookup {
+ struct cache_extent ce;
+ u64 type;
+ int io_align;
+ int io_width;
+ int stripe_len;
+ int sector_size;
+ int num_stripes;
+ int sub_stripes;
+ struct btrfs_bio_stripe stripes[];
+};
+
+struct btrfs_raid_attr {
+ int sub_stripes; /* sub_stripes info for map */
+ int dev_stripes; /* stripes per dev */
+ int devs_max; /* max devs to use */
+ int devs_min; /* min devs needed */
+ int tolerated_failures; /* max tolerated fail devs */
+ int devs_increment; /* ndevs has to be a multiple of this */
+ int ncopies; /* how many copies of the data there are */
+};
+
+extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];
+
+static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
+{
+ if (flags & BTRFS_BLOCK_GROUP_RAID10)
+ return BTRFS_RAID_RAID10;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID1)
+ return BTRFS_RAID_RAID1;
+ else if (flags & BTRFS_BLOCK_GROUP_DUP)
+ return BTRFS_RAID_DUP;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID0)
+ return BTRFS_RAID_RAID0;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID5)
+ return BTRFS_RAID_RAID5;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID6)
+ return BTRFS_RAID_RAID6;
+
+ return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
+}
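+
+/*
+ * Illustrative sketch (not part of the interface above): the returned
+ * index is meant for use with btrfs_raid_array, e.g. to look up how
+ * many copies a block group profile keeps:
+ *
+ * int ncopies = btrfs_raid_array[btrfs_bg_flags_to_raid_index(flags)].ncopies;
+ */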
+
+#define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \
+ (sizeof(struct btrfs_bio_stripe) * (n)))
+#define btrfs_map_lookup_size(n) (sizeof(struct map_lookup) + \
+ (sizeof(struct btrfs_bio_stripe) * (n)))
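+
+/*
+ * Illustrative sketch (userspace allocation assumed): both structures
+ * end in a flexible array of btrfs_bio_stripe, which is why they are
+ * sized with the macros above:
+ *
+ * struct map_lookup *map;
+ *
+ * map = malloc(btrfs_map_lookup_size(num_stripes));
+ * if (!map)
+ * return -ENOMEM;
+ * map->num_stripes = num_stripes;
+ */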
+
+/*
+ * Restriper's general type filter
+ */
+#define BTRFS_BALANCE_DATA (1ULL << 0)
+#define BTRFS_BALANCE_SYSTEM (1ULL << 1)
+#define BTRFS_BALANCE_METADATA (1ULL << 2)
+
+#define BTRFS_BALANCE_TYPE_MASK (BTRFS_BALANCE_DATA | \
+ BTRFS_BALANCE_SYSTEM | \
+ BTRFS_BALANCE_METADATA)
+
+#define BTRFS_BALANCE_FORCE (1ULL << 3)
+#define BTRFS_BALANCE_RESUME (1ULL << 4)
+
+/*
+ * Balance filters
+ */
+#define BTRFS_BALANCE_ARGS_PROFILES (1ULL << 0)
+#define BTRFS_BALANCE_ARGS_USAGE (1ULL << 1)
+#define BTRFS_BALANCE_ARGS_DEVID (1ULL << 2)
+#define BTRFS_BALANCE_ARGS_DRANGE (1ULL << 3)
+#define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4)
+#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5)
+#define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6)
+#define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7)
+#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10)
+
+/*
+ * Profile changing flags. When SOFT is set we won't relocate a chunk
+ * if it already has the target profile (even though it may be
+ * half-filled).
+ */
+#define BTRFS_BALANCE_ARGS_CONVERT (1ULL << 8)
+#define BTRFS_BALANCE_ARGS_SOFT (1ULL << 9)
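+
+/*
+ * Illustrative sketch (assuming a bargs variable of the kernel's
+ * struct btrfs_ioctl_balance_args): converting data chunks to RAID1
+ * while leaving already-converted chunks alone combines the flags as:
+ *
+ * memset(&bargs, 0, sizeof(bargs));
+ * bargs.flags = BTRFS_BALANCE_DATA;
+ * bargs.data.flags = BTRFS_BALANCE_ARGS_CONVERT | BTRFS_BALANCE_ARGS_SOFT;
+ * bargs.data.target = BTRFS_BLOCK_GROUP_RAID1;
+ */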
+
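+/*
+ * Special values used in a raid_map to mark the positions of the P and
+ * Q parity stripes of RAID5/6 chunks, which have no logical address of
+ * their own.
+ */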
+#define BTRFS_RAID5_P_STRIPE ((u64)-2)
+#define BTRFS_RAID6_Q_STRIPE ((u64)-1)
+
+/*
+ * Check whether the given range crosses a stripe boundary.
+ * This is used to make sure kernel scrub won't hit problems with
+ * METADATA in a mixed block group.
+ *
+ * Return 1 if the range crosses a STRIPE boundary.
+ * Return 0 if the range doesn't cross a STRIPE boundary, or if it
+ * doesn't belong to any block group (no boundary to cross).
+ */
+static inline int check_crossing_stripes(struct btrfs_fs_info *fs_info,
+ u64 start, u64 len)
+{
+ struct btrfs_block_group_cache *bg_cache;
+ u64 bg_offset;
+
+ bg_cache = btrfs_lookup_block_group(fs_info, start);
+ /*
+ * The range doesn't belong to any block group, so there is no
+ * boundary to cross. That is a bigger problem in itself, but it
+ * is not our concern here.
+ */
+ if (!bg_cache)
+ return 0;
+ bg_offset = start - bg_cache->key.objectid;
+
+ return (bg_offset / BTRFS_STRIPE_LEN !=
+ (bg_offset + len - 1) / BTRFS_STRIPE_LEN);
+}
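+
+/*
+ * Worked example (assuming BTRFS_STRIPE_LEN is 64KiB): a range starting
+ * 60KiB into its block group with len == 8KiB ends at offset 68KiB - 1,
+ * so 60K / 64K == 0 while (68K - 1) / 64K == 1 and the function returns
+ * 1; the same 8KiB at block group offset 0 stays in stripe 0 and
+ * returns 0.
+ */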
+
+static inline u64 calc_stripe_length(u64 type, u64 length, int num_stripes)
+{
+ u64 stripe_size;
+
+ if (type & BTRFS_BLOCK_GROUP_RAID0) {
+ /* Data is spread over all stripes, no redundancy */
+ stripe_size = length;
+ stripe_size /= num_stripes;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
+ /* Every byte is stored twice, spread over all stripes */
+ stripe_size = length * 2;
+ stripe_size /= num_stripes;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID5) {
+ /* One stripe holds the P parity */
+ stripe_size = length;
+ stripe_size /= (num_stripes - 1);
+ } else if (type & BTRFS_BLOCK_GROUP_RAID6) {
+ /* Two stripes hold the P and Q parity */
+ stripe_size = length;
+ stripe_size /= (num_stripes - 2);
+ } else {
+ /* SINGLE/DUP/RAID1: each stripe is a full copy */
+ stripe_size = length;
+ }
+ return stripe_size;
+}
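+
+/*
+ * Worked example: a RAID10 chunk of logical length 1GiB over
+ * num_stripes == 4 stores every byte twice, so each stripe occupies
+ * 1GiB * 2 / 4 == 512MiB on its device; the same chunk as RAID5
+ * dedicates one of the four stripes to parity, giving 1GiB / 3 per
+ * device.
+ */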
+
+int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+ u64 logical, u64 *length, u64 *type,
+ struct btrfs_multi_bio **multi_ret, int mirror_num,
+ u64 **raid_map);
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+ u64 logical, u64 *length,
+ struct btrfs_multi_bio **multi_ret, int mirror_num,
+ u64 **raid_map_ret);
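+
+/*
+ * Illustrative usage sketch (error handling trimmed; everything except
+ * the prototype above is an assumption): map a logical range and read
+ * from the first copy:
+ *
+ * struct btrfs_multi_bio *multi = NULL;
+ * u64 length = read_len;
+ * int ret;
+ *
+ * ret = btrfs_map_block(fs_info, READ, logical, &length,
+ * &multi, 0, NULL);
+ * if (ret == 0) {
+ * u64 physical = multi->stripes[0].physical;
+ * struct btrfs_device *dev = multi->stripes[0].dev;
+ * ... read "length" bytes at "physical" from "dev" ...
+ * free(multi);
+ * }
+ */
+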
+int btrfs_next_bg(struct btrfs_fs_info *fs_info, u64 *logical,
+ u64 *size, u64 type);
+static inline int btrfs_next_bg_metadata(struct btrfs_fs_info *fs_info,
+ u64 *logical, u64 *size)
+{
+ return btrfs_next_bg(fs_info, logical, size,
+ BTRFS_BLOCK_GROUP_METADATA);
+}
+static inline int btrfs_next_bg_system(struct btrfs_fs_info *fs_info,
+ u64 *logical, u64 *size)
+{
+ return btrfs_next_bg(fs_info, logical, size,
+ BTRFS_BLOCK_GROUP_SYSTEM);
+}
+int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
+ u64 chunk_start, u64 physical, u64 devid,
+ u64 **logical, int *naddrs, int *stripe_len);
+int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
+int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);