Btrfs: improve the logic in btrfs_can_relocate()
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 283af7a..9454045 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3098,11 +3098,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-       u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-       /* chunk -> extended profile */
-       if (extra_flags == 0)
-               extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+       u64 extra_flags = chunk_to_extended(flags) &
+                               BTRFS_EXTENDED_PROFILE_MASK;
 
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits |= extra_flags;
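The chunk_to_extended() and extended_to_chunk() wrappers used in this hunk (and in the ones below) are not part of this file; they presumably live in ctree.h alongside BTRFS_EXTENDED_PROFILE_MASK. A minimal sketch of what they are assumed to do, treating BTRFS_AVAIL_ALLOC_BIT_SINGLE as the bit that represents the "single" profile in extended format:

/* Assumed helpers (not part of this diff): convert between on-disk chunk
 * flags and the extended profile format, where "single" gets its own bit.
 */
static inline u64 chunk_to_extended(u64 flags)
{
        if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
                flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE; /* make "single" explicit */

        return flags;
}

static inline u64 extended_to_chunk(u64 flags)
{
        return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;  /* drop the extra bit again */
}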
@@ -3113,6 +3110,35 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 }
 
 /*
+ * returns target flags in extended format or 0 if restripe for this
+ * chunk_type is not in progress
+ */
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+{
+       struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+       u64 target = 0;
+
+       BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
+              !spin_is_locked(&fs_info->balance_lock));
+
+       if (!bctl)
+               return 0;
+
+       if (flags & BTRFS_BLOCK_GROUP_DATA &&
+           bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+               target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+       } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
+                  bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+               target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+       } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
+                  bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+               target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+       }
+
+       return target;
+}
+
+/*
  * @flags: available profiles in extended format (see ctree.h)
  *
  * Returns reduced profile in chunk format.  If profile changing is in
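get_restripe_target() asserts that its caller holds either volume_mutex or balance_lock. A hypothetical read-only caller, mirroring the locking pattern btrfs_reduce_alloc_profile() uses in the next hunk, would look roughly like this (peek_data_restripe_target is an illustrative name, not something this patch adds):

/* Hypothetical usage sketch: take balance_lock around the lookup when the
 * caller only needs to read the current restripe target.
 */
static u64 peek_data_restripe_target(struct btrfs_fs_info *fs_info)
{
        u64 target;

        spin_lock(&fs_info->balance_lock);
        target = get_restripe_target(fs_info, BTRFS_BLOCK_GROUP_DATA);
        spin_unlock(&fs_info->balance_lock);

        return target;  /* 0 when no restripe of data chunks is in progress */
}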
@@ -3128,31 +3154,19 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
         */
        u64 num_devices = root->fs_info->fs_devices->rw_devices +
                root->fs_info->fs_devices->missing_devices;
+       u64 target;
 
-       /* pick restriper's target profile if it's available */
+       /*
+        * see if restripe for this chunk_type is in progress, if so
+        * try to reduce to the target profile
+        */
        spin_lock(&root->fs_info->balance_lock);
-       if (root->fs_info->balance_ctl) {
-               struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
-               u64 tgt = 0;
-
-               if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
-                   (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-                   (flags & bctl->data.target)) {
-                       tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
-               } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
-                          (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-                          (flags & bctl->sys.target)) {
-                       tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
-               } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
-                          (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-                          (flags & bctl->meta.target)) {
-                       tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
-               }
-
-               if (tgt) {
+       target = get_restripe_target(root->fs_info, flags);
+       if (target) {
+               /* pick target profile only if it's already available */
+               if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
                        spin_unlock(&root->fs_info->balance_lock);
-                       flags = tgt;
-                       goto out;
+                       return extended_to_chunk(target);
                }
        }
        spin_unlock(&root->fs_info->balance_lock);
@@ -3180,10 +3194,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
                flags &= ~BTRFS_BLOCK_GROUP_RAID0;
        }
 
-out:
-       /* extended -> chunk profile */
-       flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-       return flags;
+       return extended_to_chunk(flags);
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
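get_alloc_profile(), shown here only as context, is where the avail_*_alloc_bits maintained by set_avail_alloc_bits() feed into btrfs_reduce_alloc_profile(). A sketch of its presumed shape (not taken from this diff; the system/metadata field names are assumed by analogy with avail_data_alloc_bits above):

/* Sketch: OR in the profiles currently available for the requested
 * block group type, then reduce to something that can be allocated.
 */
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                flags |= root->fs_info->avail_data_alloc_bits;
        else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                flags |= root->fs_info->avail_system_alloc_bits;
        else if (flags & BTRFS_BLOCK_GROUP_METADATA)
                flags |= root->fs_info->avail_metadata_alloc_bits;

        return btrfs_reduce_alloc_profile(root, flags);
}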
@@ -3312,7 +3323,8 @@ commit_trans:
        }
        data_sinfo->bytes_may_use += bytes;
        trace_btrfs_space_reservation(root->fs_info, "space_info",
-                                     (u64)data_sinfo, bytes, 1);
+                                     (u64)(unsigned long)data_sinfo,
+                                     bytes, 1);
        spin_unlock(&data_sinfo->lock);
 
        return 0;
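The repeated (u64)(unsigned long) casts added to these tracepoint calls are the usual kernel idiom for passing a pointer through a u64 trace argument without a "cast from pointer to integer of different size" warning on 32-bit builds. Pulled out into a hypothetical helper purely for illustration (this diff adds no such helper):

/* Illustration only: widening via unsigned long keeps the cast
 * warning-free on both 32-bit and 64-bit builds.
 */
static inline u64 ptr_to_u64(const void *ptr)
{
        return (u64)(unsigned long)ptr;
}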
@@ -3333,7 +3345,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
        spin_lock(&data_sinfo->lock);
        data_sinfo->bytes_may_use -= bytes;
        trace_btrfs_space_reservation(root->fs_info, "space_info",
-                                     (u64)data_sinfo, bytes, 0);
+                                     (u64)(unsigned long)data_sinfo,
+                                     bytes, 0);
        spin_unlock(&data_sinfo->lock);
 }
 
@@ -3403,8 +3416,6 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        int wait_for_alloc = 0;
        int ret = 0;
 
-       BUG_ON(!profile_is_valid(flags, 0));
-
        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
                ret = update_space_info(extent_root->fs_info, flags,
@@ -3611,12 +3622,15 @@ static int may_commit_transaction(struct btrfs_root *root,
        if (space_info != delayed_rsv->space_info)
                return -ENOSPC;
 
+       spin_lock(&space_info->lock);
        spin_lock(&delayed_rsv->lock);
-       if (delayed_rsv->size < bytes) {
+       if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
                spin_unlock(&delayed_rsv->lock);
+               spin_unlock(&space_info->lock);
                return -ENOSPC;
        }
        spin_unlock(&delayed_rsv->lock);
+       spin_unlock(&space_info->lock);
 
 commit:
        trans = btrfs_join_transaction(root);
@@ -3695,9 +3709,9 @@ again:
                if (used + orig_bytes <= space_info->total_bytes) {
                        space_info->bytes_may_use += orig_bytes;
                        trace_btrfs_space_reservation(root->fs_info,
-                                                     "space_info",
-                                                     (u64)space_info,
-                                                     orig_bytes, 1);
+                                             "space_info",
+                                             (u64)(unsigned long)space_info,
+                                             orig_bytes, 1);
                        ret = 0;
                } else {
                        /*
@@ -3766,9 +3780,9 @@ again:
                if (used + num_bytes < space_info->total_bytes + avail) {
                        space_info->bytes_may_use += orig_bytes;
                        trace_btrfs_space_reservation(root->fs_info,
-                                                     "space_info",
-                                                     (u64)space_info,
-                                                     orig_bytes, 1);
+                                             "space_info",
+                                             (u64)(unsigned long)space_info,
+                                             orig_bytes, 1);
                        ret = 0;
                } else {
                        wait_ordered = true;
@@ -3913,8 +3927,8 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
                        spin_lock(&space_info->lock);
                        space_info->bytes_may_use -= num_bytes;
                        trace_btrfs_space_reservation(fs_info, "space_info",
-                                                     (u64)space_info,
-                                                     num_bytes, 0);
+                                             (u64)(unsigned long)space_info,
+                                             num_bytes, 0);
                        space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
@@ -4105,7 +4119,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
        num_bytes += div64_u64(data_used + meta_used, 50);
 
        if (num_bytes * 3 > meta_used)
-               num_bytes = div64_u64(meta_used, 3);
+               num_bytes = div64_u64(meta_used, 3) * 2;
 
        return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
 }
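Worked example for the new clamp above, with illustrative numbers: if meta_used is 3 GiB, the reserve is now capped at div64_u64(3 GiB, 3) * 2 = 2 GiB instead of 1 GiB, i.e. the global metadata reserve may grow to two thirds of the used metadata space rather than one third.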
@@ -4132,14 +4146,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
                block_rsv->reserved += num_bytes;
                sinfo->bytes_may_use += num_bytes;
                trace_btrfs_space_reservation(fs_info, "space_info",
-                                             (u64)sinfo, num_bytes, 1);
+                                     (u64)(unsigned long)sinfo, num_bytes, 1);
        }
 
        if (block_rsv->reserved >= block_rsv->size) {
                num_bytes = block_rsv->reserved - block_rsv->size;
                sinfo->bytes_may_use -= num_bytes;
                trace_btrfs_space_reservation(fs_info, "space_info",
-                                             (u64)sinfo, num_bytes, 0);
+                                     (u64)(unsigned long)sinfo, num_bytes, 0);
                sinfo->reservation_progress++;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
@@ -4192,7 +4206,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
        if (!trans->bytes_reserved)
                return;
 
-       trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans,
+       trace_btrfs_space_reservation(root->fs_info, "transaction",
+                                     (u64)(unsigned long)trans,
                                      trans->bytes_reserved, 0);
        btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
        trans->bytes_reserved = 0;
@@ -4710,9 +4725,9 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                        space_info->bytes_reserved += num_bytes;
                        if (reserve == RESERVE_ALLOC) {
                                trace_btrfs_space_reservation(cache->fs_info,
-                                                             "space_info",
-                                                             (u64)space_info,
-                                                             num_bytes, 0);
+                                             "space_info",
+                                             (u64)(unsigned long)space_info,
+                                             num_bytes, 0);
                                space_info->bytes_may_use -= num_bytes;
                        }
                }
@@ -5012,10 +5027,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                if (is_data) {
                        ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
                        BUG_ON(ret);
-               } else {
-                       invalidate_mapping_pages(info->btree_inode->i_mapping,
-                            bytenr >> PAGE_CACHE_SHIFT,
-                            (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
                }
 
                ret = update_block_group(trans, root, bytenr, num_bytes, 0);
@@ -5237,28 +5248,34 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
        return 0;
 }
 
-static int get_block_group_index(struct btrfs_block_group_cache *cache)
+static int __get_block_group_index(u64 flags)
 {
        int index;
-       if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
+
+       if (flags & BTRFS_BLOCK_GROUP_RAID10)
                index = 0;
-       else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
+       else if (flags & BTRFS_BLOCK_GROUP_RAID1)
                index = 1;
-       else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
+       else if (flags & BTRFS_BLOCK_GROUP_DUP)
                index = 2;
-       else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
+       else if (flags & BTRFS_BLOCK_GROUP_RAID0)
                index = 3;
        else
                index = 4;
+
        return index;
 }
 
+static int get_block_group_index(struct btrfs_block_group_cache *cache)
+{
+       return __get_block_group_index(cache->flags);
+}
+
 enum btrfs_loop_type {
-       LOOP_FIND_IDEAL = 0,
-       LOOP_CACHING_NOWAIT = 1,
-       LOOP_CACHING_WAIT = 2,
-       LOOP_ALLOC_CHUNK = 3,
-       LOOP_NO_EMPTY_SIZE = 4,
+       LOOP_CACHING_NOWAIT = 0,
+       LOOP_CACHING_WAIT = 1,
+       LOOP_ALLOC_CHUNK = 2,
+       LOOP_NO_EMPTY_SIZE = 3,
 };
 
 /*
@@ -5272,7 +5289,6 @@ enum btrfs_loop_type {
 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *orig_root,
                                     u64 num_bytes, u64 empty_size,
-                                    u64 search_start, u64 search_end,
                                     u64 hint_byte, struct btrfs_key *ins,
                                     u64 data)
 {
@@ -5281,6 +5297,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group = NULL;
        struct btrfs_block_group_cache *used_block_group;
+       u64 search_start = 0;
        int empty_cluster = 2 * 1024 * 1024;
        int allowed_chunk_alloc = 0;
        int done_chunk_alloc = 0;
@@ -5294,8 +5311,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        bool failed_alloc = false;
        bool use_cluster = true;
        bool have_caching_bg = false;
-       u64 ideal_cache_percent = 0;
-       u64 ideal_cache_offset = 0;
 
        WARN_ON(num_bytes < root->sectorsize);
        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -5345,7 +5360,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                empty_cluster = 0;
 
        if (search_start == hint_byte) {
-ideal_cache:
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
                used_block_group = block_group;
@@ -5357,8 +5371,7 @@ ideal_cache:
                 * picked out then we don't care that the block group is cached.
                 */
                if (block_group && block_group_bits(block_group, data) &&
-                   (block_group->cached != BTRFS_CACHE_NO ||
-                    search_start == ideal_cache_offset)) {
+                   block_group->cached != BTRFS_CACHE_NO) {
                        down_read(&space_info->groups_sem);
                        if (list_empty(&block_group->list) ||
                            block_group->ro) {
@@ -5412,44 +5425,12 @@ search:
 have_block_group:
                cached = block_group_cache_done(block_group);
                if (unlikely(!cached)) {
-                       u64 free_percent;
-
                        found_uncached_bg = true;
                        ret = cache_block_group(block_group, trans,
-                                               orig_root, 1);
-                       if (block_group->cached == BTRFS_CACHE_FINISHED)
-                               goto alloc;
-
-                       free_percent = btrfs_block_group_used(&block_group->item);
-                       free_percent *= 100;
-                       free_percent = div64_u64(free_percent,
-                                                block_group->key.offset);
-                       free_percent = 100 - free_percent;
-                       if (free_percent > ideal_cache_percent &&
-                           likely(!block_group->ro)) {
-                               ideal_cache_offset = block_group->key.objectid;
-                               ideal_cache_percent = free_percent;
-                       }
-
-                       /*
-                        * The caching workers are limited to 2 threads, so we
-                        * can queue as much work as we care to.
-                        */
-                       if (loop > LOOP_FIND_IDEAL) {
-                               ret = cache_block_group(block_group, trans,
-                                                       orig_root, 0);
-                               BUG_ON(ret);
-                       }
-
-                       /*
-                        * If loop is set for cached only, try the next block
-                        * group.
-                        */
-                       if (loop == LOOP_FIND_IDEAL)
-                               goto loop;
+                                               orig_root, 0);
+                       BUG_ON(ret);
                }
 
-alloc:
                if (unlikely(block_group->ro))
                        goto loop;
 
@@ -5600,11 +5581,6 @@ unclustered_alloc:
                }
 checks:
                search_start = stripe_align(root, offset);
-               /* move on to the next group */
-               if (search_start + num_bytes >= search_end) {
-                       btrfs_add_free_space(used_block_group, offset, num_bytes);
-                       goto loop;
-               }
 
                /* move on to the next group */
                if (search_start + num_bytes >
@@ -5655,9 +5631,7 @@ loop:
        if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
                goto search;
 
-       /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
-        *                      for them to make caching progress.  Also
-        *                      determine the best possible bg to cache
+       /*
         * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
         *                      caching kthreads as we move along
         * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
@@ -5667,45 +5641,7 @@ loop:
         */
        if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
                index = 0;
-               if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
-                       found_uncached_bg = false;
-                       loop++;
-                       if (!ideal_cache_percent)
-                               goto search;
-
-                       /*
-                        * 1 of the following 2 things have happened so far
-                        *
-                        * 1) We found an ideal block group for caching that
-                        * is mostly full and will cache quickly, so we might
-                        * as well wait for it.
-                        *
-                        * 2) We searched for cached only and we didn't find
-                        * anything, and we didn't start any caching kthreads
-                        * either, so chances are we will loop through and
-                        * start a couple caching kthreads, and then come back
-                        * around and just wait for them.  This will be slower
-                        * because we will have 2 caching kthreads reading at
-                        * the same time when we could have just started one
-                        * and waited for it to get far enough to give us an
-                        * allocation, so go ahead and go to the wait caching
-                        * loop.
-                        */
-                       loop = LOOP_CACHING_WAIT;
-                       search_start = ideal_cache_offset;
-                       ideal_cache_percent = 0;
-                       goto ideal_cache;
-               } else if (loop == LOOP_FIND_IDEAL) {
-                       /*
-                        * Didn't find a uncached bg, wait on anything we find
-                        * next.
-                        */
-                       loop = LOOP_CACHING_WAIT;
-                       goto search;
-               }
-
                loop++;
-
                if (loop == LOOP_ALLOC_CHUNK) {
                       if (allowed_chunk_alloc) {
                                ret = do_chunk_alloc(trans, root, num_bytes +
@@ -5792,12 +5728,10 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
-                        u64 search_end, struct btrfs_key *ins,
-                        u64 data)
+                        struct btrfs_key *ins, u64 data)
 {
        bool final_tried = false;
        int ret;
-       u64 search_start = 0;
 
        data = btrfs_get_alloc_profile(root, data);
 again:
@@ -5812,8 +5746,7 @@ again:
 
        WARN_ON(num_bytes < root->sectorsize);
        ret = find_free_extent(trans, root, num_bytes, empty_size,
-                              search_start, search_end, hint_byte,
-                              ins, data);
+                              hint_byte, ins, data);
 
        if (ret == -ENOSPC) {
                if (!final_tried) {
@@ -6101,6 +6034,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
        btrfs_tree_lock(buf);
        clean_tree_block(trans, root, buf);
+       clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
 
        btrfs_set_lock_blocking(buf);
        btrfs_set_buffer_uptodate(buf);
@@ -6208,7 +6142,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                return ERR_CAST(block_rsv);
 
        ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
-                                  empty_size, hint, (u64)-1, &ins, 0);
+                                  empty_size, hint, &ins, 0);
        if (ret) {
                unuse_block_rsv(root->fs_info, block_rsv, blocksize);
                return ERR_PTR(ret);
@@ -6977,31 +6911,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 {
        u64 num_devices;
-       u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
-               BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
-
-       if (root->fs_info->balance_ctl) {
-               struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
-               u64 tgt = 0;
-
-               /* pick restriper's target profile and return */
-               if (flags & BTRFS_BLOCK_GROUP_DATA &&
-                   bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-                       tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
-               } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
-                          bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-                       tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
-               } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
-                          bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-                       tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
-               }
+       u64 stripped;
 
-               if (tgt) {
-                       /* extended -> chunk profile */
-                       tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-                       return tgt;
-               }
-       }
+       /*
+        * if restripe for this chunk_type is on pick target profile and
+        * return, otherwise do the usual balance
+        */
+       stripped = get_restripe_target(root->fs_info, flags);
+       if (stripped)
+               return extended_to_chunk(stripped);
 
        /*
         * we add in the count of missing devices because we want
@@ -7011,6 +6929,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
        num_devices = root->fs_info->fs_devices->rw_devices +
                root->fs_info->fs_devices->missing_devices;
 
+       stripped = BTRFS_BLOCK_GROUP_RAID0 |
+               BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
+
        if (num_devices == 1) {
                stripped |= BTRFS_BLOCK_GROUP_DUP;
                stripped = flags & ~stripped;
@@ -7023,7 +6944,6 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
                if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                             BTRFS_BLOCK_GROUP_RAID10))
                        return stripped | BTRFS_BLOCK_GROUP_DUP;
-               return flags;
        } else {
                /* they already had raid on here, just return */
                if (flags & stripped)
@@ -7036,9 +6956,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
                if (flags & BTRFS_BLOCK_GROUP_DUP)
                        return stripped | BTRFS_BLOCK_GROUP_RAID1;
 
-               /* turn single device chunks into raid0 */
-               return stripped | BTRFS_BLOCK_GROUP_RAID0;
+               /* this is drive concat, leave it alone */
        }
+
        return flags;
 }
 
@@ -7216,6 +7136,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
        u64 min_free;
        u64 dev_min = 1;
        u64 dev_nr = 0;
+       u64 target;
        int index;
        int full = 0;
        int ret = 0;
@@ -7256,13 +7177,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
        /*
         * ok we don't have enough space, but maybe we have free space on our
         * devices to allocate new chunks for relocation, so loop through our
-        * alloc devices and guess if we have enough space.  However, if we
-        * were marked as full, then we know there aren't enough chunks, and we
-        * can just return.
+        * alloc devices and guess if we have enough space.  if this block
+        * group is going to be restriped, run checks against the target
+        * profile instead of the current one.
         */
        ret = -1;
-       if (full)
-               goto out;
 
        /*
         * index:
@@ -7272,7 +7191,20 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
         *      3: raid0
         *      4: single
         */
-       index = get_block_group_index(block_group);
+       target = get_restripe_target(root->fs_info, block_group->flags);
+       if (target) {
+               index = __get_block_group_index(extended_to_chunk(target));
+       } else {
+               /*
+                * this is just a balance, so if we were marked as full
+                * we know there is no space for a new chunk
+                */
+               if (full)
+                       goto out;
+
+               index = get_block_group_index(block_group);
+       }
+
        if (index == 0) {
                dev_min = 4;
                /* Divide by 2 */
@@ -7680,11 +7612,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-       u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-       /* chunk -> extended profile */
-       if (extra_flags == 0)
-               extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+       u64 extra_flags = chunk_to_extended(flags) &
+                               BTRFS_EXTENDED_PROFILE_MASK;
 
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits &= ~extra_flags;
@@ -7886,9 +7815,16 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
        u64 start;
        u64 end;
        u64 trimmed = 0;
+       u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret = 0;
 
-       cache = btrfs_lookup_block_group(fs_info, range->start);
+       /*
+        * try to trim all FS space, our block group may start from non-zero.
+        */
+       if (range->len == total_bytes)
+               cache = btrfs_lookup_first_block_group(fs_info, range->start);
+       else
+               cache = btrfs_lookup_block_group(fs_info, range->start);
 
        while (cache) {
                if (cache->key.objectid >= (range->start + range->len)) {
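For reference, the "trim the whole filesystem" case handled above is what a plain fstrim run produces. A hedged user-space sketch follows; it assumes btrfs's FITRIM ioctl handler clamps range.len to the filesystem's total bytes before calling btrfs_trim_fs(), which is what the equality check against total_bytes suggests.

/* User-space sketch: ask the kernel to trim the entire filesystem, as
 * `fstrim <mountpoint>` does.  Error handling reduced to the return value.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */

static int trim_whole_fs(int mount_fd)
{
        struct fstrim_range range = {
                .start = 0,
                .len = UINT64_MAX,      /* "everything"; assumed clamped in-kernel */
                .minlen = 0,
        };

        return ioctl(mount_fd, FITRIM, &range); /* 0 on success, -1 + errno on error */
}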