// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0; if it is, we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction.  Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was born
 *     out of a pre-tickets era where we could end up committing the transaction
 *     thousands of times in a row without making progress.  Now thanks to our
 *     ticketing system we know if we're not making progress and can error
 *     everybody out after a few commits rather than burning the disk hoping for
 *     a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */
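
/*
 * Worked example of the overcommit math above (illustrative numbers only):
 * with 10GiB of unallocated space, a DUP metadata profile (factor 2) and
 * BTRFS_RESERVE_FLUSH_ALL, calc_available_free_space() below yields
 * 10GiB / 2 / 8 = 640MiB of headroom, so btrfs_can_overcommit() lets a
 * reservation through as long as used + bytes stays below
 * writable_total_bytes() + 640MiB.
 */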

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
                                 bool may_use_included)
{
        ASSERT(s_info);
        return s_info->bytes_used + s_info->bytes_reserved +
                s_info->bytes_pinned + s_info->bytes_readonly +
                s_info->bytes_zone_unusable +
                (may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        list_for_each_entry(found, head, list)
                found->full = 0;
}

/*
 * Block groups with more than this value (in percent) of unusable space will
 * be scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH                      (75)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
        if (btrfs_is_zoned(fs_info))
                return fs_info->zone_size;

        ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

        if (flags & BTRFS_BLOCK_GROUP_DATA)
                return BTRFS_MAX_DATA_CHUNK_SIZE;
        else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                return SZ_32M;

        /* Handle BTRFS_BLOCK_GROUP_METADATA */
        if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
                return SZ_1G;

        return SZ_256M;
}

/*
 * Update default chunk size.
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
                                        u64 chunk_size)
{
        WRITE_ONCE(space_info->chunk_size, chunk_size);
}

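/*
 * Allocate a new space_info for the block group type in @flags, register it
 * in sysfs and link it into fs_info->space_info.
 */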
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
        struct btrfs_space_info *space_info;
        int i;
        int ret;

        space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
        if (!space_info)
                return -ENOMEM;

        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                INIT_LIST_HEAD(&space_info->block_groups[i]);
        init_rwsem(&space_info->groups_sem);
        spin_lock_init(&space_info->lock);
        space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
        INIT_LIST_HEAD(&space_info->ro_bgs);
        INIT_LIST_HEAD(&space_info->tickets);
        INIT_LIST_HEAD(&space_info->priority_tickets);
        space_info->clamp = 1;
        btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

        if (btrfs_is_zoned(info))
                space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

        ret = btrfs_sysfs_add_space_info_type(info, space_info);
        if (ret)
                return ret;

        list_add(&space_info->list, &info->space_info);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                info->data_sinfo = space_info;

        return ret;
}


int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
        struct btrfs_super_block *disk_super;
        u64 features;
        u64 flags;
        int mixed = 0;
        int ret;

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return -EINVAL;

        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = 1;

        flags = BTRFS_BLOCK_GROUP_SYSTEM;
        ret = create_space_info(fs_info, flags);
        if (ret)
                goto out;

        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
                ret = create_space_info(fs_info, flags);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
                ret = create_space_info(fs_info, flags);
                if (ret)
                        goto out;

                flags = BTRFS_BLOCK_GROUP_DATA;
                ret = create_space_info(fs_info, flags);
        }
out:
        return ret;
}

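/*
 * Fold a block group's sizes into its matching space_info and link the group
 * into the space_info's per-raid-index list.  The new space may let queued
 * tickets be satisfied, so btrfs_try_granting_tickets() is run as well.
 */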
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
                                struct btrfs_block_group *block_group)
{
        struct btrfs_space_info *found;
        int factor, index;

        factor = btrfs_bg_type_to_factor(block_group->flags);

        found = btrfs_find_space_info(info, block_group->flags);
        ASSERT(found);
        spin_lock(&found->lock);
        found->total_bytes += block_group->length;
        if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
                found->active_total_bytes += block_group->length;
        found->disk_total += block_group->length * factor;
        found->bytes_used += block_group->used;
        found->disk_used += block_group->used * factor;
        found->bytes_readonly += block_group->bytes_super;
        found->bytes_zone_unusable += block_group->zone_unusable;
        if (block_group->length > 0)
                found->full = 0;
        btrfs_try_granting_tickets(info, found);
        spin_unlock(&found->lock);

        block_group->space_info = found;

        index = btrfs_bg_flags_to_raid_index(block_group->flags);
        down_write(&found->groups_sem);
        list_add_tail(&block_group->list, &found->block_groups[index]);
        up_write(&found->groups_sem);
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
                                               u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        list_for_each_entry(found, head, list) {
                if (found->flags & flags)
                        return found;
        }
        return NULL;
}

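/*
 * Estimate how much of the remaining unallocated device space this space_info
 * could still use: scale the free chunk space down by the raid factor, then
 * keep only a fraction of it as overcommit headroom (1/8 if the caller may
 * flush everything, 1/2 otherwise).
 */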
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     enum btrfs_reserve_flush_enum flush)
{
        u64 profile;
        u64 avail;
        int factor;

        if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
                profile = btrfs_system_alloc_profile(fs_info);
        else
                profile = btrfs_metadata_alloc_profile(fs_info);

        avail = atomic64_read(&fs_info->free_chunk_space);

        /*
         * If we have dup, raid1 or raid10 then only half of the free
         * space is actually usable.  For raid56, the space info used
         * doesn't include the parity drive, so we don't have to
         * change the math
         */
        factor = btrfs_bg_type_to_factor(profile);
        avail = div_u64(avail, factor);

        /*
         * If we aren't flushing all things, let us overcommit up to
         * half of the space. If we can flush, don't let us overcommit
         * too much, let it overcommit up to 1/8 of the space.
         */
        if (flush == BTRFS_RESERVE_FLUSH_ALL)
                avail >>= 3;
        else
                avail >>= 1;
        return avail;
}

static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
                                       struct btrfs_space_info *space_info)
{
        /*
         * On regular filesystem, all total_bytes are always writable. On zoned
         * filesystem, there may be a limitation imposed by max_active_zones.
         * For metadata allocation, we cannot finish an existing active block
         * group to avoid a deadlock. Thus, we need to consider only the active
         * groups to be writable for metadata space.
         */
        if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
                return space_info->total_bytes;

        return space_info->active_total_bytes;
}

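/*
 * Return 1 if a reservation of @bytes would still fit within the writable
 * total plus the estimated unallocated headroom (zero for metadata on zoned
 * filesystems with active zone tracking).  Data and mixed space is never
 * overcommitted.
 */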
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
                         struct btrfs_space_info *space_info, u64 bytes,
                         enum btrfs_reserve_flush_enum flush)
{
        u64 avail;
        u64 used;

        /* Don't overcommit when in mixed mode */
        if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
                return 0;

        used = btrfs_space_info_used(space_info, true);
        if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
            (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
                avail = 0;
        else
                avail = calc_available_free_space(fs_info, space_info, flush);

        if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
                return 1;
        return 0;
}

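/* Take a ticket off its list and drop its bytes from the reclaim tally. */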
static void remove_ticket(struct btrfs_space_info *space_info,
                          struct reserve_ticket *ticket)
{
        if (!list_empty(&ticket->list)) {
                list_del_init(&ticket->list);
                ASSERT(space_info->reclaim_size >= ticket->bytes);
                space_info->reclaim_size -= ticket->bytes;
        }
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
                                struct btrfs_space_info *space_info)
{
        struct list_head *head;
        enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

        lockdep_assert_held(&space_info->lock);

        head = &space_info->priority_tickets;
again:
        while (!list_empty(head)) {
                struct reserve_ticket *ticket;
                u64 used = btrfs_space_info_used(space_info, true);

                ticket = list_first_entry(head, struct reserve_ticket, list);

                /* Check and see if our ticket can be satisfied now. */
                if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
                    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
                                         flush)) {
                        btrfs_space_info_update_bytes_may_use(fs_info,
                                                              space_info,
                                                              ticket->bytes);
                        remove_ticket(space_info, ticket);
                        ticket->bytes = 0;
                        space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        break;
                }
        }

        if (head == &space_info->priority_tickets) {
                head = &space_info->tickets;
                flush = BTRFS_RESERVE_FLUSH_ALL;
                goto again;
        }
}

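/* Dump size/reserved of one of the fs-global block reserves under its lock. */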
#define DUMP_BLOCK_RSV(fs_info, rsv_name)                               \
do {                                                                    \
        struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;           \
        spin_lock(&__rsv->lock);                                        \
        btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",      \
                   __rsv->size, __rsv->reserved);                       \
        spin_unlock(&__rsv->lock);                                      \
} while (0)

static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
{
        switch (space_info->flags) {
        case BTRFS_BLOCK_GROUP_SYSTEM:
                return "SYSTEM";
        case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
                return "DATA+METADATA";
        case BTRFS_BLOCK_GROUP_DATA:
                return "DATA";
        case BTRFS_BLOCK_GROUP_METADATA:
                return "METADATA";
        default:
                return "UNKNOWN";
        }
}

static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
{
        DUMP_BLOCK_RSV(fs_info, global_block_rsv);
        DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
        DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
        DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
        DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *info)
{
        const char *flag_str = space_info_flag_to_str(info);

        lockdep_assert_held(&info->lock);

        /* The free space could be negative in case of overcommit */
        btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
                   flag_str,
                   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
                   info->full ? "" : "not ");
        btrfs_info(fs_info,
"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
                info->total_bytes, info->bytes_used, info->bytes_pinned,
                info->bytes_reserved, info->bytes_may_use,
                info->bytes_readonly, info->bytes_zone_unusable);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
                           struct btrfs_space_info *info, u64 bytes,
                           int dump_block_groups)
{
        struct btrfs_block_group *cache;
        int index = 0;

        spin_lock(&info->lock);
        __btrfs_dump_space_info(fs_info, info);
        dump_global_block_rsv(fs_info);
        spin_unlock(&info->lock);

        if (!dump_block_groups)
                return;

        down_read(&info->groups_sem);
again:
        list_for_each_entry(cache, &info->block_groups[index], list) {
                spin_lock(&cache->lock);
                btrfs_info(fs_info,
                        "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
                        cache->start, cache->length, cache->used, cache->pinned,
                        cache->reserved, cache->zone_unusable,
                        cache->ro ? "[readonly]" : "");
                spin_unlock(&cache->lock);
                btrfs_dump_free_space(cache, bytes);
        }
        if (++index < BTRFS_NR_RAID_TYPES)
                goto again;
        up_read(&info->groups_sem);
}

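/*
 * Convert @to_reclaim bytes into a number of items, based on the worst-case
 * metadata size of inserting a single item.
 */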
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
                                        u64 to_reclaim)
{
        u64 bytes;
        u64 nr;

        bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
        nr = div64_u64(to_reclaim, bytes);
        if (!nr)
                nr = 1;
        return nr;
}

#define EXTENT_SIZE_PER_ITEM    SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
                            struct btrfs_space_info *space_info,
                            u64 to_reclaim, bool wait_ordered,
                            bool for_preempt)
{
        struct btrfs_trans_handle *trans;
        u64 delalloc_bytes;
        u64 ordered_bytes;
        u64 items;
        long time_left;
        int loops;

        delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
        ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
        if (delalloc_bytes == 0 && ordered_bytes == 0)
                return;

        /* Calculate the number of pages we need to flush for the space reservation */
        if (to_reclaim == U64_MAX) {
                items = U64_MAX;
        } else {
                /*
                 * to_reclaim is set to however much metadata we need to
                 * reclaim, but reclaiming that much data doesn't really track
                 * exactly.  What we really want to do is reclaim full inode's
                 * worth of reservations, however that's not available to us
                 * here.  We will take a fraction of the delalloc bytes for our
                 * flushing loops and hope for the best.  Delalloc will expand
                 * the amount we write to cover an entire dirty extent, which
                 * will reclaim the metadata reservation for that range.  If
                 * it's not enough subsequent flush stages will be more
                 * aggressive.
                 */
                to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
                items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
        }

        trans = current->journal_info;

        /*
         * If we are doing more ordered than delalloc we need to just wait on
         * ordered extents, otherwise we'll waste time trying to flush delalloc
         * that likely won't give us the space back we need.
         */
        if (ordered_bytes > delalloc_bytes && !for_preempt)
                wait_ordered = true;

        loops = 0;
        while ((delalloc_bytes || ordered_bytes) && loops < 3) {
                u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
                long nr_pages = min_t(u64, temp, LONG_MAX);
                int async_pages;

                btrfs_start_delalloc_roots(fs_info, nr_pages, true);

                /*
                 * We need to make sure any outstanding async pages are now
                 * processed before we continue.  This is because things like
                 * sync_inode() try to be smart and skip writing if the inode is
                 * marked clean.  We don't use filemap_fdatawrite() for flushing
                 * because we want to control how many pages we write out at a
                 * time, thus this is the only safe way to make sure we've
                 * waited for outstanding compressed workers to have started
                 * their jobs and thus have ordered extents set up properly.
                 *
                 * This exists because we do not want to wait for each
                 * individual inode to finish its async work, we simply want to
                 * start the IO on everybody, and then come back here and wait
                 * for all of the async work to catch up.  Once we're done with
                 * that we know we'll have ordered extents for everything and we
                 * can decide if we wait for that or not.
                 *
                 * If we choose to replace this in the future, make absolutely
                 * sure that the proper waiting is being done in the async case,
                 * as there have been bugs in that area before.
                 */
                async_pages = atomic_read(&fs_info->async_delalloc_pages);
                if (!async_pages)
                        goto skip_async;

                /*
                 * We don't want to wait forever, if we wrote fewer pages in
                 * this loop than we have outstanding, only wait for that number
                 * of pages, otherwise we can wait for all async pages to finish
                 * before continuing.
                 */
                if (async_pages > nr_pages)
                        async_pages -= nr_pages;
                else
                        async_pages = 0;
                wait_event(fs_info->async_submit_wait,
                           atomic_read(&fs_info->async_delalloc_pages) <=
                           async_pages);
skip_async:
                loops++;
                if (wait_ordered && !trans) {
                        btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
                } else {
                        time_left = schedule_timeout_killable(1);
                        if (time_left)
                                break;
                }

                /*
                 * If we are for preemption we just want a one-shot of delalloc
                 * flushing so we can stop flushing if we decide we don't need
                 * to anymore.
                 */
                if (for_preempt)
                        break;

                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets) &&
                    list_empty(&space_info->priority_tickets)) {
                        spin_unlock(&space_info->lock);
                        break;
                }
                spin_unlock(&space_info->lock);

                delalloc_bytes = percpu_counter_sum_positive(
                                                &fs_info->delalloc_bytes);
                ordered_bytes = percpu_counter_sum_positive(
                                                &fs_info->ordered_bytes);
        }
}


/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
                        struct btrfs_space_info *space_info, u64 num_bytes,
                        enum btrfs_flush_state state, bool for_preempt)
{
        struct btrfs_root *root = fs_info->tree_root;
        struct btrfs_trans_handle *trans;
        int nr;
        int ret = 0;

        switch (state) {
        case FLUSH_DELAYED_ITEMS_NR:
        case FLUSH_DELAYED_ITEMS:
                if (state == FLUSH_DELAYED_ITEMS_NR)
                        nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
                else
                        nr = -1;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_run_delayed_items_nr(trans, nr);
                btrfs_end_transaction(trans);
                break;
        case FLUSH_DELALLOC:
        case FLUSH_DELALLOC_WAIT:
        case FLUSH_DELALLOC_FULL:
                if (state == FLUSH_DELALLOC_FULL)
                        num_bytes = U64_MAX;
                shrink_delalloc(fs_info, space_info, num_bytes,
                                state != FLUSH_DELALLOC, for_preempt);
                break;
        case FLUSH_DELAYED_REFS_NR:
        case FLUSH_DELAYED_REFS:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                if (state == FLUSH_DELAYED_REFS_NR)
                        nr = calc_reclaim_items_nr(fs_info, num_bytes);
                else
                        nr = 0;
                btrfs_run_delayed_refs(trans, nr);
                btrfs_end_transaction(trans);
                break;
        case ALLOC_CHUNK:
        case ALLOC_CHUNK_FORCE:
                /*
                 * For metadata space on zoned filesystem, reaching here means we
                 * don't have enough space left in active_total_bytes. Try to
                 * activate a block group first, because we may have an inactive
                 * block group already allocated.
                 */
                ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
                if (ret < 0)
                        break;
                else if (ret == 1)
                        break;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_chunk_alloc(trans,
                                btrfs_get_alloc_profile(fs_info, space_info->flags),
                                (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
                                        CHUNK_ALLOC_FORCE);
                btrfs_end_transaction(trans);

                /*
                 * For metadata space on zoned filesystem, allocating a new
                 * chunk is not enough. We still need to activate the block
                 * group. Activate the newly allocated block group by (maybe)
                 * finishing a block group.
                 */
                if (ret == 1) {
                        ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
                        /*
                         * Revert to the original ret regardless of whether we
                         * could finish one block group or not.
                         */
                        if (ret >= 0)
                                ret = 1;
                }

                if (ret > 0 || ret == -ENOSPC)
                        ret = 0;
                break;
        case RUN_DELAYED_IPUTS:
                /*
                 * If we have pending delayed iputs then we could free up a
                 * bunch of pinned space, so make sure we run the iputs before
                 * we do our pinned bytes check below.
                 */
                btrfs_run_delayed_iputs(fs_info);
                btrfs_wait_on_delayed_iputs(fs_info);
                break;
        case COMMIT_TRANS:
                ASSERT(current->journal_info == NULL);
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_commit_transaction(trans);
                break;
        default:
                ret = -ENOSPC;
                break;
        }

        trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
                                ret, for_preempt);
        return;
}

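/*
 * How much the async flusher should try to reclaim: the sum of all queued
 * tickets, plus our overage if the current usage already exceeds the writable
 * total plus the available headroom.
 */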
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
                                 struct btrfs_space_info *space_info)
{
        u64 used;
        u64 avail;
        u64 total;
        u64 to_reclaim = space_info->reclaim_size;

        lockdep_assert_held(&space_info->lock);

        avail = calc_available_free_space(fs_info, space_info,
                                          BTRFS_RESERVE_FLUSH_ALL);
        used = btrfs_space_info_used(space_info, true);

        /*
         * We may be flushing because suddenly we have less space than we had
         * before, and now we're well over-committed based on our current free
         * space.  If that's the case add in our overage so we make sure to put
         * appropriate pressure on the flushing state machine.
         */
        total = writable_total_bytes(fs_info, space_info);
        if (total + avail < used)
                to_reclaim += used - (total + avail);

        return to_reclaim;
}

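/*
 * Decide whether the preemptive background flusher should (keep) running.
 * Must be called with the space_info lock held.
 */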
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
                                    struct btrfs_space_info *space_info)
{
        u64 global_rsv_size = fs_info->global_block_rsv.reserved;
        u64 ordered, delalloc;
        u64 total = writable_total_bytes(fs_info, space_info);
        u64 thresh;
        u64 used;

        thresh = div_factor_fine(total, 90);

        lockdep_assert_held(&space_info->lock);

        /* If we're just plain full then async reclaim just slows us down. */
        if ((space_info->bytes_used + space_info->bytes_reserved +
             global_rsv_size) >= thresh)
                return false;

        used = space_info->bytes_may_use + space_info->bytes_pinned;

        /* The total flushable belongs to the global rsv, don't flush. */
        if (global_rsv_size >= used)
                return false;

        /*
         * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
         * that devoted to other reservations then there's no sense in flushing,
         * we don't have a lot of things that need flushing.
         */
        if (used - global_rsv_size <= SZ_128M)
                return false;

        /*
         * We have tickets queued, bail so we don't compete with the async
         * flushers.
         */
        if (space_info->reclaim_size)
                return false;

        /*
         * If we have over half of the free space occupied by reservations or
         * pinned then we want to start flushing.
         *
         * We do not do the traditional thing here, which is to say
         *
         *   if (used >= ((total_bytes + avail) / 2))
         *     return 1;
         *
         * because this doesn't quite work how we want.  If we had more than 50%
         * of the space_info used by bytes_used and we had 0 available we'd just
         * constantly run the background flusher.  Instead we want it to kick in
         * if our reclaimable space exceeds our clamped free space.
         *
         * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
         * the following:
         *
         * Amount of RAM        Minimum threshold       Maximum threshold
         *
         *        256GiB                     1GiB                  128GiB
         *        128GiB                   512MiB                   64GiB
         *         64GiB                   256MiB                   32GiB
         *         32GiB                   128MiB                   16GiB
         *         16GiB                    64MiB                    8GiB
         *
         * These are the range our thresholds will fall in, corresponding to how
         * much delalloc we need for the background flusher to kick in.
         */

        thresh = calc_available_free_space(fs_info, space_info,
                                           BTRFS_RESERVE_FLUSH_ALL);
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_readonly + global_rsv_size;
        if (used < total)
                thresh += total - used;
        thresh >>= space_info->clamp;

        used = space_info->bytes_pinned;

        /*
         * If we have more ordered bytes than delalloc bytes then we're either
         * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
         * around.  Preemptive flushing is only useful in that it can free up
         * space before tickets need to wait for things to finish.  In the case
         * of ordered extents, preemptively waiting on ordered extents gets us
         * nothing, if our reservations are tied up in ordered extents we'll
         * simply have to slow down writers by forcing them to wait on ordered
         * extents.
         *
         * In the case that ordered is larger than delalloc, only include the
         * block reserves that we would actually be able to directly reclaim
         * from.  In this case if we're heavy on metadata operations this will
         * clearly be heavy enough to warrant preemptive flushing.  In the case
         * of heavy DIO or ordered reservations, preemptive flushing will just
         * waste time and cause us to slow down.
         *
         * We want to make sure we truly are maxed out on ordered however, so
         * cut ordered in half, and if it's still higher than delalloc then we
         * can keep flushing.  This is to avoid the case where we start
         * flushing, and now delalloc == ordered and we stop preemptively
         * flushing when we could still have several gigs of delalloc to flush.
         */
        ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
        delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
        if (ordered >= delalloc)
                used += fs_info->delayed_refs_rsv.reserved +
                        fs_info->delayed_block_rsv.reserved;
        else
                used += space_info->bytes_may_use - global_rsv_size;

        return (used >= thresh && !btrfs_fs_closing(fs_info) &&
                !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

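/*
 * Last-resort path: satisfy @ticket straight from the global block rsv, but
 * only for tickets that opted in via ->steal and only if the global rsv stays
 * at least 1/10th full afterwards.
 */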
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
                                  struct btrfs_space_info *space_info,
                                  struct reserve_ticket *ticket)
{
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        u64 min_bytes;

        if (!ticket->steal)
                return false;

        if (global_rsv->space_info != space_info)
                return false;

        spin_lock(&global_rsv->lock);
        min_bytes = div_factor(global_rsv->size, 1);
        if (global_rsv->reserved < min_bytes + ticket->bytes) {
                spin_unlock(&global_rsv->lock);
                return false;
        }
        global_rsv->reserved -= ticket->bytes;
        remove_ticket(space_info, ticket);
        ticket->bytes = 0;
        wake_up(&ticket->wait);
        space_info->tickets_id++;
        if (global_rsv->reserved < global_rsv->size)
                global_rsv->full = 0;
        spin_unlock(&global_rsv->lock);

        return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info:    fs_info for this fs
 * @space_info: the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
                                   struct btrfs_space_info *space_info)
{
        struct reserve_ticket *ticket;
        u64 tickets_id = space_info->tickets_id;
        const bool aborted = BTRFS_FS_ERROR(fs_info);

        trace_btrfs_fail_all_tickets(fs_info, space_info);

        if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
                btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
                __btrfs_dump_space_info(fs_info, space_info);
        }

        while (!list_empty(&space_info->tickets) &&
               tickets_id == space_info->tickets_id) {
                ticket = list_first_entry(&space_info->tickets,
                                          struct reserve_ticket, list);

                if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
                        return true;

                if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
                        btrfs_info(fs_info, "failing ticket with %llu bytes",
                                   ticket->bytes);

                remove_ticket(space_info, ticket);
                if (aborted)
                        ticket->error = -EIO;
                else
                        ticket->error = -ENOSPC;
                wake_up(&ticket->wait);

                /*
                 * We're just throwing tickets away, so more flushing may not
                 * trip over btrfs_try_granting_tickets, so we need to call it
                 * here to see if we can make progress with the next ticket in
                 * the list.
                 */
                if (!aborted)
                        btrfs_try_granting_tickets(fs_info, space_info);
        }
        return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 to_reclaim;
        enum btrfs_flush_state flush_state;
        int commit_cycles = 0;
        u64 last_tickets_id;

        fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
        space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

        spin_lock(&space_info->lock);
        to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
        if (!to_reclaim) {
                space_info->flush = 0;
                spin_unlock(&space_info->lock);
                return;
        }
        last_tickets_id = space_info->tickets_id;
        spin_unlock(&space_info->lock);

        flush_state = FLUSH_DELAYED_ITEMS_NR;
        do {
                flush_space(fs_info, space_info, to_reclaim, flush_state, false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
                        spin_unlock(&space_info->lock);
                        return;
                }
                to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
                                                              space_info);
                if (last_tickets_id == space_info->tickets_id) {
                        flush_state++;
                } else {
                        last_tickets_id = space_info->tickets_id;
                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                        if (commit_cycles)
                                commit_cycles--;
                }

                /*
                 * We do not want to empty the system of delalloc unless we're
                 * under heavy pressure, so allow one trip through the flushing
                 * logic before we start doing a FLUSH_DELALLOC_FULL.
                 */
                if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
                        flush_state++;

                /*
                 * We don't want to force a chunk allocation until we've tried
                 * pretty hard to reclaim space.  Think of the case where we
                 * freed up a bunch of space and so have a lot of pinned space
                 * to reclaim.  We would rather use that than possibly create an
                 * underutilized metadata chunk.  So if this is our first run
                 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
                 * commit the transaction.  If nothing has changed the next go
                 * around then we can force a chunk allocation.
                 */
                if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
                        flush_state++;

                if (flush_state > COMMIT_TRANS) {
                        commit_cycles++;
                        if (commit_cycles > 2) {
                                if (maybe_fail_all_tickets(fs_info, space_info)) {
                                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                                        commit_cycles--;
                                } else {
                                        space_info->flush = 0;
                                }
                        } else {
                                flush_state = FLUSH_DELAYED_ITEMS_NR;
                        }
                }
                spin_unlock(&space_info->lock);
        } while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        struct btrfs_block_rsv *delayed_block_rsv;
        struct btrfs_block_rsv *delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv;
        struct btrfs_block_rsv *trans_rsv;
        int loops = 0;

        fs_info = container_of(work, struct btrfs_fs_info,
                               preempt_reclaim_work);
        space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
        delayed_block_rsv = &fs_info->delayed_block_rsv;
        delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        global_rsv = &fs_info->global_block_rsv;
        trans_rsv = &fs_info->trans_block_rsv;

        spin_lock(&space_info->lock);
        while (need_preemptive_reclaim(fs_info, space_info)) {
                enum btrfs_flush_state flush;
                u64 delalloc_size = 0;
                u64 to_reclaim, block_rsv_size;
                u64 global_rsv_size = global_rsv->reserved;

                loops++;

                /*
                 * We don't have a precise counter for the metadata being
                 * reserved for delalloc, so we'll approximate it by subtracting
                 * out the block rsv's space from the bytes_may_use.  If that
                 * amount is higher than the individual reserves, then we can
                 * assume it's tied up in delalloc reservations.
                 */
                block_rsv_size = global_rsv_size +
                        delayed_block_rsv->reserved +
                        delayed_refs_rsv->reserved +
                        trans_rsv->reserved;
                if (block_rsv_size < space_info->bytes_may_use)
                        delalloc_size = space_info->bytes_may_use - block_rsv_size;

                /*
                 * We don't want to include the global_rsv in our calculation,
                 * because that's space we can't touch.  Subtract it from the
                 * block_rsv_size for the next checks.
                 */
                block_rsv_size -= global_rsv_size;

                /*
                 * We really want to avoid flushing delalloc too much, as it
                 * could result in poor allocation patterns, so only flush it if
                 * it's larger than the rest of the pools combined.
                 */
                if (delalloc_size > block_rsv_size) {
                        to_reclaim = delalloc_size;
                        flush = FLUSH_DELALLOC;
                } else if (space_info->bytes_pinned >
                           (delayed_block_rsv->reserved +
                            delayed_refs_rsv->reserved)) {
                        to_reclaim = space_info->bytes_pinned;
                        flush = COMMIT_TRANS;
                } else if (delayed_block_rsv->reserved >
                           delayed_refs_rsv->reserved) {
                        to_reclaim = delayed_block_rsv->reserved;
                        flush = FLUSH_DELAYED_ITEMS_NR;
                } else {
                        to_reclaim = delayed_refs_rsv->reserved;
                        flush = FLUSH_DELAYED_REFS_NR;
                }

                spin_unlock(&space_info->lock);

                /*
                 * We don't want to reclaim everything, just a portion, so scale
                 * down the to_reclaim by 1/4.  If it takes us down to 0,
                 * reclaim one item's worth.
                 */
                to_reclaim >>= 2;
                if (!to_reclaim)
                        to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
                flush_space(fs_info, space_info, to_reclaim, flush, true);
                cond_resched();
                spin_lock(&space_info->lock);
        }

        /* We only went through once, back off our clamping. */
        if (loops == 1 && !space_info->reclaim_size)
                space_info->clamp = max(1, space_info->clamp - 1);
        trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
        spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by running the
 *   iputs.
 *
1269  *   For data we start with alloc chunk force, however we could have been full
1270  *   before, and then the transaction commit could have freed new block groups,
1271  *   so if we now have space to allocate do the force chunk allocation.
1272  */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_FULL,
	RUN_DELAYED_IPUTS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};

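/*
 * Async worker for data reservation tickets.  While the data space info is
 * not yet full, simply force chunk allocations.  Once full, walk
 * data_flush_states in order, restarting from the first state whenever a
 * flush makes progress (i.e. ->tickets_id changes), and failing all
 * remaining tickets once a complete pass reclaims nothing.
 */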
static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		/* Something happened, fail everything and bail. */
		if (BTRFS_FS_ERROR(fs_info))
			goto aborted_fs;
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state], false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}

			/* Something happened, fail everything and bail. */
			if (BTRFS_FS_ERROR(fs_info))
				goto aborted_fs;
		}
		spin_unlock(&space_info->lock);
	}
	return;

aborted_fs:
	maybe_fail_all_tickets(fs_info, space_info);
	space_info->flush = 0;
	spin_unlock(&space_info->lock);
}

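/*
 * Wire up the async reclaim workers.  These are kicked from
 * __reserve_bytes() (and the preemptive path) via queue_work(), so this
 * needs to run during fs_info setup, before reservations start.
 */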
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	FLUSH_DELALLOC_FULL,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

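/*
 * Synchronously walk the given flush states for a priority ticket (one that
 * cannot wait on the async worker), flushing in the caller's context until
 * either the ticket is satisfied or the states are exhausted, at which point
 * we try the global rsv before giving up with -ENOSPC.
 */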
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state = 0;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	/*
	 * This is the priority reclaim path, so to_reclaim could still be > 0
	 * because we may have only satisfied the priority tickets and left
	 * non-priority tickets on the list.  We would then have to_reclaim > 0
	 * but ->bytes == 0.
	 */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (flush_state < states_nr) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
			    false);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	/* Attempt to steal from the global rsv if we can. */
	if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
		ticket->error = -ENOSPC;
		remove_ticket(space_info, ticket);
	}

	/*
	 * We must run try_granting_tickets here because we could be a large
	 * ticket in front of a smaller ticket that can now be satisfied with
	 * the available space.
	 */
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

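/*
 * The data counterpart of the above, used for
 * BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE: the only flushing a priority data
 * ticket can do is force chunk allocations until the data space info is
 * full, after which the ticket fails with -ENOSPC.
 */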
static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	spin_lock(&space_info->lock);

	/* We could have been granted before we got here. */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (!space_info->full) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	ticket->error = -ENOSPC;
	remove_ticket(space_info, ticket);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

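/*
 * Sleep (killably) until the ticket is either granted (->bytes reaches 0)
 * or errored out by the flusher.  A fatal signal removes the ticket and
 * fails it with -EINTR so that space cannot be granted behind our back.
 */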
static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	ret = ticket->error;
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
				   start_ns, flush, ticket->error);
	return ret;
}

/*
 * This returns true if this flush mode will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
	       (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

	/*
	 * If we're heavy on ordered operations then clamping won't help us.
	 * We need to clamp specifically to keep up with dirtying buffered
	 * writers, because there's not a 1:1 correlation of writing delalloc
	 * and freeing space, like there is with flushing delayed refs or
	 * delayed nodes.  If we're already more ordered than delalloc then
	 * we're keeping up, otherwise we aren't and should probably clamp.
	 */
	if (ordered < delalloc)
		space_info->clamp = min(space_info->clamp + 1, 8);
}

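/*
 * Whether this flush mode is allowed, as a last resort, to steal from the
 * global block rsv (recorded in ->steal on the reserve ticket).
 */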
static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		flush == BTRFS_RESERVE_FLUSH_EVICT);
}

/**
 * __reserve_bytes - try to reserve bytes from the block_rsv's space
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * btrfs_can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = can_steal(flush);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up.  Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}

/**
 * btrfs_reserve_metadata_bytes - try to reserve metadata bytes from the
 * block_rsv's space
 *
 * @fs_info:    the filesystem
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}

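/*
 * Typical call pattern (an illustrative sketch, not a real caller; "rsv"
 * stands in for whatever block_rsv the caller owns): size the reservation
 * with the worst-case item helpers, then pick the flush mode the context
 * allows, e.g.
 *
 *   u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *
 *   ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes,
 *                                      BTRFS_RESERVE_FLUSH_ALL);
 *   if (ret)
 *       return ret;
 */
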
/**
 * btrfs_reserve_data_bytes - try to reserve data bytes for an allocation
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info.  If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
	       flush == BTRFS_RESERVE_NO_FLUSH);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}
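
/*
 * Illustrative sketch only (not a real caller): a buffered write path would
 * reserve the sector-aligned range before dirtying pages, e.g.
 *
 *   ret = btrfs_reserve_data_bytes(fs_info,
 *                                  ALIGN(len, fs_info->sectorsize),
 *                                  BTRFS_RESERVE_FLUSH_DATA);
 */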

/* Dump all the space infos when we abort a transaction due to ENOSPC. */
__cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	btrfs_info(fs_info, "dumping space info:");
	list_for_each_entry(space_info, &fs_info->space_info, list) {
		spin_lock(&space_info->lock);
		__btrfs_dump_space_info(fs_info, space_info);
		spin_unlock(&space_info->lock);
	}
	dump_global_block_rsv(fs_info);
}