/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <linux/fs.h>
#include <linux/btrfs_tree.h>
#include <linux/sizes.h>
#include "extent-io-tree.h"
#include "extent_map.h"
#include "async-thread.h"
#include "block-rsv.h"

#define BTRFS_MAX_EXTENT_SIZE		SZ_128M

#define BTRFS_OLDEST_GENERATION		0ULL

#define BTRFS_EMPTY_DIR_SIZE		0

#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M

#define BTRFS_SUPER_INFO_OFFSET		SZ_64K
#define BTRFS_SUPER_INFO_SIZE		4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
/*
 * The reserved space at the beginning of each device. It covers the primary
 * super block and leaves space for potential use by other tools like
 * bootloaders, or to limit the damage from an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED	(SZ_1M)
/*
 * Runtime (in-memory) states of filesystem
 */
enum {
	/* Global indicator of serious filesystem errors */
	BTRFS_FS_STATE_ERROR,
	/*
	 * Filesystem is being remounted, allow to skip some operations, like
	 * defrag
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	BTRFS_FS_STATE_NO_CSUMS,

	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	BTRFS_FS_STATE_COUNT
};
enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,
	BTRFS_FS_OPEN,
	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,
	BTRFS_FS_BTREE_ERR,
	BTRFS_FS_LOG1_ERR,
	BTRFS_FS_LOG2_ERR,
	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started, it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues.
	 */
	BTRFS_FS_CSUM_IMPL_FAST,

	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to clean up space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half-completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do the next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

#if BITS_PER_LONG == 32
	/* Indicate if we have error/warn message printed on 32bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};
/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM			= (1UL << 0),
	BTRFS_MOUNT_NODATACOW			= (1UL << 1),
	BTRFS_MOUNT_NOBARRIER			= (1UL << 2),
	BTRFS_MOUNT_SSD				= (1UL << 3),
	BTRFS_MOUNT_DEGRADED			= (1UL << 4),
	BTRFS_MOUNT_COMPRESS			= (1UL << 5),
	BTRFS_MOUNT_NOTREELOG			= (1UL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT		= (1UL << 7),
	BTRFS_MOUNT_SSD_SPREAD			= (1UL << 8),
	BTRFS_MOUNT_NOSSD			= (1UL << 9),
	BTRFS_MOUNT_DISCARD_SYNC		= (1UL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS		= (1UL << 11),
	BTRFS_MOUNT_SPACE_CACHE			= (1UL << 12),
	BTRFS_MOUNT_CLEAR_CACHE			= (1UL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED	= (1UL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG		= (1UL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG			= (1UL << 16),
	BTRFS_MOUNT_USEBACKUPROOT		= (1UL << 17),
	BTRFS_MOUNT_SKIP_BALANCE		= (1UL << 18),
	BTRFS_MOUNT_CHECK_INTEGRITY		= (1UL << 19),
	BTRFS_MOUNT_CHECK_INTEGRITY_DATA	= (1UL << 20),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	= (1UL << 21),
	BTRFS_MOUNT_RESCAN_UUID_TREE		= (1UL << 22),
	BTRFS_MOUNT_FRAGMENT_DATA		= (1UL << 23),
	BTRFS_MOUNT_FRAGMENT_METADATA		= (1UL << 24),
	BTRFS_MOUNT_FREE_SPACE_TREE		= (1UL << 25),
	BTRFS_MOUNT_NOLOGREPLAY			= (1UL << 26),
	BTRFS_MOUNT_REF_VERIFY			= (1UL << 27),
	BTRFS_MOUNT_DISCARD_ASYNC		= (1UL << 28),
	BTRFS_MOUNT_IGNOREBADROOTS		= (1UL << 29),
	BTRFS_MOUNT_IGNOREDATACSUMS		= (1UL << 30),
	BTRFS_MOUNT_NODISCARD			= (1UL << 31),
};
/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below then we will fail to mount.
 */
#define BTRFS_FEATURE_COMPAT_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE |	\
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
	 BTRFS_FEATURE_COMPAT_RO_VERITY |		\
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL
#ifdef CONFIG_BTRFS_DEBUG
/*
 * Extent tree v2 supported only with CONFIG_BTRFS_DEBUG
 */
#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD |		\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES |		\
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID |		\
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 |		\
	 BTRFS_FEATURE_INCOMPAT_ZONED |			\
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)
#else
#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD |		\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES |		\
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID |		\
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 |		\
	 BTRFS_FEATURE_INCOMPAT_ZONED)
#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET		\
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR	0ULL
#define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
#define BTRFS_DEFAULT_MAX_INLINE	(2048)
struct btrfs_dev_replace {
	/* See #define above */
	u64 replace_state;
	/* Seconds since 1-Jan-1970 */
	time64_t time_started;
	/* Seconds since 1-Jan-1970 */
	time64_t time_stopped;
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	/* See #define above */
	u64 cont_reading_from_srcdev_mode;

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;
};
/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};
/*
 * Discard control.
 *
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_UNUSED	0
#define BTRFS_DISCARD_INDEX_START	1
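
/*
 * Illustrative sketch only (not kernel API): how a discard list index could
 * be chosen from the defines above. The helper name and the bg_is_unused
 * flag are hypothetical, for demonstration.
 *
 *	static int pick_discard_index(bool bg_is_unused)
 *	{
 *		// A completely free block group goes to index 0 so the whole
 *		// range is trimmed losslessly; everything else starts at the
 *		// first size-filtered list.
 *		return bg_is_unused ? BTRFS_DISCARD_INDEX_UNUSED :
 *				      BTRFS_DISCARD_INDEX_START;
 *	}
 */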
struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};
/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};
/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
};
struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log tree blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct extent_map_tree mapping_tree;
	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	u64 generation;
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;
	u64 avg_delayed_ref_runtime;

	/*
	 * This is updated to the current transaction every time a full commit
	 * is required instead of the faster short fsync log commits.
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;

	unsigned long compress_type:4;
	unsigned int compress_level;
	u32 commit_interval;
	/*
	 * This is a suggested value; the read side is safe even if it sees a
	 * wrong number, because we will write out the data into a regular
	 * extent. The write side (mount/remount) is under the ->s_umount
	 * lock, so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;
	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because the flags are never cleared, we don't need to take the
	 * lock on the read side.
	 *
	 * We also don't need the lock when we mount the fs, because there is
	 * no other task which could be updating the flags at that point.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two I/Os are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing a non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock, it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;
	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;

	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;
	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads. This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *hipri_workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct workqueue_struct *compressed_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write. It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;
	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;
	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	void *bdev_holder;

	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
	 * running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;
	struct workqueue_struct *scrub_wr_completion_workers;
	struct workqueue_struct *scrub_parity_workers;
	struct btrfs_subpage_info *subpage_info;

	struct btrfs_discard_ctl discard_ctl;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	u32 check_integrity_print_mask;
#endif
	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * Used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/*
	 * Protect user change for quota operations. If a transaction is needed,
	 * it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at the next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;

	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;
	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;
	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, used to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem; on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;
	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise it's used for a check
	 * if the mode is enabled.
	 */
	u64 zone_size;

	/* Max size to emit ZONE_APPEND write command */
	u64 max_zone_append_size;
	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;
	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};
static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}
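
/*
 * A minimal usage sketch for the helpers above (the surrounding context is
 * hypothetical): record the generation after dropping a non-relocation root,
 * then compare it later without additional locking.
 *
 *	btrfs_set_last_root_drop_gen(fs_info, trans->transid);
 *	...
 *	if (btrfs_get_last_root_drop_gen(fs_info) == some_gen)
 *		...;
 *
 * WRITE_ONCE()/READ_ONCE() keep the unlocked accesses tear-free.
 */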
/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}
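
/*
 * Worked example (illustrative numbers): with 4 KiB sectors
 * (sectorsize_bits == 12), 1 MiB of data produces 1 MiB >> 12 = 256
 * checksums. If, say, 2048 checksums fit in one leaf (csums_per_leaf is
 * derived at mount time from the leaf size and the checksum size), the
 * result is DIV_ROUND_UP(256, 2048) = 1 leaf.
 */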
/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

/*
 * Doing a truncate or a modification won't result in new nodes or leaves,
 * just what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
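
/*
 * Worked example (illustrative): with the common 16 KiB nodesize and
 * BTRFS_MAX_LEVEL == 8, inserting one item reserves
 * 16 KiB * 8 * 2 = 256 KiB via btrfs_calc_insert_metadata_size(), while a
 * pure COW modification reserves half of that, 128 KiB, via
 * btrfs_calc_metadata_size(). The factor of 2 covers possible node splits
 * on the way down the tree.
 */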
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
				       sizeof(struct btrfs_item))

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return fs_info->zone_size > 0;
}
/*
 * Count how many fs_info->max_extent_size cover the @size
 */
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
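
/*
 * Worked example (illustrative): on a regular (non-zoned) filesystem,
 * max_extent_size is BTRFS_MAX_EXTENT_SIZE (128 MiB), so a 300 MiB range
 * counts as DIV_ROUND_UP(300 MiB, 128 MiB) = 3 extents for delalloc
 * accounting. On zoned devices the divisor can be smaller, yielding more
 * extents for the same range.
 */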
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);
/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);
#define __btrfs_fs_incompat(fs_info, flags) \
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags) \
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
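
/*
 * Usage sketch for the feature macros (illustrative, mirroring how callers
 * typically use them):
 *
 *	if (btrfs_fs_incompat(fs_info, ZONED))
 *		...;				// test a bit by short name
 *	btrfs_set_fs_incompat(fs_info, RAID1C34); // set the bit, log once
 *
 * The short name is token-pasted onto BTRFS_FEATURE_INCOMPAT_ (or
 * BTRFS_FEATURE_COMPAT_RO_), so only names with a matching
 * BTRFS_FEATURE_* define are valid.
 */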
#define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt)	((fs_info)->mount_opt & \
					 BTRFS_MOUNT_##opt)
#define btrfs_set_and_info(fs_info, opt, fmt, args...)			\
do {									\
	if (!btrfs_test_opt(fs_info, opt))				\
		btrfs_info(fs_info, fmt, ##args);			\
	btrfs_set_opt(fs_info->mount_opt, opt);				\
} while (0)

#define btrfs_clear_and_info(fs_info, opt, fmt, args...)		\
do {									\
	if (btrfs_test_opt(fs_info, opt))				\
		btrfs_info(fs_info, fmt, ##args);			\
	btrfs_clear_opt(fs_info->mount_opt, opt);			\
} while (0)
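
/*
 * Usage sketch (illustrative, following the pattern used by mount option
 * parsing; the message text is an example):
 *
 *	btrfs_set_and_info(info, FLUSHONCOMMIT,
 *			   "turning on flush-on-commit");
 *
 * The message is logged only on an actual off -> on transition, then the
 * option bit is set; btrfs_clear_and_info() is the mirror image.
 */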
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}
/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleeping. This function is used to check the status of
 * the filesystem.
 *
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
	       btrfs_fs_closing(fs_info);
}
static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}
#define BTRFS_FS_ERROR(fs_info)	(unlikely(test_bit(BTRFS_FS_STATE_ERROR, \
						   &(fs_info)->fs_state)))
#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info)				\
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,		\
			   &(fs_info)->fs_state)))
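
/*
 * Usage sketch (illustrative): these predicates are cheap enough for hot
 * paths, e.g. to fail fast before starting new work:
 *
 *	if (BTRFS_FS_ERROR(fs_info))
 *		return -EROFS;
 *
 * The unlikely() inside the macro keeps the error branch off the fast path.
 */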
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return 0;
}
#endif

#endif /* BTRFS_FS_H */