// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
#include "accessors.h"
/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, so they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering a lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs updating as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if BTRFS_MAX_LEVEL != 8
#error
#endif

#define DEFINE_LEVEL(stem, level)                                       \
        .names[level] = "btrfs-" stem "-0" #level,

#define DEFINE_NAME(stem)                                               \
        DEFINE_LEVEL(stem, 0)                                           \
        DEFINE_LEVEL(stem, 1)                                           \
        DEFINE_LEVEL(stem, 2)                                           \
        DEFINE_LEVEL(stem, 3)                                           \
        DEFINE_LEVEL(stem, 4)                                           \
        DEFINE_LEVEL(stem, 5)                                           \
        DEFINE_LEVEL(stem, 6)                                           \
        DEFINE_LEVEL(stem, 7)
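
/*
 * For illustration only: DEFINE_NAME("csum") expands to the eight per-level
 * initializers
 *
 *      .names[0] = "btrfs-csum-00",
 *      .names[1] = "btrfs-csum-01",
 *      ...
 *      .names[7] = "btrfs-csum-07",
 *
 * giving every tree a distinct lockdep class name per level.
 */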
static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        /* Longest entry: btrfs-block-group-00 */
        char                    names[BTRFS_MAX_LEVEL][24];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,        DEFINE_NAME("root")       },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,      DEFINE_NAME("extent")     },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,       DEFINE_NAME("chunk")      },
        { .id = BTRFS_DEV_TREE_OBJECTID,         DEFINE_NAME("dev")        },
        { .id = BTRFS_CSUM_TREE_OBJECTID,        DEFINE_NAME("csum")       },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,       DEFINE_NAME("quota")      },
        { .id = BTRFS_TREE_LOG_OBJECTID,         DEFINE_NAME("log")        },
        { .id = BTRFS_TREE_RELOC_OBJECTID,       DEFINE_NAME("treloc")     },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID,  DEFINE_NAME("dreloc")     },
        { .id = BTRFS_UUID_TREE_OBJECTID,        DEFINE_NAME("uuid")       },
        { .id = BTRFS_FREE_SPACE_TREE_OBJECTID,  DEFINE_NAME("free-space") },
        { .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
        { .id = 0,                               DEFINE_NAME("tree")       },
};

#undef DEFINE_LEVEL
#undef DEFINE_NAME
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* Find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
}
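
/*
 * Note (illustrative): roots without a dedicated entry above, e.g. subvolume
 * trees, fall through to the terminating { .id = 0 } element and therefore
 * use the default "btrfs-tree-0<level>" lockdep classes.
 */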
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
{
        if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
                btrfs_set_buffer_lockdep_class(root->root_key.objectid,
                                               eb, btrfs_header_level(eb));
}

#endif
/*
 * Extent buffer locking
 * =====================
 *
 * We use a rw_semaphore for tree locking, and the semantics are exactly the
 * same:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - try-lock semantics for readers and writers
 *
 * The rwsem implementation does opportunistic spinning, which reduces the
 * number of times the locking task needs to sleep.
 */
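
/*
 * A minimal usage sketch (illustrative only), assuming the caller already
 * holds a reference to the extent buffer:
 *
 *      btrfs_tree_read_lock(eb);
 *      ... read items out of the buffer ...
 *      btrfs_tree_read_unlock(eb);
 *
 * Writers pair btrfs_tree_lock() with btrfs_tree_unlock() the same way.
 */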
/*
 * __btrfs_tree_read_lock - lock extent buffer for read
 * @eb:   the eb to be locked
 * @nest: the nesting level to be used for lockdep
 *
 * This takes the read lock on the extent buffer, using the specified nesting
 * level for lockdep purposes.
 */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
        u64 start_ns = 0;

        if (trace_btrfs_tree_read_lock_enabled())
                start_ns = ktime_get_ns();

        down_read_nested(&eb->lock, nest);
        trace_btrfs_tree_read_lock(eb, start_ns);
}

void btrfs_tree_read_lock(struct extent_buffer *eb)
{
        __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
}
/* Try-lock for read.  Return 1 if the rwlock has been taken, 0 otherwise. */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (down_read_trylock(&eb->lock)) {
                trace_btrfs_try_tree_read_lock(eb);
                return 1;
        }
        return 0;
}

/* Try-lock for write.  Return 1 if the rwlock has been taken, 0 otherwise. */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (down_write_trylock(&eb->lock)) {
                eb->lock_owner = current->pid;
                trace_btrfs_try_tree_write_lock(eb);
                return 1;
        }
        return 0;
}
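
/*
 * Illustrative (hypothetical) caller pattern: try the non-blocking path first
 * and fall back to the blocking lock when the buffer is contended:
 *
 *      if (!btrfs_try_tree_write_lock(eb))
 *              btrfs_tree_lock(eb);
 */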
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        trace_btrfs_tree_read_unlock(eb);
        up_read(&eb->lock);
}

/*
 * __btrfs_tree_lock - lock eb for write
 * @eb:   the eb to lock
 * @nest: the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
        __acquires(&eb->lock)
{
        u64 start_ns = 0;

        if (trace_btrfs_tree_lock_enabled())
                start_ns = ktime_get_ns();

        down_write_nested(&eb->lock, nest);
        eb->lock_owner = current->pid;
        trace_btrfs_tree_lock(eb, start_ns);
}

void btrfs_tree_lock(struct extent_buffer *eb)
{
        __btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
}
/* Release the write lock. */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        trace_btrfs_tree_unlock(eb);
        eb->lock_owner = 0;
        up_write(&eb->lock);
}

/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node.  This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
        int i;

        if (path->keep_locks)
                return;

        for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                if (!path->nodes[i])
                        continue;
                if (!path->locks[i])
                        continue;
                btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
                path->locks[i] = 0;
        }
}
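
/*
 * Illustrative (hypothetical) use: after btrfs_search_slot(), a caller that
 * only needs to modify the leaf can drop all node locks above it with
 *
 *      btrfs_unlock_up_safe(path, 1);
 *
 * which leaves level 0 locked, unlocks levels 1 and up, and clears the
 * corresponding path->locks[] entries.
 */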
/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);

                btrfs_maybe_reset_lockdep_class(root, eb);
                btrfs_tree_lock(eb);
                if (eb == root->node)
                        break;
                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);

                btrfs_maybe_reset_lockdep_class(root, eb);
                btrfs_tree_read_lock(eb);
                if (eb == root->node)
                        break;
                btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}
/*
 * Loop around taking references on and locking the root node of the tree in
 * nowait mode until we end up with a lock on the root node, or return early to
 * avoid blocking.
 *
 * Return: root extent buffer with read lock held, or -EAGAIN.
 */
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);
                if (!btrfs_try_tree_read_lock(eb)) {
                        free_extent_buffer(eb);
                        return ERR_PTR(-EAGAIN);
                }
                if (eb == root->node)
                        break;
                btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}
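
/*
 * Illustrative (hypothetical) nowait caller: -EAGAIN means "would block" and
 * the caller is expected to back off and retry later:
 *
 *      eb = btrfs_try_read_lock_root_node(root);
 *      if (IS_ERR(eb))
 *              return PTR_ERR(eb);
 */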
/*
 * DREW stands for double-reader-writer-exclusion lock.  It's used in
 * situations where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives priority to readers.  If a reader and a
 * writer both race to acquire their respective sides of the lock, the writer
 * yields its lock as soon as it detects a concurrent reader.  Additionally,
 * while there are pending readers, no new writers are allowed to come in and
 * acquire the lock.
 */
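
/*
 * A minimal usage sketch (illustrative only): side A takes the reader half,
 * side B takes the writer half, so A excludes B and vice versa while several
 * A users (or several B users) may run concurrently:
 *
 *      btrfs_drew_lock_init(&lock);
 *
 *      side A:
 *      btrfs_drew_read_lock(&lock);
 *      ...
 *      btrfs_drew_read_unlock(&lock);
 *
 *      side B:
 *      btrfs_drew_write_lock(&lock);
 *      ...
 *      btrfs_drew_write_unlock(&lock);
 */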
void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
        atomic_set(&lock->readers, 0);
        atomic_set(&lock->writers, 0);
        init_waitqueue_head(&lock->pending_readers);
        init_waitqueue_head(&lock->pending_writers);
}

/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
        if (atomic_read(&lock->readers))
                return false;

        atomic_inc(&lock->writers);

        /* Ensure writers count is updated before we check for pending readers */
        smp_mb__after_atomic();
        if (atomic_read(&lock->readers)) {
                btrfs_drew_write_unlock(lock);
                return false;
        }

        return true;
}
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
        while (true) {
                if (btrfs_drew_try_write_lock(lock))
                        return;
                wait_event(lock->pending_writers, !atomic_read(&lock->readers));
        }
}

void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
        atomic_dec(&lock->writers);
        cond_wake_up(&lock->pending_readers);
}
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
        atomic_inc(&lock->readers);

        /*
         * Ensure the pending reader count is perceived BEFORE this reader
         * goes to sleep in case of active writers.  This guarantees new
         * writers won't be allowed and that the current reader will be woken
         * up when the last active writer finishes its jobs.
         */
        smp_mb__after_atomic();

        wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}
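
/*
 * Why the two barriers pair up (informal sketch): a racing reader and writer
 * each increment their own counter and then read the other side's counter.
 * With the smp_mb__after_atomic() on both sides, at least one of them must
 * observe the other's increment, so either the writer backs off in
 * btrfs_drew_try_write_lock() or the reader waits in wait_event(); they can
 * never both proceed at the same time.
 */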
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
        /*
         * atomic_dec_and_test() implies a full barrier, so woken up writers
         * are guaranteed to see the decrement.
         */
        if (atomic_dec_and_test(&lock->readers))
                wake_up(&lock->pending_writers);
}