/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2

/*
 * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
 * the time of this patch is 8, which is how many we use.  Keep this in mind if
 * you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are rootid+level, this confuses lockdep
	 * when we lock the newly allocated COW'd block.  Handle this by having
	 * a subclass for COW'ed blocks so that lockdep doesn't complain.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched to, such as
	 * for searching forward or for split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to cow that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those adjacent
	 * blocks.  Thus when we go to allocate a new split block we've already
	 * used up all of our available subclasses, so this subclass exists to
	 * handle this case where we need to allocate a new split block.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one.  Copying also
	 * has this problem as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

	/*
	 * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
	 * add this in here and add a static_assert to keep us from going over
	 * the limit.  As of this writing we're limited to 8, and we're
	 * definitely using 8, hence this check to keep us from messing up in
	 * the future.
	 */
	BTRFS_NESTING_MAX,
};

enum btrfs_lockdep_trans_states {
	BTRFS_LOCKDEP_TRANS_COMMIT_PREP,
	BTRFS_LOCKDEP_TRANS_UNBLOCKED,
	BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
	BTRFS_LOCKDEP_TRANS_COMPLETED,
};

/*
 * Lockdep annotation for wait events.
 *
 * @owner:  The struct where the lockdep map is defined
 * @lock:   The lockdep map corresponding to a wait event
 *
 * This macro is used to annotate a wait event. In this case a thread acquires
 * the lockdep map as writer (exclusive lock) because it has to block until all
 * the threads that hold the lock as readers signal the condition for the wait
 * event and release their locks.
 */
#define btrfs_might_wait_for_event(owner, lock)				\
	do {								\
		rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_);	\
		rwsem_release(&owner->lock##_map, _THIS_IP_);		\
	} while (0)

/*
 * Protection for the resource/condition of a wait event.
 *
 * @owner:  The struct where the lockdep map is defined
 * @lock:   The lockdep map corresponding to a wait event
 *
 * Many threads can modify the condition for the wait event at the same time
 * and signal the threads that block on the wait event. The threads that modify
 * the condition and do the signaling acquire the lock as readers (shared
 * lock).
 */
#define btrfs_lockdep_acquire(owner, lock)				\
	rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)

/*
 * Used after signaling the condition for a wait event to release the lockdep
 * map held by a reader thread.
 */
#define btrfs_lockdep_release(owner, lock)				\
	rwsem_release(&owner->lock##_map, _THIS_IP_)

/*
 * Macros for the transaction states wait events, similar to the generic wait
 * event macros.
 */
#define btrfs_might_wait_for_state(owner, i)				\
	do {								\
		rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
		rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_); \
	} while (0)

#define btrfs_trans_state_lockdep_acquire(owner, i)			\
	rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)

#define btrfs_trans_state_lockdep_release(owner, i)			\
	rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)

/* Initialization of the lockdep map */
#define btrfs_lockdep_init_map(owner, lock)				\
	do {								\
		static struct lock_class_key lock##_key;		\
		lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0); \
	} while (0)

/* Initialization of the transaction states lockdep maps. */
#define btrfs_state_lockdep_init_map(owner, lock, state)		\
	do {								\
		static struct lock_class_key lock##_key;		\
		lockdep_init_map(&owner->btrfs_state_change_map[state], #lock, \
				 &lock##_key, 0);			\
	} while (0)

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");

void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);

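/*
 * Illustrative sketch, not upstream code: __btrfs_tree_lock() is where the
 * nesting subclasses above get consumed; btrfs_tree_lock() is simply the
 * BTRFS_NESTING_NORMAL case. When COWing a block while the original is still
 * locked, the copy is locked with a distinct subclass so lockdep does not see
 * a recursive acquisition ("cow_eb" is a hypothetical buffer name):
 *
 *	btrfs_tree_lock(eb);
 *	... allocate cow_eb as the replacement for eb ...
 *	__btrfs_tree_lock(cow_eb, BTRFS_NESTING_COW);
 */
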
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
	lockdep_assert_held_write(&eb->lock);
}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
#endif

void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK)
		btrfs_tree_unlock(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_tree_read_unlock(eb);
	else
		BUG();
}

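/*
 * Illustrative use, assuming a caller that recorded which lock type it took,
 * as struct btrfs_path does in its locks[] array: the stored value selects
 * the matching unlock without the caller re-deriving it.
 *
 *	if (path->locks[level])
 *		btrfs_tree_unlock_rw(path->nodes[level], path->locks[level]);
 */
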
struct btrfs_drew_lock {
	atomic_t readers;
	atomic_t writers;
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

void btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);

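/*
 * Illustrative sketch of the drew lock semantics, not upstream code: any
 * number of writers or any number of readers may hold the lock concurrently,
 * but the two classes exclude each other ("lock" here is a hypothetical
 * instance):
 *
 *	Writer side:				Reader side:
 *
 *	btrfs_drew_write_lock(&lock);		btrfs_drew_read_lock(&lock);
 *	... no readers are active ...		... no writers are active ...
 *	btrfs_drew_write_unlock(&lock);		btrfs_drew_read_unlock(&lock);
 */
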
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
#else
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
						  struct extent_buffer *eb, int level)
{
}
static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
						   struct extent_buffer *eb)
{
}
#endif

#endif /* BTRFS_LOCKING_H */