#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H

/*
 * At a high level, bcache's btree is a relatively standard b+ tree. All keys
 * and pointers are in the leaves; interior nodes only have pointers to the
 * child nodes.
 *
 * In the interior nodes, a struct bkey always points to a child btree node,
 * and the key is the highest key in the child node - except that the highest
 * key in an interior node is always MAX_KEY. The size field refers to the
 * size on disk of the child node - this would allow us to have variable sized
 * btree nodes (handy for keeping the depth of the btree at 1 by expanding
 * just the root).
 *
 * Btree nodes are themselves log structured, but this is hidden fairly
 * thoroughly. Btree nodes on disk will in practice have extents that overlap
 * (because they were written at different times), but in memory we never have
 * overlapping extents - when we read in a btree node from disk, the first
 * thing we do is re-sort all the sets of keys with a mergesort, and in the
 * same pass we check for overlapping extents and adjust them appropriately.
 *
 * struct btree_op is a central interface to the btree code. It's used for
 * specifying read vs. write locking, and the embedded wait entry is used for
 * waiting on IO or on memory reserves.
 *
 * Btree nodes are cached in memory; traversing the btree might require
 * reading in btree nodes, which is handled mostly transparently.
 *
 * bch_btree_node_get() looks up a btree node in the cache and reads it in
 * from disk if necessary. This function is almost never called directly
 * though - the btree() macro is used to get a btree node, call some function
 * on it, and unlock the node after the function returns.
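 *
 * As a rough sketch of direct use (normally the btree() macro does all of
 * this for you; "k" and "write" are illustrative, and the error handling
 * follows the prototypes further down in this file):
 *
 *	struct btree *child = bch_btree_node_get(b->c, k, b->level - 1, write);
 *
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	... use child ...
 *	rw_unlock(write, child);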
 *
 * The root is special cased - it's taken out of the cache's lru (thus pinning
 * it in memory), so we can find the root of the btree by just dereferencing a
 * pointer instead of looking it up in the cache. This makes locking a bit
 * tricky, since the root pointer is protected by the lock in the btree node
 * it points to - the btree_root() macro handles this.
 *
 * In various places we must be able to allocate memory for multiple btree
 * nodes in order to make forward progress. To do this we use the btree cache
 * itself as a reserve; if __get_free_pages() fails, we'll find a node in the
 * btree cache we can reuse. We can't allow more than one thread to be doing
 * this at a time, so there's a lock, implemented by a pointer to the btree_op
 * closure - this allows the btree_root() macro to implicitly release this
 * lock.
 *
 * Btree nodes never have to be explicitly read in; bch_btree_node_get()
 * handles this.
 *
 * For writing, we have two btree_write structs embedded in struct btree - one
 * write in flight, and one being set up, and we toggle between them.
 *
 * Writing is done with a single function - bch_btree_write() really serves
 * two different purposes and should be broken up into two different
 * functions. When passing now = false, it merely indicates that the node is
 * now dirty - calling it ensures that the dirty keys will be written at some
 * point in the future.
 *
 * When passing now = true, bch_btree_write() causes a write to happen
 * "immediately" (if there was already a write in flight, it'll cause the
 * write to happen as soon as the previous write completes). It returns
 * immediately, though - it takes a refcount on the closure you pass in, so a
 * closure_sync() later can be used to wait for the write to complete.
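 *
 * In the interface actually declared below, the "immediate" case corresponds
 * to bch_btree_node_write(). A minimal sketch, assuming b is held write
 * locked and using a stack allocated parent closure:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch_btree_node_write(b, &cl);
 *	closure_sync(&cl);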
 *
 * This is handy because btree_split() and garbage collection can issue writes
 * in parallel, reducing the amount of time they have to hold write locks.
 *
 * When traversing the btree, we may need write locks starting at some level -
 * inserting a key into the btree will typically only require a write lock on
 * the leaf node.
 *
 * This is specified with the lock field in struct btree_op; lock = 0 means we
 * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
 * checks this field and returns the node with the appropriate lock held.
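 *
 * For example, an insert that only needs to modify a leaf node would set up
 * its op as (sketch; bch_btree_op_init() is defined later in this file):
 *
 *	struct btree_op op;
 *
 *	bch_btree_op_init(&op, 0);
 *
 * after which the traversal read locks interior nodes and write locks only
 * nodes at level <= 0.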
 *
 * If, after traversing the btree, the insertion code discovers it has to
 * split, then it must restart from the root and take new locks - to do this
 * it changes the lock field and returns -EINTR, which causes the btree_root()
 * macro to retry the traversal from the root.
 *
 * Handling cache misses requires a different mechanism for upgrading to a
 * write lock. We do cache lookups with only a read lock held, but if we get a
 * cache miss and we wish to insert this data into the cache, we have to
 * insert a placeholder key to detect races - otherwise, we could race with a
 * write and overwrite the data that was just written to the cache with stale
 * data from the backing device.
 *
 * For this we use a sequence number that write locks and unlocks increment -
 * to insert the check key it unlocks the btree node and then takes a write
 * lock, and fails if the sequence number doesn't match.
 */
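
/*
 * A sketch of the check key mechanism described above (everything except
 * bch_btree_insert_check_key() is illustrative): the lookup runs under a read
 * lock, and inserting the placeholder key re-locks the node for writing,
 * using the sequence number to detect whether anything changed in between:
 *
 *	ret = bch_btree_insert_check_key(b, &op, &check_key);
 *	if (ret == -EINTR)
 *		... the node changed under us; redo the lookup ...
 */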

/*
 * If btree_split() frees a btree node, it writes a new pointer to that
 * btree node indicating it was freed; it takes a refcount on
 * c->prio_blocked because we can't write the gens until the new
 * pointer is on disk. This allows btree_write_endio() to release the
 * refcount that btree_split() took.
 */

struct btree_keys_ops {
	bool		(*sort_cmp)(struct btree_iter_set,
				    struct btree_iter_set);
	struct bkey	*(*sort_fixup)(struct btree_iter *,
				       struct bkey *);
	bool		(*key_invalid)(struct btree *,
				       const struct bkey *);
	bool		(*key_bad)(struct btree *,
				   const struct bkey *);
	bool		(*key_merge)(struct btree *,
				     struct bkey *, struct bkey *);

	/*
	 * Only used for deciding whether to use START_KEY(k) or just the key
	 * itself in a couple places
	 */

	const struct btree_keys_ops	*ops;

	/* Hottest entries first */
	struct hlist_node	hash;

	/* Key/pointer for this btree node */

	/* Single bit - set when accessed, cleared by shrinker */
	unsigned long		accessed;

	struct rw_semaphore	lock;
	struct btree		*parent;

	uint16_t		written;	/* would be nice to kill */

	/*
	 * Set of sorted keys - the real btree node - plus a binary search
	 * tree
	 *
	 * sets[0] is special; sets[0].tree, sets[0].prev and sets[0].data
	 * point to the memory we have allocated for this btree node.
	 * Additionally, sets[0].data points to the entire btree node as it
	 * exists on disk.
	 */
	struct bset_tree	sets[MAX_BSETS];

	/* For outstanding btree writes, used as a lock - protects write_idx */
	struct semaphore	io_mutex;

	struct list_head	list;
	struct delayed_work	work;

	struct btree_write	writes[2];

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }

	BTREE_NODE_write_idx,

BTREE_FLAG(io_error);
BTREE_FLAG(write_idx);
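
/*
 * For instance, BTREE_FLAG(write_idx) above expands (roughly) to:
 *
 *	static inline bool btree_node_write_idx(struct btree *b)
 *	{ return test_bit(BTREE_NODE_write_idx, &b->flags); }
 *
 *	static inline void set_btree_node_write_idx(struct btree *b)
 *	{ set_bit(BTREE_NODE_write_idx, &b->flags); }
 *
 * which is the accessor btree_current_write() below relies on.
 */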

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	return b->sets + b->nsets;
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return b->sets->data;
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset_tree_last(b)->data;
}

static inline unsigned bset_byte_offset(struct btree *b, struct bset *i)
{
	return ((size_t) i) - ((size_t) b->sets->data);
}

static inline unsigned bset_sector_offset(struct btree *b, struct bset *i)
{
	return (((void *) i) - ((void *) btree_bset_first(b))) >> 9;
}

static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
{
	return bset_sector_offset(b, i) >> b->c->block_bits;
}

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
}

static inline bool bset_written(struct btree *b, struct bset_tree *t)
{
	return t->data < write_block(b);
}

static inline bool bkey_written(struct btree *b, struct bkey *k)
{
	return k < write_block(b)->start;
}

/* Reset the write budget that triggers gc: ~1/16th of the cache's sectors */
static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}

static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
{
	return b->ops->key_invalid(b, k);
}

static inline bool bch_ptr_bad(struct btree *b, const struct bkey *k)
{
	return b->ops->key_bad(b, k);
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the
 * merged key, r will be untouched.
 */
static inline bool bch_bkey_try_merge(struct btree *b,
				      struct bkey *l, struct bkey *r)
{
	return b->ops->key_merge ? b->ops->key_merge(b, l, r) : false;
}
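
/*
 * Typical use in an insert path (sketch; "prev" and "k" are illustrative
 * names for two adjacent keys, with prev ordered before k): if the merge
 * succeeds, prev now covers k and the caller skips inserting k.
 *
 *	if (bch_bkey_try_merge(b, prev, k))
 *		return;
 */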

void bkey_put(struct cache_set *c, struct bkey *k);

#define for_each_cached_btree(b, c, iter)				\
	for (iter = 0;							\
	     iter < ARRAY_SIZE((c)->bucket_hash);			\
	     iter++)							\
		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)

#define for_each_key_filter(b, k, iter, filter)				\
	for (bch_btree_iter_init((b), (iter), NULL);			\
	     ((k) = bch_btree_iter_next_filter((iter), b, filter));)

#define for_each_key(b, k, iter)					\
	for (bch_btree_iter_init((b), (iter), NULL);			\
	     ((k) = bch_btree_iter_next(iter));)
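
/*
 * For example, to walk every key in a node while skipping keys whose
 * pointers have gone stale (sketch; process_key() is an illustrative
 * placeholder):
 *
 *	struct btree_iter iter;
 *	struct bkey *k;
 *
 *	for_each_key_filter(b, k, &iter, bch_ptr_bad)
 *		process_key(b, k);
 */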

/* Recursing down the btree */

	/* for waiting on btree reserve in btree_split() */

	/* Btree level at which we start taking write locks */

	unsigned		insert_collision:1;

static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
	memset(op, 0, sizeof(struct btree_op));
	init_wait(&op->wait);
	op->lock = write_lock_level;
}

/* Write locks and unlocks bump b->seq, used to detect races (see above) */
static inline void rw_lock(bool w, struct btree *b, int level)
{
	w ? down_write_nested(&b->lock, level + 1)
	  : down_read_nested(&b->lock, level + 1);
	if (w)
		b->seq++;
}

static inline void rw_unlock(bool w, struct btree *b)
{
	if (w)
		b->seq++;
	(w ? up_write : up_read)(&b->lock);
}

void bch_btree_node_read_done(struct btree *);
void bch_btree_node_write(struct btree *, struct closure *);

void bch_btree_set_root(struct btree *);
struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);

int bch_btree_insert_check_key(struct btree *, struct btree_op *,
			       struct bkey *);
int bch_btree_insert(struct cache_set *, struct keylist *,
		     atomic_t *, struct bkey *);

int bch_gc_thread_start(struct cache_set *);
size_t bch_btree_gc_finish(struct cache_set *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *);
uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);

static inline void wake_up_gc(struct cache_set *c)
{
	/* The gc thread may not have been started yet */
	if (c->gc_thread)
		wake_up_process(c->gc_thread);
}

#define MAP_CONTINUE	1

#define MAP_ALL_NODES	0
#define MAP_LEAF_NODES	1

#define MAP_END_KEY	1

typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
			  struct bkey *, btree_map_nodes_fn *, int);

static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
				      struct bkey *from, btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
}

static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
					   struct cache_set *c,
					   struct bkey *from,
					   btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}

typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
				struct bkey *);
int bch_btree_map_keys(struct btree_op *, struct cache_set *,
		       struct bkey *, btree_map_keys_fn *, int);
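
/*
 * Sketch of a key-mapping callback (the callback body, nr_keys and the use of
 * ZERO_KEY as a starting point are illustrative): the callback is invoked for
 * each key at or after the start key and returns MAP_CONTINUE to keep
 * iterating; passing -1 as the lock level means no write locks are taken.
 *
 *	static int count_keys(struct btree_op *op, struct btree *b,
 *			      struct bkey *k)
 *	{
 *		nr_keys++;
 *		return MAP_CONTINUE;
 *	}
 *
 *	bch_btree_op_init(&op, -1);
 *	bch_btree_map_keys(&op, c, &ZERO_KEY, count_keys, 0);
 */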

typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);

void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *,
		       struct bkey *, keybuf_pred_fn *);
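
/*
 * For example, writeback-style code can refill a keybuf with only the dirty
 * keys up to some end key (sketch; "end" and "buf" are illustrative,
 * KEY_DIRTY() is the usual bkey accessor):
 *
 *	static bool dirty_pred(struct keybuf *buf, struct bkey *k)
 *	{
 *		return KEY_DIRTY(k);
 *	}
 *
 *	bch_refill_keybuf(c, buf, &end, dirty_pred);
 */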

bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
				  struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
					  struct bkey *, keybuf_pred_fn *);

#endif /* _BCACHE_BTREE_H */