#include <linux/slab.h>

#include "util.h" /* for time_stats */

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bch_ptr_invalid() and
 * bch_ptr_bad().
 *
 * bch_ptr_invalid() primarily filters out keys and pointers that would be
 * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
 * pointers that occur in normal practice but don't point to real data.
 *
 * The one exception to the rule that ptr_invalid() filters out invalid keys is
 * that it also filters out keys of size 0 - these are keys that have been
 * completely overwritten. It'd be safe to delete these in memory while leaving
 * them on disk, just unnecessary work - so we filter them out when resorting
 * instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node, those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from
 * the front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written at
 * different times.
 *
 * There could be many of them on disk, but we never allow there to be more than
 * 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting from either keys after a specific
 * point (if you pass it a search key) or the start of the btree node.
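 *
 * For example, iterating over every key in a btree node b in sorted order
 * looks roughly like this (a sketch - process_key() is a placeholder, not a
 * real function):
 *
 *	struct btree_iter iter;
 *	struct bkey *k;
 *
 *	bch_btree_iter_init(b, &iter, NULL);
 *	while ((k = bch_btree_iter_next(&iter)))
 *		process_key(k);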
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node are
 * usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables share in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
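 *
 * A lookup therefore jumps to the indexed key at the start of the right
 * BSET_CACHELINE sized range and scans forward from there; roughly (with
 * cacheline_start() as an illustrative stand-in, not a real helper):
 *
 *	struct bkey *k = cacheline_start(t, i);
 *
 *	while (k != bset_bkey_last(t->data) && bkey_cmp(k, search) <= 0)
 *		k = bkey_next(k);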
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in advance
 * when traversing this tree.
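 *
 * The lookup itself is the standard tree-in-an-array descent, prefetching the
 * grandchildren a level or two ahead; a sketch, where bfloat_cmp() stands in
 * for the real comparison and returns 0 to descend left, 1 to descend right:
 *
 *	unsigned j = 1;
 *
 *	while (j < t->size) {
 *		prefetch(&t->tree[j * 4]);
 *		j = j * 2 + bfloat_cmp(search, &t->tree[j]);
 *	}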
 *
 * Nodes in the auxiliary search tree must contain both a key to compare against
 * (we don't want to fetch the key from the set, that would defeat the purpose),
 * and a pointer to the key. We use a few tricks to compress both of these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
 * a function (to_inorder()) that takes the index of a node in a binary tree and
 * returns what its index would be in an inorder traversal, so we only have to
 * store the low bits of the offset.
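 *
 * For example, in a complete 7 node tree the root (array index 1) is the 4th
 * node in inorder and its children (indices 2 and 3) are the 2nd and 6th, so
 * to_inorder() maps 1 -> 4, 2 -> 2, 3 -> 6, and so on - telling us which
 * BSET_CACHELINE sized range of the set each tree node corresponds to.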
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true even
 * at the root of the tree).
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ higher than bit 50, we don't need to check anything higher than
 * bit 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough bits
 * to partition the key range we're currently checking. Consider key n - the
 * key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 84 bits to do the
 * comparison. But we'd really like our nodes in the auxiliary search tree to be
 * small.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check if p and n differed in the bits we needed them to. If they didn't we
 * flag that node, and when doing lookups we fall back to comparing against the
 * real key. As long as this doesn't happen too often (and it seems to reliably
 * happen a bit less than 1% of the time), we win - even on failures, that key
 * is then more likely to be in cache than if we were doing binary searches all
 * the way, since we're touching so much less memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per 128 bytes in the btree node, which means the auxiliary
 * search trees take up 3% as much memory as the btree itself.
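 *
 * Concretely, a node could be packed along these lines, with m holding the low
 * bits of the key's offset within its cacheline (a sketch; the real
 * struct bkey_float lives in bset.c):
 *
 *	struct bkey_float {
 *		unsigned	exponent:7;
 *		unsigned	m:3;
 *		unsigned	mantissa:22;
 *	};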
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a much
 * simpler lookup table - it's just a flat array, so index i in the lookup table
 * corresponds to the ith range of BSET_CACHELINE bytes in the set. Indexing
 * within each byte range works the same as with the auxiliary search trees.
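 *
 * Finding the right cacheline in the flat array is an ordinary binary search;
 * roughly (table_to_bkey(), resolving index m to the key it indexes, is
 * illustrative here):
 *
 *	unsigned l = 0, r = t->size;
 *
 *	while (l + 1 != r) {
 *		unsigned m = (l + r) / 2;
 *
 *		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
 *			r = m;
 *		else
 *			l = m;
 *	}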
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the pointer
 * to it, only when it would overflow do we go to the trouble of finding the
 * first key in that range of bytes again.
 */

struct btree_keys;
struct btree_iter;
struct btree_iter_set;
struct bkey_float;
struct btree;

#define MAX_BSETS		4U

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	unsigned		size;

	/* function of size - precalculated for to_inorder() */
	unsigned		extra;

	/* copy of the last key in the set */
	struct bkey		end;
	struct bkey_float	*tree;

	/*
	 * The nodes in the bset tree point to specific keys - this
	 * array holds the sizes of the previous key.
	 *
	 * Conceptually it's a member of struct bkey_float, but we want
	 * to keep bkey_float to 4 bytes and prev isn't used in the fast
	 * path (bkey_float is)
	 */
	uint8_t			*prev;

	/* The actual btree node, with pointers to each sorted set */
	struct bset		*data;
};

struct btree_keys_ops {
	bool		(*sort_cmp)(struct btree_iter_set,
				    struct btree_iter_set);
	struct bkey	*(*sort_fixup)(struct btree_iter *, struct bkey *);
	bool		(*key_invalid)(struct btree_keys *,
				       const struct bkey *);
	bool		(*key_bad)(struct btree_keys *, const struct bkey *);
	bool		(*key_merge)(struct btree_keys *,
				     struct bkey *, struct bkey *);

	/*
	 * Only used for deciding whether to use START_KEY(k) or just the key
	 * itself in a couple places
	 */
	bool		is_extents;
};

struct btree_keys {
	const struct btree_keys_ops	*ops;
	uint8_t			page_order;
	uint8_t			nsets;
	unsigned		last_set_unwritten:1;
	bool			*expensive_debug_checks;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];
};

static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
{
	return b->set + b->nsets;
}

static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
{
	return t <= b->set + b->nsets - b->last_set_unwritten;
}

static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
{
	return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}

static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
{
	return ((size_t) i) - ((size_t) b->set->data);
}

static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
{
	return bset_byte_offset(b, i) >> 9;
}

static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
#ifdef CONFIG_BCACHE_DEBUG
	return *b->expensive_debug_checks;
#else
	return false;
#endif
}

#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
#define set_bytes(i)		__set_bytes(i, (i)->keys)

#define __set_blocks(i, k, block_bytes)				\
	DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
#define set_blocks(i, block_bytes)				\
	__set_blocks(i, (i)->keys, block_bytes)

static inline struct bset *bset_next_set(struct btree_keys *b,
					 unsigned block_bytes)
{
	struct bset *i = bset_tree_last(b)->data;

	return ((void *) i) + roundup(set_bytes(i), block_bytes);
}

void bch_btree_keys_free(struct btree_keys *);
int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
			 bool *);

void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
void bch_bset_build_written_tree(struct btree_keys *);
void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
static inline bool bch_bkey_try_merge(struct btree_keys *b,
				      struct bkey *l, struct bkey *r)
{
	return b->ops->key_merge ? b->ops->key_merge(b, l, r) : false;
}
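
/*
 * For example, extent keys for two contiguous writes can often be merged into
 * one. A sketch of typical use at insert time, where prev names whichever key
 * immediately precedes the insert position (illustrative only) - if the merge
 * succeeds, k has been folded into prev and there's nothing left to insert:
 *
 *	if (bch_bkey_try_merge(b, prev, k))
 *		return;
 */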

/* Btree key iteration */

struct btree_iter {
	size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
	struct btree_keys *b;
#endif
	struct btree_iter_set {
		struct bkey *k, *end;
	} data[MAX_BSETS];
};

typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);

struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
					struct btree_keys *, ptr_filter_fn);

void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
struct bkey *bch_btree_iter_init(struct btree *, struct btree_iter *,
				 struct bkey *);

struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
			       const struct bkey *);

/*
 * Returns the first key that is strictly greater than search
 */
static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
					   const struct bkey *search)
{
	return search ? __bch_bset_search(b, t, search) : t->data->start;
}

/* Sorting */

struct bset_sort_state {
	mempool_t		*pool;

	unsigned		page_order;
	unsigned		crit_factor;

	struct time_stats	time;
};

void bch_bset_sort_state_free(struct bset_sort_state *);
int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
void bch_btree_sort_lazy(struct btree *, struct bset_sort_state *);
void bch_btree_sort_into(struct btree *, struct btree *,
			 struct bset_sort_state *);
void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
				    struct bset_sort_state *);
void bch_btree_sort_partial(struct btree *, unsigned,
			    struct bset_sort_state *);

static inline void bch_btree_sort(struct btree *b,
				  struct bset_sort_state *state)
{
	bch_btree_sort_partial(b, 0, state);
}

/* Bkey utility code */

#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)

static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
{
	return bkey_idx(i->start, idx);
}

static inline void bkey_init(struct bkey *k)
{
	*k = ZERO_KEY;
}

static __always_inline int64_t bkey_cmp(const struct bkey *l,
					const struct bkey *r)
{
	return unlikely(KEY_INODE(l) != KEY_INODE(r))
		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
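
/*
 * Keys sort by inode, then by offset - e.g. bkey_cmp(&KEY(1, 100, 0),
 * &KEY(1, 200, 0)) is negative, and every key for inode 1 sorts before any
 * key for inode 2.
 */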

void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
			      unsigned);
bool __bch_cut_front(const struct bkey *, struct bkey *);
bool __bch_cut_back(const struct bkey *, struct bkey *);

static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, k) > 0);
	return __bch_cut_front(where, k);
}

static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
	return __bch_cut_back(where, k);
}
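
/*
 * An extent key's offset is where it ends: if an older extent k covers sectors
 * [0, 100) and a newer key covers [50, 150), then bch_cut_back(&START_KEY(new),
 * k) trims k to [0, 50), removing the overlap (a sketch - "new" just names the
 * newer key here).
 */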

#define PRECEDING_KEY(_k)					\
({								\
	struct bkey *_ret = NULL;				\
								\
	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\
		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\
								\
		if (!_ret->low)					\
			_ret->high--;				\
		_ret->low--;					\
	}							\
								\
	_ret;							\
})

static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_invalid(b, k);
}

static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_bad(b, k);
}
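
/*
 * These are the filters the iterator's filtered variant is meant for - e.g. to
 * walk a node while skipping keys that don't point to real data (a sketch,
 * assuming iter was set up with bch_btree_iter_init() and process_key() is a
 * placeholder):
 *
 *	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
 *		process_key(k);
 */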

struct keylist {
	union {
		struct bkey		*keys;
		uint64_t		*keys_p;
	};
	union {
		struct bkey		*top;
		uint64_t		*top_p;
	};

	/* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE		16
	uint64_t		inline_keys[KEYLIST_INLINE];
};

static inline void bch_keylist_init(struct keylist *l)
{
	l->top_p = l->keys_p = l->inline_keys;
}

static inline void bch_keylist_push(struct keylist *l)
{
	l->top = bkey_next(l->top);
}

static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
{
	bkey_copy(l->top, k);
	bch_keylist_push(l);
}

static inline bool bch_keylist_empty(struct keylist *l)
{
	return l->top == l->keys;
}

static inline void bch_keylist_reset(struct keylist *l)
{
	l->top = l->keys;
}

static inline void bch_keylist_free(struct keylist *l)
{
	if (l->keys_p != l->inline_keys)
		kfree(l->keys_p);
}

static inline size_t bch_keylist_nkeys(struct keylist *l)
{
	return l->top_p - l->keys_p;
}

static inline size_t bch_keylist_bytes(struct keylist *l)
{
	return bch_keylist_nkeys(l) * sizeof(uint64_t);
}

struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
int __bch_keylist_realloc(struct keylist *, unsigned);
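
/*
 * Typical keylist usage (a sketch; consume_keys() is a placeholder). Note that
 * bch_keylist_free() only frees anything if the list outgrew inline_keys and
 * was realloc'd:
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	bch_keylist_add(&keys, k);
 *	consume_keys(&keys);
 *	bch_keylist_free(&keys);
 */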

const char *bch_ptr_status(struct cache_set *, const struct bkey *);

int bch_bset_print_stats(struct cache_set *, char *);