2 * Code for working with individual keys, and sorted sets of keys within a
5 * Copyright 2012 Google, Inc.
12 #include <linux/random.h>
13 #include <linux/prefetch.h>
17 int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
19 size_t oldsize = bch_keylist_nkeys(l);
20 size_t newsize = oldsize + u64s;
21 uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
24 newsize = roundup_pow_of_two(newsize);
26 if (newsize <= KEYLIST_INLINE ||
27 roundup_pow_of_two(oldsize) == newsize)
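/*
 * e.g. growing a keylist from 5 to 7 u64s: both round up to 8, so the buffer
 * from the previous realloc already has room and we can return without
 * calling krealloc() below.
 */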
30 new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
36 memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
39 l->top_p = new_keys + oldsize;
44 struct bkey *bch_keylist_pop(struct keylist *l)
46 struct bkey *k = l->keys;
51 while (bkey_next(k) != l->top)
57 void bch_keylist_pop_front(struct keylist *l)
59 l->top_p -= bkey_u64s(l->keys);
63 bch_keylist_bytes(l));
66 /* Pointer validation */
68 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
72 for (i = 0; i < KEY_PTRS(k); i++)
73 if (ptr_available(c, k, i)) {
74 struct cache *ca = PTR_CACHE(c, k, i);
75 size_t bucket = PTR_BUCKET_NR(c, k, i);
76 size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
78 if (KEY_SIZE(k) + r > c->sb.bucket_size ||
79 bucket < ca->sb.first_bucket ||
80 bucket >= ca->sb.nbuckets)
87 bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
91 if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
94 if (__ptr_invalid(c, k))
99 bch_bkey_to_text(buf, sizeof(buf), k);
100 cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
104 bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
111 if (KEY_SIZE(k) > KEY_OFFSET(k))
114 if (__ptr_invalid(c, k))
119 bch_bkey_to_text(buf, sizeof(buf), k);
120 cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
124 static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
127 struct bucket *g = PTR_BUCKET(b->c, k, ptr);
130 if (mutex_trylock(&b->c->bucket_lock)) {
133 g->prio != BTREE_PRIO ||
134 (b->c->gc_mark_valid &&
135 GC_MARK(g) != GC_MARK_METADATA))
139 if (g->prio == BTREE_PRIO)
143 b->c->gc_mark_valid &&
144 GC_MARK(g) != GC_MARK_DIRTY)
147 mutex_unlock(&b->c->bucket_lock);
152 mutex_unlock(&b->c->bucket_lock);
153 bch_bkey_to_text(buf, sizeof(buf), k);
155 "inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
156 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
157 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
161 bool bch_ptr_bad(struct btree *b, const struct bkey *k)
166 if (!bkey_cmp(k, &ZERO_KEY) ||
168 bch_ptr_invalid(b, k))
171 for (i = 0; i < KEY_PTRS(k); i++)
172 if (!ptr_available(b->c, k, i))
175 if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
178 for (i = 0; i < KEY_PTRS(k); i++) {
179 g = PTR_BUCKET(b->c, k, i);
180 stale = ptr_stale(b->c, k, i);
182 btree_bug_on(stale > 96, b,
183 "key too stale: %i, need_gc %u",
184 stale, b->c->need_gc);
186 btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
187 b, "stale dirty pointer");
192 if (expensive_debug_checks(b->c) &&
193 ptr_bad_expensive_checks(b, k, i))
200 /* Key/pointer manipulation */
202 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
205 BUG_ON(i > KEY_PTRS(src));
207 /* Only copy the header, key, and one pointer. */
208 memcpy(dest, src, 2 * sizeof(uint64_t));
209 dest->ptr[0] = src->ptr[i];
210 SET_KEY_PTRS(dest, 1);
211 /* We didn't copy the checksum so clear that bit. */
212 SET_KEY_CSUM(dest, 0);
215 bool __bch_cut_front(const struct bkey *where, struct bkey *k)
219 if (bkey_cmp(where, &START_KEY(k)) <= 0)
222 if (bkey_cmp(where, k) < 0)
223 len = KEY_OFFSET(k) - KEY_OFFSET(where);
225 bkey_copy_key(k, where);
227 for (i = 0; i < KEY_PTRS(k); i++)
228 SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);
230 BUG_ON(len > KEY_SIZE(k));
231 SET_KEY_SIZE(k, len);
235 bool __bch_cut_back(const struct bkey *where, struct bkey *k)
239 if (bkey_cmp(where, k) >= 0)
242 BUG_ON(KEY_INODE(where) != KEY_INODE(k));
244 if (bkey_cmp(where, &START_KEY(k)) > 0)
245 len = KEY_OFFSET(where) - KEY_START(k);
247 bkey_copy_key(k, where);
249 BUG_ON(len > KEY_SIZE(k));
250 SET_KEY_SIZE(k, len);
254 static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
256 return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
257 ~((uint64_t)1 << 63);
260 /* Tries to merge l and r: l should be lower than r.
261 * Returns true if we were able to merge. If we did merge, l will be the merged
262 * key, r will be untouched.
264 bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
268 if (key_merging_disabled(b->c))
271 if (KEY_PTRS(l) != KEY_PTRS(r) ||
272 KEY_DIRTY(l) != KEY_DIRTY(r) ||
273 bkey_cmp(l, &START_KEY(r)))
276 for (i = 0; i < KEY_PTRS(l); i++)
277 if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
278 PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
281 /* Keys with no pointers aren't restricted to one bucket and could
284 if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
285 SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
286 SET_KEY_SIZE(l, USHRT_MAX);
294 l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
299 SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
300 SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
305 /* Auxiliary search trees */
308 #define BKEY_MID_BITS 3
309 #define BKEY_EXPONENT_BITS 7
310 #define BKEY_MANTISSA_BITS (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
311 #define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
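/*
 * The three bitfields below pack into a single 32-bit word:
 * 7 exponent bits + 3 "m" bits + 22 mantissa bits. An exponent of 127 marks
 * a failed node (see make_bfloat() below).
 */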
314 unsigned exponent:BKEY_EXPONENT_BITS;
315 unsigned m:BKEY_MID_BITS;
316 unsigned mantissa:BKEY_MANTISSA_BITS;
320 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
321 * it used to be 64, but I realized the lookup code would touch slightly less
322 * memory if it was 128.
324 * It defines the number of bytes (in struct bset) per struct bkey_float in
325 * the auxiliary search tree - when we're done searching the bkey_float tree we
326 * have this many bytes left that we do a linear search over.
328 * Since (after level 5) every level of the bset_tree is on a new cacheline,
329 * we're touching one fewer cacheline in the bset tree in exchange for one more
330 * cacheline in the linear search - but the linear search might stop before it
331 * gets to the second cacheline.
334 #define BSET_CACHELINE 128
336 /* Space required for the btree node keys */
337 static inline size_t btree_keys_bytes(struct btree *b)
339 return PAGE_SIZE << b->page_order;
342 static inline size_t btree_keys_cachelines(struct btree *b)
344 return btree_keys_bytes(b) / BSET_CACHELINE;
347 /* Space required for the auxiliary search trees */
348 static inline size_t bset_tree_bytes(struct btree *b)
350 return btree_keys_cachelines(b) * sizeof(struct bkey_float);
353 /* Space required for the prev pointers */
354 static inline size_t bset_prev_bytes(struct btree *b)
356 return btree_keys_cachelines(b) * sizeof(uint8_t);
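/*
 * As a rough illustration (assuming 4 KiB pages and page_order == 0, which
 * need not match a real configuration): btree_keys_bytes() is 4096,
 * btree_keys_cachelines() is 4096 / 128 = 32, so the auxiliary tree costs
 * 32 * sizeof(struct bkey_float) = 128 bytes and the prev array 32 bytes.
 */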
359 /* Memory allocation */
361 void bch_btree_keys_free(struct btree *b)
363 struct bset_tree *t = b->sets;
365 if (bset_prev_bytes(b) < PAGE_SIZE)
368 free_pages((unsigned long) t->prev,
369 get_order(bset_prev_bytes(b)));
371 if (bset_tree_bytes(b) < PAGE_SIZE)
374 free_pages((unsigned long) t->tree,
375 get_order(bset_tree_bytes(b)));
377 free_pages((unsigned long) t->data, b->page_order);
384 int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
386 struct bset_tree *t = b->sets;
390 b->page_order = page_order;
392 t->data = (void *) __get_free_pages(gfp, b->page_order);
396 t->tree = bset_tree_bytes(b) < PAGE_SIZE
397 ? kmalloc(bset_tree_bytes(b), gfp)
398 : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
402 t->prev = bset_prev_bytes(b) < PAGE_SIZE
403 ? kmalloc(bset_prev_bytes(b), gfp)
404 : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
410 bch_btree_keys_free(b);
414 /* Binary tree stuff for auxiliary search trees */
416 static unsigned inorder_next(unsigned j, unsigned size)
418 if (j * 2 + 1 < size) {
429 static unsigned inorder_prev(unsigned j, unsigned size)
434 while (j * 2 + 1 < size)
442 /* I have no idea why this code works... and I'm the one who wrote it
444 * However, I do know what it does:
445 * Given a binary tree constructed in an array (i.e. how you normally implement
446 * a heap), it converts a node in the tree - referenced by array index - to the
447 * index it would have if you did an inorder traversal.
449 * Also tested for every j, for every size up to somewhere around 6 million.
451 * The binary tree starts at array index 1, not 0
452 * extra is a function of size:
453 * extra = (size - rounddown_pow_of_two(size - 1)) << 1;
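 * For example, with size == 8 (tree indices 1..7) extra is (8 - 4) << 1 = 8,
 * and __to_inorder() maps j = 1..7 to 4, 2, 6, 1, 3, 5, 7 - the positions
 * those nodes occupy in an inorder walk of the complete 7-node tree (with a
 * full bottom row, the extra adjustment never fires).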
455 static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
458 unsigned shift = fls(size - 1) - b;
466 j -= (j - extra) >> 1;
471 static unsigned to_inorder(unsigned j, struct bset_tree *t)
473 return __to_inorder(j, t->size, t->extra);
476 static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
486 j |= roundup_pow_of_two(size) >> shift;
491 static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
493 return __inorder_to_tree(j, t->size, t->extra);
497 void inorder_test(void)
499 unsigned long done = 0;
500 ktime_t start = ktime_get();
502 for (unsigned size = 2;
505 unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
506 unsigned i = 1, j = rounddown_pow_of_two(size - 1);
509 printk(KERN_NOTICE "loop %u, %llu per us\n", size,
510 done / ktime_us_delta(ktime_get(), start));
513 if (__inorder_to_tree(i, size, extra) != j)
514 panic("size %10u j %10u i %10u", size, j, i);
516 if (__to_inorder(j, size, extra) != i)
517 panic("size %10u j %10u i %10u", size, j, i);
519 if (j == rounddown_pow_of_two(size) - 1)
522 BUG_ON(inorder_prev(inorder_next(j, size), size) != j);
524 j = inorder_next(j, size);
534 * Cacheline/offset <-> bkey pointer arithmetic:
536 * t->tree is a binary search tree in an array; each node corresponds to a key
537 * in one cacheline in t->set (BSET_CACHELINE bytes).
539 * This means we don't have to store the full index of the key that a node in
540 * the binary tree points to; to_inorder() gives us the cacheline, and then
541 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
543 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
546 * To construct the bfloat for an arbitrary key we need to know what the key
547 * immediately preceding it is: we have to check if the two keys differ in the
548 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
549 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
552 static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
555 return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
558 static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
560 return ((void *) k - (void *) t->data) / BSET_CACHELINE;
563 static unsigned bkey_to_cacheline_offset(struct bkey *k)
565 return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
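/*
 * Illustration: since t->data is page aligned, a key starting 280 bytes into
 * the bset lies in cacheline 280 / 128 = 2 at offset (280 % 128) / 8 = 3, so
 * cacheline_to_bkey(t, 2, 3) recovers exactly what bkey_to_cacheline() and
 * bkey_to_cacheline_offset() decomposed.
 */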
568 static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
570 return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
573 static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
575 return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
579 * For the write set - the one we're currently inserting keys into - we don't
580 * maintain a full search tree; we just keep a simple lookup table in t->prev.
582 static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
584 return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
587 static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
590 low |= (high << 1) << (63U - shift);
594 static inline unsigned bfloat_mantissa(const struct bkey *k,
595 struct bkey_float *f)
597 const uint64_t *p = &k->low - (f->exponent >> 6);
598 return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
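/*
 * In other words, the mantissa is the window of BKEY_MANTISSA_BITS bits that
 * sits f->exponent bits above bit 0 of k->low (treating high:low as one
 * 128-bit value), with shrd128() borrowing from the preceding 64-bit word in
 * memory when the window crosses a word boundary.
 */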
601 static void make_bfloat(struct bset_tree *t, unsigned j)
603 struct bkey_float *f = &t->tree[j];
604 struct bkey *m = tree_to_bkey(t, j);
605 struct bkey *p = tree_to_prev_bkey(t, j);
607 struct bkey *l = is_power_of_2(j)
609 : tree_to_prev_bkey(t, j >> ffs(j));
611 struct bkey *r = is_power_of_2(j + 1)
612 ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
613 : tree_to_bkey(t, j >> (ffz(j) + 1));
615 BUG_ON(m < l || m > r);
616 BUG_ON(bkey_next(p) != m);
618 if (KEY_INODE(l) != KEY_INODE(r))
619 f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
621 f->exponent = fls64(r->low ^ l->low);
623 f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);
626 * Setting f->exponent = 127 flags this node as failed, and causes the
627 * lookup code to fall back to comparing against the original key.
630 if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
631 f->mantissa = bfloat_mantissa(m, f) - 1;
636 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
639 unsigned j = roundup(t[-1].size,
640 64 / sizeof(struct bkey_float));
642 t->tree = t[-1].tree + j;
643 t->prev = t[-1].prev + j;
646 while (t < b->sets + MAX_BSETS)
650 static void bch_bset_build_unwritten_tree(struct btree *b)
652 struct bset_tree *t = bset_tree_last(b);
654 bset_alloc_tree(b, t);
656 if (t->tree != b->sets->tree + btree_keys_cachelines(b)) {
657 t->prev[0] = bkey_to_cacheline_offset(t->data->start);
662 void bch_bset_init_next(struct btree *b, struct bset *i, uint64_t magic)
664 if (i != b->sets->data) {
665 b->sets[++b->nsets].data = i;
666 i->seq = b->sets->data->seq;
668 get_random_bytes(&i->seq, sizeof(uint64_t));
674 bch_bset_build_unwritten_tree(b);
677 static void bset_build_written_tree(struct btree *b)
679 struct bset_tree *t = bset_tree_last(b);
680 struct bkey *k = t->data->start;
681 unsigned j, cacheline = 1;
683 bset_alloc_tree(b, t);
685 t->size = min_t(unsigned,
686 bkey_to_cacheline(t, bset_bkey_last(t->data)),
687 b->sets->tree + btree_keys_cachelines(b) - t->tree);
694 t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
696 /* First we figure out where the first key in each cacheline is */
697 for (j = inorder_next(0, t->size);
699 j = inorder_next(j, t->size)) {
700 while (bkey_to_cacheline(t, k) != cacheline)
703 t->prev[j] = bkey_u64s(k);
706 t->tree[j].m = bkey_to_cacheline_offset(k);
709 while (bkey_next(k) != bset_bkey_last(t->data))
714 /* Then we build the tree */
715 for (j = inorder_next(0, t->size);
717 j = inorder_next(j, t->size))
721 void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
724 unsigned inorder, j = 1;
726 for (t = b->sets; t <= bset_tree_last(b); t++)
727 if (k < bset_bkey_last(t->data))
732 if (!t->size || !bset_written(b, t))
735 inorder = bkey_to_cacheline(t, k);
737 if (k == t->data->start)
740 if (bkey_next(k) == bset_bkey_last(t->data)) {
745 j = inorder_to_tree(inorder, t);
749 k == tree_to_bkey(t, j))
753 } while (j < t->size);
755 j = inorder_to_tree(inorder + 1, t);
759 k == tree_to_prev_bkey(t, j))
763 } while (j < t->size);
766 static void bch_bset_fix_lookup_table(struct btree *b,
770 unsigned shift = bkey_u64s(k);
771 unsigned j = bkey_to_cacheline(t, k);
773 /* We're getting called from btree_split() or btree_gc, just bail out */
777 /* k is the key we just inserted; we need to find the entry in the
778 * lookup table for the first key that is strictly greater than k:
779 * it's either k's cacheline or the next one
782 table_to_bkey(t, j) <= k)
785 /* Adjust all the lookup table entries, and find a new key for any that
786 * have gotten too big
788 for (; j < t->size; j++) {
791 if (t->prev[j] > 7) {
792 k = table_to_bkey(t, j - 1);
794 while (k < cacheline_to_bkey(t, j, 0))
797 t->prev[j] = bkey_to_cacheline_offset(k);
801 if (t->size == b->sets->tree + btree_keys_cachelines(b) - t->tree)
804 /* Possibly add a new entry to the end of the lookup table */
806 for (k = table_to_bkey(t, t->size - 1);
807 k != bset_bkey_last(t->data);
809 if (t->size == bkey_to_cacheline(t, k)) {
810 t->prev[t->size] = bkey_to_cacheline_offset(k);
815 void bch_bset_insert(struct btree *b, struct bkey *where,
818 struct bset_tree *t = bset_tree_last(b);
820 BUG_ON(t->data != write_block(b));
821 BUG_ON(bset_byte_offset(b, t->data) +
822 __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
823 PAGE_SIZE << b->page_order);
825 memmove((uint64_t *) where + bkey_u64s(insert),
827 (void *) bset_bkey_last(t->data) - (void *) where);
829 t->data->keys += bkey_u64s(insert);
830 bkey_copy(where, insert);
831 bch_bset_fix_lookup_table(b, t, where);
834 struct bset_search_iter {
838 static struct bset_search_iter bset_search_write_set(struct btree *b,
840 const struct bkey *search)
842 unsigned li = 0, ri = t->size;
845 t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
847 while (li + 1 != ri) {
848 unsigned m = (li + ri) >> 1;
850 if (bkey_cmp(table_to_bkey(t, m), search) > 0)
856 return (struct bset_search_iter) {
857 table_to_bkey(t, li),
858 ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
862 static struct bset_search_iter bset_search_tree(struct btree *b,
864 const struct bkey *search)
867 struct bkey_float *f;
868 unsigned inorder, j, n = 1;
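/*
 * Branchless clamp (relies on >> of a negative int being an arithmetic
 * shift, as the kernel assumes): while p < t->size the subtraction is
 * negative and the shift yields all ones, leaving p alone; past the end it
 * yields 0 and we harmlessly prefetch &t->tree[0] instead.
 */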
872 p &= ((int) (p - t->size)) >> 31;
874 prefetch(&t->tree[p]);
880 * n = (f->mantissa > bfloat_mantissa())
884 * We need to subtract 1 from f->mantissa for the sign bit trick
885 * to work - that's done in make_bfloat()
887 if (likely(f->exponent != 127))
888 n = j * 2 + (((unsigned)
890 bfloat_mantissa(search, f))) >> 31);
892 n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
895 } while (n < t->size);
897 inorder = to_inorder(j, t);
900 * n would have been the node we recursed to - the low bit tells us if
901 * we recursed left or recursed right.
904 l = cacheline_to_bkey(t, inorder, f->m);
906 if (++inorder != t->size) {
907 f = &t->tree[inorder_next(j, t->size)];
908 r = cacheline_to_bkey(t, inorder, f->m);
910 r = bset_bkey_last(t->data);
912 r = cacheline_to_bkey(t, inorder, f->m);
915 f = &t->tree[inorder_prev(j, t->size)];
916 l = cacheline_to_bkey(t, inorder, f->m);
921 return (struct bset_search_iter) {l, r};
924 struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
925 const struct bkey *search)
927 struct bset_search_iter i;
930 * First, we search for a cacheline, then we do a linear search
931 * within that cacheline.
933 * To search for the cacheline, there are three different possibilities:
934 * * The set is too small to have a search tree, so we just do a linear
935 * search over the whole set.
936 * * The set is the one we're currently inserting into; keeping a full
937 * auxiliary search tree up to date would be too expensive, so we
938 * use a much simpler lookup table to do a binary search -
939 * bset_search_write_set().
940 * * Or we use the auxiliary search tree we constructed earlier -
944 if (unlikely(!t->size)) {
945 i.l = t->data->start;
946 i.r = bset_bkey_last(t->data);
947 } else if (bset_written(b, t)) {
949 * Each node in the auxiliary search tree covers a certain range
950 * of bits, and keys above and below the set it covers might
951 * differ outside those bits - so we have to special case the
952 * start and end - handle that here:
955 if (unlikely(bkey_cmp(search, &t->end) >= 0))
956 return bset_bkey_last(t->data);
958 if (unlikely(bkey_cmp(search, t->data->start) < 0))
959 return t->data->start;
961 i = bset_search_tree(b, t, search);
963 i = bset_search_write_set(b, t, search);
965 if (expensive_debug_checks(b->c)) {
966 BUG_ON(bset_written(b, t) &&
967 i.l != t->data->start &&
968 bkey_cmp(tree_to_prev_bkey(t,
969 inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
972 BUG_ON(i.r != bset_bkey_last(t->data) &&
973 bkey_cmp(i.r, search) <= 0);
976 while (likely(i.l != i.r) &&
977 bkey_cmp(i.l, search) <= 0)
978 i.l = bkey_next(i.l);
985 typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
986 struct btree_iter_set);
988 static inline bool btree_iter_cmp(struct btree_iter_set l,
989 struct btree_iter_set r)
991 return bkey_cmp(l.k, r.k) > 0;
994 static inline bool btree_iter_end(struct btree_iter *iter)
999 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
1003 BUG_ON(!heap_add(iter,
1004 ((struct btree_iter_set) { k, end }),
1008 static struct bkey *__bch_btree_iter_init(struct btree *b,
1009 struct btree_iter *iter,
1010 struct bkey *search,
1011 struct bset_tree *start)
1013 struct bkey *ret = NULL;
1014 iter->size = ARRAY_SIZE(iter->data);
1017 #ifdef CONFIG_BCACHE_DEBUG
1021 for (; start <= &b->sets[b->nsets]; start++) {
1022 ret = bch_bset_search(b, start, search);
1023 bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
1029 struct bkey *bch_btree_iter_init(struct btree *b,
1030 struct btree_iter *iter,
1031 struct bkey *search)
1033 return __bch_btree_iter_init(b, iter, search, b->sets);
1036 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
1037 btree_iter_cmp_fn *cmp)
1039 struct btree_iter_set unused;
1040 struct bkey *ret = NULL;
1042 if (!btree_iter_end(iter)) {
1043 bch_btree_iter_next_check(iter);
1045 ret = iter->data->k;
1046 iter->data->k = bkey_next(iter->data->k);
1048 if (iter->data->k > iter->data->end) {
1049 WARN_ONCE(1, "bset was corrupt!\n");
1050 iter->data->k = iter->data->end;
1053 if (iter->data->k == iter->data->end)
1054 heap_pop(iter, unused, cmp);
1056 heap_sift(iter, 0, cmp);
1062 struct bkey *bch_btree_iter_next(struct btree_iter *iter)
1064 return __bch_btree_iter_next(iter, btree_iter_cmp);
1068 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
1069 struct btree *b, ptr_filter_fn fn)
1074 ret = bch_btree_iter_next(iter);
1075 } while (ret && fn(b, ret));
1082 void bch_bset_sort_state_free(struct bset_sort_state *state)
1085 mempool_destroy(state->pool);
1088 int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
1090 spin_lock_init(&state->time.lock);
1092 state->page_order = page_order;
1093 state->crit_factor = int_sqrt(1 << page_order);
1095 state->pool = mempool_create_page_pool(1, page_order);
1102 static void sort_key_next(struct btree_iter *iter,
1103 struct btree_iter_set *i)
1105 i->k = bkey_next(i->k);
1108 *i = iter->data[--iter->used];
1112 * Returns true if l > r - unless l == r, in which case returns true if l is
1115 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
1116 * equal in different sets, we have to process them newest to oldest.
1118 static inline bool sort_extent_cmp(struct btree_iter_set l,
1119 struct btree_iter_set r)
1121 int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
1123 return c ? c > 0 : l.k < r.k;
1126 static inline bool sort_cmp(struct btree_iter_set l,
1127 struct btree_iter_set r)
1129 int64_t c = bkey_cmp(l.k, r.k);
1131 return c ? c > 0 : l.k < r.k;
1134 static struct bkey *btree_sort_fixup_extents(struct btree_iter *iter,
1137 while (iter->used > 1) {
1138 struct btree_iter_set *top = iter->data, *i = top + 1;
1140 if (iter->used > 2 &&
1141 sort_extent_cmp(i[0], i[1]))
1144 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
1147 if (!KEY_SIZE(i->k)) {
1148 sort_key_next(iter, i);
1149 heap_sift(iter, i - top, sort_extent_cmp);
1153 if (top->k > i->k) {
1154 if (bkey_cmp(top->k, i->k) >= 0)
1155 sort_key_next(iter, i);
1157 bch_cut_front(top->k, i->k);
1159 heap_sift(iter, i - top, sort_extent_cmp);
1161 /* can't happen because of comparison func */
1162 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
1164 if (bkey_cmp(i->k, top->k) < 0) {
1165 bkey_copy(tmp, top->k);
1167 bch_cut_back(&START_KEY(i->k), tmp);
1168 bch_cut_front(i->k, top->k);
1169 heap_sift(iter, 0, btree_iter_cmp);
1173 bch_cut_back(&START_KEY(i->k), top->k);
1181 static void btree_mergesort(struct btree *b, struct bset *out,
1182 struct btree_iter *iter,
1183 bool fixup, bool remove_stale)
1186 struct bkey *k, *last = NULL;
1188 btree_iter_cmp_fn *cmp = b->level
1191 bool (*bad)(struct btree *, const struct bkey *) = remove_stale
1195 /* Heapify the iterator, using our comparison function */
1196 for (i = iter->used / 2 - 1; i >= 0; --i)
1197 heap_sift(iter, i, cmp);
1199 while (!btree_iter_end(iter)) {
1200 if (fixup && !b->level)
1201 k = btree_sort_fixup_extents(iter, &tmp.k);
1206 k = __bch_btree_iter_next(iter, cmp);
1214 } else if (b->level ||
1215 !bch_bkey_try_merge(b, last, k)) {
1216 last = bkey_next(last);
1221 out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
1223 pr_debug("sorted %i keys", out->keys);
1226 static void __btree_sort(struct btree *b, struct btree_iter *iter,
1227 unsigned start, unsigned order, bool fixup,
1228 struct bset_sort_state *state)
1230 uint64_t start_time;
1231 bool used_mempool = false;
1232 struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
1235 BUG_ON(order > state->page_order);
1237 out = page_address(mempool_alloc(state->pool, GFP_NOIO));
1238 used_mempool = true;
1239 order = ilog2(bucket_pages(b->c));
1242 start_time = local_clock();
1244 btree_mergesort(b, out, iter, fixup, false);
1247 if (!start && order == b->page_order) {
1249 * Our temporary buffer is the same size as the btree node's
1250 * buffer, so we can just swap buffers instead of doing a big
1254 out->magic = bset_magic(&b->c->sb);
1255 out->seq = b->sets[0].data->seq;
1256 out->version = b->sets[0].data->version;
1257 swap(out, b->sets[0].data);
1259 b->sets[start].data->keys = out->keys;
1260 memcpy(b->sets[start].data->start, out->start,
1261 (void *) bset_bkey_last(out) - (void *) out->start);
1265 mempool_free(virt_to_page(out), state->pool);
1267 free_pages((unsigned long) out, order);
1269 bset_build_written_tree(b);
1272 bch_time_stats_update(&state->time, start_time);
1275 void bch_btree_sort_partial(struct btree *b, unsigned start,
1276 struct bset_sort_state *state)
1278 size_t order = b->page_order, keys = 0;
1279 struct btree_iter iter;
1280 int oldsize = bch_count_data(b);
1282 __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
1284 BUG_ON(!bset_written(b, bset_tree_last(b)) &&
1285 (bset_tree_last(b)->size || b->nsets));
1290 for (i = start; i <= b->nsets; i++)
1291 keys += b->sets[i].data->keys;
1293 order = roundup_pow_of_two(__set_bytes(b->sets->data,
1296 order = ilog2(order);
1299 __btree_sort(b, &iter, start, order, false, state);
1301 EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
1304 void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter,
1305 struct bset_sort_state *state)
1307 __btree_sort(b, iter, 0, b->page_order, true, state);
1310 void bch_btree_sort_into(struct btree *b, struct btree *new,
1311 struct bset_sort_state *state)
1313 uint64_t start_time = local_clock();
1315 struct btree_iter iter;
1316 bch_btree_iter_init(b, &iter, NULL);
1318 btree_mergesort(b, new->sets->data, &iter, false, true);
1320 bch_time_stats_update(&state->time, start_time);
1322 new->sets->size = 0;
1325 #define SORT_CRIT (4096 / sizeof(uint64_t))
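/*
 * Rough illustration of the heuristic below (example values, not tuning
 * advice): with page_order == 3, crit_factor == int_sqrt(8) == 2, so the
 * newest bset is folded in once it holds fewer than 1024 keys, the one
 * before it below 2048 keys, and so on.
 */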
1327 void bch_btree_sort_lazy(struct btree *b, struct bset_sort_state *state)
1329 unsigned crit = SORT_CRIT;
1332 /* Don't sort if nothing to do */
1336 for (i = b->nsets - 1; i >= 0; --i) {
1337 crit *= state->crit_factor;
1339 if (b->sets[i].data->keys < crit) {
1340 bch_btree_sort_partial(b, i, state);
1345 /* Sort if we'd overflow */
1346 if (b->nsets + 1 == MAX_BSETS) {
1347 bch_btree_sort(b, state);
1352 bset_build_written_tree(b);
1360 size_t sets_written, sets_unwritten;
1361 size_t bytes_written, bytes_unwritten;
1362 size_t floats, failed;
1365 static int btree_bset_stats(struct btree_op *op, struct btree *b)
1367 struct bset_stats *stats = container_of(op, struct bset_stats, op);
1372 for (i = 0; i <= b->nsets; i++) {
1373 struct bset_tree *t = &b->sets[i];
1374 size_t bytes = t->data->keys * sizeof(uint64_t);
1377 if (bset_written(b, t)) {
1378 stats->sets_written++;
1379 stats->bytes_written += bytes;
1381 stats->floats += t->size - 1;
1383 for (j = 1; j < t->size; j++)
1384 if (t->tree[j].exponent == 127)
1387 stats->sets_unwritten++;
1388 stats->bytes_unwritten += bytes;
1392 return MAP_CONTINUE;
1395 int bch_bset_print_stats(struct cache_set *c, char *buf)
1397 struct bset_stats t;
1400 memset(&t, 0, sizeof(struct bset_stats));
1401 bch_btree_op_init(&t.op, -1);
1403 ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
1407 return snprintf(buf, PAGE_SIZE,
1408 "btree nodes: %zu\n"
1409 "written sets: %zu\n"
1410 "unwritten sets: %zu\n"
1411 "written key bytes: %zu\n"
1412 "unwritten key bytes: %zu\n"
1416 t.sets_written, t.sets_unwritten,
1417 t.bytes_written, t.bytes_unwritten,
1418 t.floats, t.failed);