drivers/md/bcache/bset.c
1 /*
2  * Code for working with individual keys, and sorted sets of keys with in a
3  * btree node
4  *
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcache.h"
9 #include "btree.h"
10 #include "debug.h"
11
12 #include <linux/random.h>
13 #include <linux/prefetch.h>
14
15 /* Keylists */
16
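/*
 * __bch_keylist_realloc() grows a keylist so it has room for @u64s more
 * 64-bit words.  The new size is rounded up to a power of two so a run of
 * small appends doesn't hit krealloc() every time; if the keys still fit in
 * the inline buffer, or the current allocation is already big enough,
 * nothing is reallocated.  The first time the list spills out of
 * l->inline_keys the inline contents are copied into the heap buffer.
 *
 * Illustrative only - a minimal sketch of how a caller might use it,
 * assuming the usual keylist helpers (bkey_copy(), bch_keylist_push())
 * from the headers:
 */
#if 0
static int keylist_add_example(struct keylist *l, struct bkey *k)
{
	int ret = __bch_keylist_realloc(l, bkey_u64s(k));
	if (ret)
		return ret;

	bkey_copy(l->top, k);	/* append the key at the end... */
	bch_keylist_push(l);	/* ...and advance l->top past it */
	return 0;
}
#endif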
17 int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
18 {
19         size_t oldsize = bch_keylist_nkeys(l);
20         size_t newsize = oldsize + u64s;
21         uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
22         uint64_t *new_keys;
23
24         newsize = roundup_pow_of_two(newsize);
25
26         if (newsize <= KEYLIST_INLINE ||
27             roundup_pow_of_two(oldsize) == newsize)
28                 return 0;
29
30         new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
31
32         if (!new_keys)
33                 return -ENOMEM;
34
35         if (!old_keys)
36                 memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
37
38         l->keys_p = new_keys;
39         l->top_p = new_keys + oldsize;
40
41         return 0;
42 }
43
44 struct bkey *bch_keylist_pop(struct keylist *l)
45 {
46         struct bkey *k = l->keys;
47
48         if (k == l->top)
49                 return NULL;
50
51         while (bkey_next(k) != l->top)
52                 k = bkey_next(k);
53
54         return l->top = k;
55 }
56
57 void bch_keylist_pop_front(struct keylist *l)
58 {
59         l->top_p -= bkey_u64s(l->keys);
60
61         memmove(l->keys,
62                 bkey_next(l->keys),
63                 bch_keylist_bytes(l));
64 }
65
66 /* Key/pointer manipulation */
67
68 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
69                               unsigned i)
70 {
71         BUG_ON(i > KEY_PTRS(src));
72
73         /* Only copy the header, key, and one pointer. */
74         memcpy(dest, src, 2 * sizeof(uint64_t));
75         dest->ptr[0] = src->ptr[i];
76         SET_KEY_PTRS(dest, 1);
77         /* We didn't copy the checksum so clear that bit. */
78         SET_KEY_CSUM(dest, 0);
79 }
80
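/*
 * __bch_cut_front() and __bch_cut_back() trim an extent key in place:
 * cut_front drops the part of the extent before @where, so the key then
 * starts at @where; cut_back drops the part from @where onwards, so the
 * key then ends at @where.  Both return false if there was nothing to
 * trim.  cut_front also bumps each pointer's offset by the amount that was
 * cut off, so the pointers still address the data that remains.
 */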
81 bool __bch_cut_front(const struct bkey *where, struct bkey *k)
82 {
83         unsigned i, len = 0;
84
85         if (bkey_cmp(where, &START_KEY(k)) <= 0)
86                 return false;
87
88         if (bkey_cmp(where, k) < 0)
89                 len = KEY_OFFSET(k) - KEY_OFFSET(where);
90         else
91                 bkey_copy_key(k, where);
92
93         for (i = 0; i < KEY_PTRS(k); i++)
94                 SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);
95
96         BUG_ON(len > KEY_SIZE(k));
97         SET_KEY_SIZE(k, len);
98         return true;
99 }
100
101 bool __bch_cut_back(const struct bkey *where, struct bkey *k)
102 {
103         unsigned len = 0;
104
105         if (bkey_cmp(where, k) >= 0)
106                 return false;
107
108         BUG_ON(KEY_INODE(where) != KEY_INODE(k));
109
110         if (bkey_cmp(where, &START_KEY(k)) > 0)
111                 len = KEY_OFFSET(where) - KEY_START(k);
112
113         bkey_copy_key(k, where);
114
115         BUG_ON(len > KEY_SIZE(k));
116         SET_KEY_SIZE(k, len);
117         return true;
118 }
119
120 /* Auxiliary search trees */
121
122 /* 32 bits total: */
123 #define BKEY_MID_BITS           3
124 #define BKEY_EXPONENT_BITS      7
125 #define BKEY_MANTISSA_BITS      (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
126 #define BKEY_MANTISSA_MASK      ((1 << BKEY_MANTISSA_BITS) - 1)
127
128 struct bkey_float {
129         unsigned        exponent:BKEY_EXPONENT_BITS;
130         unsigned        m:BKEY_MID_BITS;
131         unsigned        mantissa:BKEY_MANTISSA_BITS;
132 } __packed;
133
134 /*
135  * BSET_CACHELINE was originally intended to match the hardware cacheline size -
136  * it used to be 64, but I realized the lookup code would touch slightly less
137  * memory if it was 128.
138  *
139  * It defines the number of bytes (in struct bset) per struct bkey_float in
140  * the auxiliary search tree - when we're done searching the bset_float tree we
141  * have this many bytes left that we do a linear search over.
142  *
143  * Since (after level 5) every level of the bset_tree is on a new cacheline,
144  * we're touching one fewer cacheline in the bset tree in exchange for one more
145  * cacheline in the linear search - but the linear search might stop before it
146  * gets to the second cacheline.
147  */
148
149 #define BSET_CACHELINE          128
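/*
 * A worked example of what that trade-off means in practice: with
 * BSET_CACHELINE at 128 and 8-byte key words, each struct bkey_float covers
 * at most 128 / sizeof(uint64_t) == 16 u64s of key data, i.e. the linear
 * search that finishes a lookup walks at most two 64-byte hardware
 * cachelines of keys.
 */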
150
151 /* Space required for the btree node keys */
152 static inline size_t btree_keys_bytes(struct btree *b)
153 {
154         return PAGE_SIZE << b->page_order;
155 }
156
157 static inline size_t btree_keys_cachelines(struct btree *b)
158 {
159         return btree_keys_bytes(b) / BSET_CACHELINE;
160 }
161
162 /* Space required for the auxiliary search trees */
163 static inline size_t bset_tree_bytes(struct btree *b)
164 {
165         return btree_keys_cachelines(b) * sizeof(struct bkey_float);
166 }
167
168 /* Space required for the prev pointers */
169 static inline size_t bset_prev_bytes(struct btree *b)
170 {
171         return btree_keys_cachelines(b) * sizeof(uint8_t);
172 }
173
174 /* Memory allocation */
175
176 void bch_btree_keys_free(struct btree *b)
177 {
178         struct bset_tree *t = b->sets;
179
180         if (bset_prev_bytes(b) < PAGE_SIZE)
181                 kfree(t->prev);
182         else
183                 free_pages((unsigned long) t->prev,
184                            get_order(bset_prev_bytes(b)));
185
186         if (bset_tree_bytes(b) < PAGE_SIZE)
187                 kfree(t->tree);
188         else
189                 free_pages((unsigned long) t->tree,
190                            get_order(bset_tree_bytes(b)));
191
192         free_pages((unsigned long) t->data, b->page_order);
193
194         t->prev = NULL;
195         t->tree = NULL;
196         t->data = NULL;
197 }
198
199 int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
200 {
201         struct bset_tree *t = b->sets;
202
203         BUG_ON(t->data);
204
205         b->page_order = page_order;
206
207         t->data = (void *) __get_free_pages(gfp, b->page_order);
208         if (!t->data)
209                 goto err;
210
211         t->tree = bset_tree_bytes(b) < PAGE_SIZE
212                 ? kmalloc(bset_tree_bytes(b), gfp)
213                 : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
214         if (!t->tree)
215                 goto err;
216
217         t->prev = bset_prev_bytes(b) < PAGE_SIZE
218                 ? kmalloc(bset_prev_bytes(b), gfp)
219                 : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
220         if (!t->prev)
221                 goto err;
222
223         return 0;
224 err:
225         bch_btree_keys_free(b);
226         return -ENOMEM;
227 }
228
229 /* Binary tree stuff for auxiliary search trees */
230
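/*
 * inorder_next()/inorder_prev() step through the array-encoded tree (root
 * at index 1, children of j at 2j and 2j+1) in the order an inorder
 * traversal would visit the nodes: next() descends to the leftmost node of
 * j's right subtree if it has one, otherwise climbs until it leaves a node
 * as a left child; prev() is the mirror image.  inorder_next(0, size)
 * conveniently returns the first (leftmost) node, which is how the
 * tree-building code below uses it.
 */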
231 static unsigned inorder_next(unsigned j, unsigned size)
232 {
233         if (j * 2 + 1 < size) {
234                 j = j * 2 + 1;
235
236                 while (j * 2 < size)
237                         j *= 2;
238         } else
239                 j >>= ffz(j) + 1;
240
241         return j;
242 }
243
244 static unsigned inorder_prev(unsigned j, unsigned size)
245 {
246         if (j * 2 < size) {
247                 j = j * 2;
248
249                 while (j * 2 + 1 < size)
250                         j = j * 2 + 1;
251         } else
252                 j >>= ffs(j);
253
254         return j;
255 }
256
257 /* I have no idea why this code works... and I'm the one who wrote it
258  *
259  * However, I do know what it does:
260  * Given a binary tree constructed in an array (i.e. how you normally implement
261  * a heap), it converts a node in the tree - referenced by array index - to the
262  * index it would have if you did an inorder traversal.
263  *
264  * Also tested for every j, for every size up to somewhere around 6 million.
265  *
266  * The binary tree starts at array index 1, not 0
267  * extra is a function of size:
268  *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
269  */
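/*
 * A small worked example (easy to check by hand): for size == 8 the tree
 * has nodes 1..7 and extra == (8 - 4) << 1 == 8.  An inorder traversal of a
 * complete 7-node tree visits 4 2 5 1 6 3 7, and indeed
 * __to_inorder(1, 8, 8) == 4, __to_inorder(4, 8, 8) == 1 and
 * __to_inorder(7, 8, 8) == 7, with __inorder_to_tree() mapping each value
 * back again.
 */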
270 static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
271 {
272         unsigned b = fls(j);
273         unsigned shift = fls(size - 1) - b;
274
275         j  ^= 1U << (b - 1);
276         j <<= 1;
277         j  |= 1;
278         j <<= shift;
279
280         if (j > extra)
281                 j -= (j - extra) >> 1;
282
283         return j;
284 }
285
286 static unsigned to_inorder(unsigned j, struct bset_tree *t)
287 {
288         return __to_inorder(j, t->size, t->extra);
289 }
290
291 static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
292 {
293         unsigned shift;
294
295         if (j > extra)
296                 j += j - extra;
297
298         shift = ffs(j);
299
300         j >>= shift;
301         j  |= roundup_pow_of_two(size) >> shift;
302
303         return j;
304 }
305
306 static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
307 {
308         return __inorder_to_tree(j, t->size, t->extra);
309 }
310
311 #if 0
312 void inorder_test(void)
313 {
314         unsigned long done = 0;
315         ktime_t start = ktime_get();
316
317         for (unsigned size = 2;
318              size < 65536000;
319              size++) {
320                 unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
321                 unsigned i = 1, j = rounddown_pow_of_two(size - 1);
322
323                 if (!(size % 4096))
324                         printk(KERN_NOTICE "loop %u, %llu per us\n", size,
325                                done / ktime_us_delta(ktime_get(), start));
326
327                 while (1) {
328                         if (__inorder_to_tree(i, size, extra) != j)
329                                 panic("size %10u j %10u i %10u", size, j, i);
330
331                         if (__to_inorder(j, size, extra) != i)
332                                 panic("size %10u j %10u i %10u", size, j, i);
333
334                         if (j == rounddown_pow_of_two(size) - 1)
335                                 break;
336
337                         BUG_ON(inorder_prev(inorder_next(j, size), size) != j);
338
339                         j = inorder_next(j, size);
340                         i++;
341                 }
342
343                 done += size - 1;
344         }
345 }
346 #endif
347
348 /*
349  * Cacheline/offset <-> bkey pointer arithmetic:
350  *
351  * t->tree is a binary search tree in an array; each node corresponds to a key
352  * in one cacheline in t->data (BSET_CACHELINE bytes).
353  *
354  * This means we don't have to store the full index of the key that a node in
355  * the binary tree points to; to_inorder() gives us the cacheline, and then
356  * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
357  *
358  * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
359  * make this work.
360  *
361  * To construct the bfloat for an arbitrary key we need to know what the key
362  * immediately preceding it is: we have to check if the two keys differ in the
363  * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
364  * of the previous key so we can walk backwards to it from t->tree[j]'s key.
365  */
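/*
 * Concretely (assuming t->data is page aligned, which it is - it comes from
 * __get_free_pages()): a key 296 bytes into the bset is in cacheline
 * 296 / 128 == 2 at offset (296 % 128) / 8 == 5, and
 * cacheline_to_bkey(t, 2, 5) == t->data + 2 * 128 + 5 * 8 reconstructs the
 * same address.
 */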
366
367 static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
368                                       unsigned offset)
369 {
370         return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
371 }
372
373 static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
374 {
375         return ((void *) k - (void *) t->data) / BSET_CACHELINE;
376 }
377
378 static unsigned bkey_to_cacheline_offset(struct bkey *k)
379 {
380         return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
381 }
382
383 static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
384 {
385         return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
386 }
387
388 static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
389 {
390         return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
391 }
392
393 /*
394  * For the write set - the one we're currently inserting keys into - we don't
395  * maintain a full search tree, we just keep a simple lookup table in t->prev.
396  */
397 static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
398 {
399         return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
400 }
401
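/*
 * shrd128() treats high:low as a single 128-bit value and shifts it right
 * by shift (0..63), returning the low 64 bits of the result.  The
 * (high << 1) << (63 - shift) form is equivalent to high << (64 - shift)
 * but avoids an undefined 64-bit shift when shift == 0.  bfloat_mantissa()
 * uses it to pull BKEY_MANTISSA_BITS bits out of a key, starting at the
 * bit position recorded in bkey_float->exponent.
 */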
402 static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
403 {
404         low >>= shift;
405         low  |= (high << 1) << (63U - shift);
406         return low;
407 }
408
409 static inline unsigned bfloat_mantissa(const struct bkey *k,
410                                        struct bkey_float *f)
411 {
412         const uint64_t *p = &k->low - (f->exponent >> 6);
413         return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
414 }
415
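/*
 * make_bfloat() positions a BKEY_MANTISSA_BITS wide window over the key for
 * tree node j: f->exponent is derived from the most significant bit in
 * which the keys bounding this node's subtree (l and r) differ, so that
 * comparing mantissas is enough to pick a search direction anywhere in that
 * range.  f->mantissa holds the node's key bits in that window, minus 1 for
 * the sign-bit trick used in bset_search_tree(); if those bits don't
 * distinguish the node's key from the key before it, the node is flagged
 * failed with exponent == 127 and lookups fall back to a full bkey_cmp().
 */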
416 static void make_bfloat(struct bset_tree *t, unsigned j)
417 {
418         struct bkey_float *f = &t->tree[j];
419         struct bkey *m = tree_to_bkey(t, j);
420         struct bkey *p = tree_to_prev_bkey(t, j);
421
422         struct bkey *l = is_power_of_2(j)
423                 ? t->data->start
424                 : tree_to_prev_bkey(t, j >> ffs(j));
425
426         struct bkey *r = is_power_of_2(j + 1)
427                 ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
428                 : tree_to_bkey(t, j >> (ffz(j) + 1));
429
430         BUG_ON(m < l || m > r);
431         BUG_ON(bkey_next(p) != m);
432
433         if (KEY_INODE(l) != KEY_INODE(r))
434                 f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
435         else
436                 f->exponent = fls64(r->low ^ l->low);
437
438         f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);
439
440         /*
441          * Setting f->exponent = 127 flags this node as failed, and causes the
442          * lookup code to fall back to comparing against the original key.
443          */
444
445         if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
446                 f->mantissa = bfloat_mantissa(m, f) - 1;
447         else
448                 f->exponent = 127;
449 }
450
451 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
452 {
453         if (t != b->sets) {
454                 unsigned j = roundup(t[-1].size,
455                                      64 / sizeof(struct bkey_float));
456
457                 t->tree = t[-1].tree + j;
458                 t->prev = t[-1].prev + j;
459         }
460
461         while (t < b->sets + MAX_BSETS)
462                 t++->size = 0;
463 }
464
465 static void bch_bset_build_unwritten_tree(struct btree *b)
466 {
467         struct bset_tree *t = bset_tree_last(b);
468
469         bset_alloc_tree(b, t);
470
471         if (t->tree != b->sets->tree + btree_keys_cachelines(b)) {
472                 t->prev[0] = bkey_to_cacheline_offset(t->data->start);
473                 t->size = 1;
474         }
475 }
476
477 void bch_bset_init_next(struct btree *b, struct bset *i, uint64_t magic)
478 {
479         if (i != b->sets->data) {
480                 b->sets[++b->nsets].data = i;
481                 i->seq = b->sets->data->seq;
482         } else
483                 get_random_bytes(&i->seq, sizeof(uint64_t));
484
485         i->magic        = magic;
486         i->version      = 0;
487         i->keys         = 0;
488
489         bch_bset_build_unwritten_tree(b);
490 }
491
492 static void bset_build_written_tree(struct btree *b)
493 {
494         struct bset_tree *t = bset_tree_last(b);
495         struct bkey *k = t->data->start;
496         unsigned j, cacheline = 1;
497
498         bset_alloc_tree(b, t);
499
500         t->size = min_t(unsigned,
501                         bkey_to_cacheline(t, bset_bkey_last(t->data)),
502                         b->sets->tree + btree_keys_cachelines(b) - t->tree);
503
504         if (t->size < 2) {
505                 t->size = 0;
506                 return;
507         }
508
509         t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
510
511         /* First we figure out where the first key in each cacheline is */
512         for (j = inorder_next(0, t->size);
513              j;
514              j = inorder_next(j, t->size)) {
515                 while (bkey_to_cacheline(t, k) != cacheline)
516                         k = bkey_next(k);
517
518                 t->prev[j] = bkey_u64s(k);
519                 k = bkey_next(k);
520                 cacheline++;
521                 t->tree[j].m = bkey_to_cacheline_offset(k);
522         }
523
524         while (bkey_next(k) != bset_bkey_last(t->data))
525                 k = bkey_next(k);
526
527         t->end = *k;
528
529         /* Then we build the tree */
530         for (j = inorder_next(0, t->size);
531              j;
532              j = inorder_next(j, t->size))
533                 make_bfloat(t, j);
534 }
535
536 void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
537 {
538         struct bset_tree *t;
539         unsigned inorder, j = 1;
540
541         for (t = b->sets; t <= bset_tree_last(b); t++)
542                 if (k < bset_bkey_last(t->data))
543                         goto found_set;
544
545         BUG();
546 found_set:
547         if (!t->size || !bset_written(b, t))
548                 return;
549
550         inorder = bkey_to_cacheline(t, k);
551
552         if (k == t->data->start)
553                 goto fix_left;
554
555         if (bkey_next(k) == bset_bkey_last(t->data)) {
556                 t->end = *k;
557                 goto fix_right;
558         }
559
560         j = inorder_to_tree(inorder, t);
561
562         if (j &&
563             j < t->size &&
564             k == tree_to_bkey(t, j))
565 fix_left:       do {
566                         make_bfloat(t, j);
567                         j = j * 2;
568                 } while (j < t->size);
569
570         j = inorder_to_tree(inorder + 1, t);
571
572         if (j &&
573             j < t->size &&
574             k == tree_to_prev_bkey(t, j))
575 fix_right:      do {
576                         make_bfloat(t, j);
577                         j = j * 2 + 1;
578                 } while (j < t->size);
579 }
580
581 static void bch_bset_fix_lookup_table(struct btree *b,
582                                       struct bset_tree *t,
583                                       struct bkey *k)
584 {
585         unsigned shift = bkey_u64s(k);
586         unsigned j = bkey_to_cacheline(t, k);
587
588         /* We're getting called from btree_split() or btree_gc, just bail out */
589         if (!t->size)
590                 return;
591
592         /* k is the key we just inserted; we need to find the entry in the
593          * lookup table for the first key that is strictly greater than k:
594          * it's either k's cacheline or the next one
595          */
596         if (j < t->size &&
597             table_to_bkey(t, j) <= k)
598                 j++;
599
600         /* Adjust all the lookup table entries, and find a new key for any that
601          * have gotten too big
602          */
603         for (; j < t->size; j++) {
604                 t->prev[j] += shift;
605
606                 if (t->prev[j] > 7) {
607                         k = table_to_bkey(t, j - 1);
608
609                         while (k < cacheline_to_bkey(t, j, 0))
610                                 k = bkey_next(k);
611
612                         t->prev[j] = bkey_to_cacheline_offset(k);
613                 }
614         }
615
616         if (t->size == b->sets->tree + btree_keys_cachelines(b) - t->tree)
617                 return;
618
619         /* Possibly add a new entry to the end of the lookup table */
620
621         for (k = table_to_bkey(t, t->size - 1);
622              k != bset_bkey_last(t->data);
623              k = bkey_next(k))
624                 if (t->size == bkey_to_cacheline(t, k)) {
625                         t->prev[t->size] = bkey_to_cacheline_offset(k);
626                         t->size++;
627                 }
628 }
629
630 void bch_bset_insert(struct btree *b, struct bkey *where,
631                      struct bkey *insert)
632 {
633         struct bset_tree *t = bset_tree_last(b);
634
635         BUG_ON(t->data != write_block(b));
636         BUG_ON(bset_byte_offset(b, t->data) +
637                __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
638                PAGE_SIZE << b->page_order);
639
640         memmove((uint64_t *) where + bkey_u64s(insert),
641                 where,
642                 (void *) bset_bkey_last(t->data) - (void *) where);
643
644         t->data->keys += bkey_u64s(insert);
645         bkey_copy(where, insert);
646         bch_bset_fix_lookup_table(b, t, where);
647 }
648
649 struct bset_search_iter {
650         struct bkey *l, *r;
651 };
652
653 static struct bset_search_iter bset_search_write_set(struct btree *b,
654                                                      struct bset_tree *t,
655                                                      const struct bkey *search)
656 {
657         unsigned li = 0, ri = t->size;
658
659         BUG_ON(!b->nsets &&
660                t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
661
662         while (li + 1 != ri) {
663                 unsigned m = (li + ri) >> 1;
664
665                 if (bkey_cmp(table_to_bkey(t, m), search) > 0)
666                         ri = m;
667                 else
668                         li = m;
669         }
670
671         return (struct bset_search_iter) {
672                 table_to_bkey(t, li),
673                 ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
674         };
675 }
676
677 static struct bset_search_iter bset_search_tree(struct btree *b,
678                                                 struct bset_tree *t,
679                                                 const struct bkey *search)
680 {
681         struct bkey *l, *r;
682         struct bkey_float *f;
683         unsigned inorder, j, n = 1;
684
685         do {
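                /*
                 * Prefetch the node four levels below n; the mask zeroes p
                 * (falling back to prefetching t->tree[0]) when n << 4 is
                 * already past the end of the tree, without a branch.
                 */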
686                 unsigned p = n << 4;
687                 p &= ((int) (p - t->size)) >> 31;
688
689                 prefetch(&t->tree[p]);
690
691                 j = n;
692                 f = &t->tree[j];
693
694                 /*
695                  * n = (f->mantissa > bfloat_mantissa())
696                  *      ? j * 2
697                  *      : j * 2 + 1;
698                  *
699                  * We need to subtract 1 from f->mantissa for the sign bit trick
700                  * to work  - that's done in make_bfloat()
701                  */
702                 if (likely(f->exponent != 127))
703                         n = j * 2 + (((unsigned)
704                                       (f->mantissa -
705                                        bfloat_mantissa(search, f))) >> 31);
706                 else
707                         n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
708                                 ? j * 2
709                                 : j * 2 + 1;
710         } while (n < t->size);
711
712         inorder = to_inorder(j, t);
713
714         /*
715          * n would have been the node we recursed to - the low bit tells us if
716          * we recursed left or recursed right.
717          */
718         if (n & 1) {
719                 l = cacheline_to_bkey(t, inorder, f->m);
720
721                 if (++inorder != t->size) {
722                         f = &t->tree[inorder_next(j, t->size)];
723                         r = cacheline_to_bkey(t, inorder, f->m);
724                 } else
725                         r = bset_bkey_last(t->data);
726         } else {
727                 r = cacheline_to_bkey(t, inorder, f->m);
728
729                 if (--inorder) {
730                         f = &t->tree[inorder_prev(j, t->size)];
731                         l = cacheline_to_bkey(t, inorder, f->m);
732                 } else
733                         l = t->data->start;
734         }
735
736         return (struct bset_search_iter) {l, r};
737 }
738
739 struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
740                                const struct bkey *search)
741 {
742         struct bset_search_iter i;
743
744         /*
745          * First, we search for a cacheline, then we finish with a linear
746          * search within that cacheline.
747          *
748          * To search for the cacheline, there are three different possibilities:
749          *  * The set is too small to have a search tree, so we just do a linear
750          *    search over the whole set.
751          *  * The set is the one we're currently inserting into; keeping a full
752          *    auxiliary search tree up to date would be too expensive, so we
753          *    use a much simpler lookup table to do a binary search -
754          *    bset_search_write_set().
755          *  * Or we use the auxiliary search tree we constructed earlier -
756          *    bset_search_tree()
757          */
758
759         if (unlikely(!t->size)) {
760                 i.l = t->data->start;
761                 i.r = bset_bkey_last(t->data);
762         } else if (bset_written(b, t)) {
763                 /*
764                  * Each node in the auxiliary search tree covers a certain range
765                  * of bits, and keys above and below the set it covers might
766                  * differ outside those bits - so we have to special case the
767                  * start and end - handle that here:
768                  */
769
770                 if (unlikely(bkey_cmp(search, &t->end) >= 0))
771                         return bset_bkey_last(t->data);
772
773                 if (unlikely(bkey_cmp(search, t->data->start) < 0))
774                         return t->data->start;
775
776                 i = bset_search_tree(b, t, search);
777         } else
778                 i = bset_search_write_set(b, t, search);
779
780         if (expensive_debug_checks(b->c)) {
781                 BUG_ON(bset_written(b, t) &&
782                        i.l != t->data->start &&
783                        bkey_cmp(tree_to_prev_bkey(t,
784                           inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
785                                 search) > 0);
786
787                 BUG_ON(i.r != bset_bkey_last(t->data) &&
788                        bkey_cmp(i.r, search) <= 0);
789         }
790
791         while (likely(i.l != i.r) &&
792                bkey_cmp(i.l, search) <= 0)
793                 i.l = bkey_next(i.l);
794
795         return i.l;
796 }
797
798 /* Btree iterator */
799
800 typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
801                                  struct btree_iter_set);
802
803 static inline bool btree_iter_cmp(struct btree_iter_set l,
804                                   struct btree_iter_set r)
805 {
806         return bkey_cmp(l.k, r.k) > 0;
807 }
808
809 static inline bool btree_iter_end(struct btree_iter *iter)
810 {
811         return !iter->used;
812 }
813
814 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
815                          struct bkey *end)
816 {
817         if (k != end)
818                 BUG_ON(!heap_add(iter,
819                                  ((struct btree_iter_set) { k, end }),
820                                  btree_iter_cmp));
821 }
822
823 static struct bkey *__bch_btree_iter_init(struct btree *b,
824                                           struct btree_iter *iter,
825                                           struct bkey *search,
826                                           struct bset_tree *start)
827 {
828         struct bkey *ret = NULL;
829         iter->size = ARRAY_SIZE(iter->data);
830         iter->used = 0;
831
832 #ifdef CONFIG_BCACHE_DEBUG
833         iter->b = b;
834 #endif
835
836         for (; start <= &b->sets[b->nsets]; start++) {
837                 ret = bch_bset_search(b, start, search);
838                 bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
839         }
840
841         return ret;
842 }
843
844 struct bkey *bch_btree_iter_init(struct btree *b,
845                                  struct btree_iter *iter,
846                                  struct bkey *search)
847 {
848         return __bch_btree_iter_init(b, iter, search, b->sets);
849 }
850
851 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
852                                                  btree_iter_cmp_fn *cmp)
853 {
854         struct btree_iter_set unused;
855         struct bkey *ret = NULL;
856
857         if (!btree_iter_end(iter)) {
858                 bch_btree_iter_next_check(iter);
859
860                 ret = iter->data->k;
861                 iter->data->k = bkey_next(iter->data->k);
862
863                 if (iter->data->k > iter->data->end) {
864                         WARN_ONCE(1, "bset was corrupt!\n");
865                         iter->data->k = iter->data->end;
866                 }
867
868                 if (iter->data->k == iter->data->end)
869                         heap_pop(iter, unused, cmp);
870                 else
871                         heap_sift(iter, 0, cmp);
872         }
873
874         return ret;
875 }
876
877 struct bkey *bch_btree_iter_next(struct btree_iter *iter)
878 {
879         return __bch_btree_iter_next(iter, btree_iter_cmp);
880
881 }
882
883 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
884                                         struct btree *b, ptr_filter_fn fn)
885 {
886         struct bkey *ret;
887
888         do {
889                 ret = bch_btree_iter_next(iter);
890         } while (ret && fn(b, ret));
891
892         return ret;
893 }
894
895 /* Mergesort */
896
897 void bch_bset_sort_state_free(struct bset_sort_state *state)
898 {
899         if (state->pool)
900                 mempool_destroy(state->pool);
901 }
902
903 int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
904 {
905         spin_lock_init(&state->time.lock);
906
907         state->page_order = page_order;
908         state->crit_factor = int_sqrt(1 << page_order);
909
910         state->pool = mempool_create_page_pool(1, page_order);
911         if (!state->pool)
912                 return -ENOMEM;
913
914         return 0;
915 }
916
917 static void btree_mergesort(struct btree *b, struct bset *out,
918                             struct btree_iter *iter,
919                             bool fixup, bool remove_stale)
920 {
921         int i;
922         struct bkey *k, *last = NULL;
923         BKEY_PADDED(k) tmp;
924         bool (*bad)(struct btree *, const struct bkey *) = remove_stale
925                 ? bch_ptr_bad
926                 : bch_ptr_invalid;
927
928         /* Heapify the iterator, using our comparison function */
929         for (i = iter->used / 2 - 1; i >= 0; --i)
930                 heap_sift(iter, i, b->ops->sort_cmp);
931
932         while (!btree_iter_end(iter)) {
933                 if (b->ops->sort_fixup && fixup)
934                         k = b->ops->sort_fixup(iter, &tmp.k);
935                 else
936                         k = NULL;
937
938                 if (!k)
939                         k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
940
941                 if (bad(b, k))
942                         continue;
943
944                 if (!last) {
945                         last = out->start;
946                         bkey_copy(last, k);
947                 } else if (!bch_bkey_try_merge(b, last, k)) {
948                         last = bkey_next(last);
949                         bkey_copy(last, k);
950                 }
951         }
952
953         out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
954
955         pr_debug("sorted %i keys", out->keys);
956 }
957
958 static void __btree_sort(struct btree *b, struct btree_iter *iter,
959                          unsigned start, unsigned order, bool fixup,
960                          struct bset_sort_state *state)
961 {
962         uint64_t start_time;
963         bool used_mempool = false;
964         struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
965                                                      order);
966         if (!out) {
967                 BUG_ON(order > state->page_order);
968
969                 out = page_address(mempool_alloc(state->pool, GFP_NOIO));
970                 used_mempool = true;
971                 order = ilog2(bucket_pages(b->c));
972         }
973
974         start_time = local_clock();
975
976         btree_mergesort(b, out, iter, fixup, false);
977         b->nsets = start;
978
979         if (!start && order == b->page_order) {
980                 /*
981                  * Our temporary buffer is the same size as the btree node's
982                  * buffer, we can just swap buffers instead of doing a big
983                  * memcpy()
984                  */
985
986                 out->magic      = bset_magic(&b->c->sb);
987                 out->seq        = b->sets[0].data->seq;
988                 out->version    = b->sets[0].data->version;
989                 swap(out, b->sets[0].data);
990         } else {
991                 b->sets[start].data->keys = out->keys;
992                 memcpy(b->sets[start].data->start, out->start,
993                        (void *) bset_bkey_last(out) - (void *) out->start);
994         }
995
996         if (used_mempool)
997                 mempool_free(virt_to_page(out), state->pool);
998         else
999                 free_pages((unsigned long) out, order);
1000
1001         bset_build_written_tree(b);
1002
1003         if (!start)
1004                 bch_time_stats_update(&state->time, start_time);
1005 }
1006
1007 void bch_btree_sort_partial(struct btree *b, unsigned start,
1008                             struct bset_sort_state *state)
1009 {
1010         size_t order = b->page_order, keys = 0;
1011         struct btree_iter iter;
1012         int oldsize = bch_count_data(b);
1013
1014         __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
1015
1016         BUG_ON(!bset_written(b, bset_tree_last(b)) &&
1017                (bset_tree_last(b)->size || b->nsets));
1018
1019         if (start) {
1020                 unsigned i;
1021
1022                 for (i = start; i <= b->nsets; i++)
1023                         keys += b->sets[i].data->keys;
1024
1025                 order = roundup_pow_of_two(__set_bytes(b->sets->data,
1026                                                        keys)) / PAGE_SIZE;
1027                 if (order)
1028                         order = ilog2(order);
1029         }
1030
1031         __btree_sort(b, &iter, start, order, false, state);
1032
1033         EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
1034 }
1035 EXPORT_SYMBOL(bch_btree_sort_partial);
1036
1037 void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter,
1038                                     struct bset_sort_state *state)
1039 {
1040         __btree_sort(b, iter, 0, b->page_order, true, state);
1041 }
1042
1043 void bch_btree_sort_into(struct btree *b, struct btree *new,
1044                          struct bset_sort_state *state)
1045 {
1046         uint64_t start_time = local_clock();
1047
1048         struct btree_iter iter;
1049         bch_btree_iter_init(b, &iter, NULL);
1050
1051         btree_mergesort(b, new->sets->data, &iter, false, true);
1052
1053         bch_time_stats_update(&state->time, start_time);
1054
1055         new->sets->size = 0;
1056 }
1057
1058 #define SORT_CRIT       (4096 / sizeof(uint64_t))
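/*
 * A worked example of the lazy heuristic below (the numbers depend on the
 * configuration, so treat them as illustrative): SORT_CRIT is 4096/8 == 512
 * keys and crit_factor == int_sqrt(1 << page_order), so with
 * state->page_order == 3 (32K with 4K pages) crit_factor is 2.  A set is
 * chosen as the starting point of a partial resort only while it is still
 * small: the next-to-last set is merged, together with everything after it,
 * while it has fewer than ~1024 keys, the set before that only below ~2048,
 * and so on - big old sets are left alone until a full sort is forced
 * because MAX_BSETS would otherwise overflow.
 */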
1059
1060 void bch_btree_sort_lazy(struct btree *b, struct bset_sort_state *state)
1061 {
1062         unsigned crit = SORT_CRIT;
1063         int i;
1064
1065         /* Don't sort if nothing to do */
1066         if (!b->nsets)
1067                 goto out;
1068
1069         for (i = b->nsets - 1; i >= 0; --i) {
1070                 crit *= state->crit_factor;
1071
1072                 if (b->sets[i].data->keys < crit) {
1073                         bch_btree_sort_partial(b, i, state);
1074                         return;
1075                 }
1076         }
1077
1078         /* Sort if we'd overflow */
1079         if (b->nsets + 1 == MAX_BSETS) {
1080                 bch_btree_sort(b, state);
1081                 return;
1082         }
1083
1084 out:
1085         bset_build_written_tree(b);
1086 }
1087
1088 /* Sysfs stuff */
1089
1090 struct bset_stats {
1091         struct btree_op op;
1092         size_t nodes;
1093         size_t sets_written, sets_unwritten;
1094         size_t bytes_written, bytes_unwritten;
1095         size_t floats, failed;
1096 };
1097
1098 static int btree_bset_stats(struct btree_op *op, struct btree *b)
1099 {
1100         struct bset_stats *stats = container_of(op, struct bset_stats, op);
1101         unsigned i;
1102
1103         stats->nodes++;
1104
1105         for (i = 0; i <= b->nsets; i++) {
1106                 struct bset_tree *t = &b->sets[i];
1107                 size_t bytes = t->data->keys * sizeof(uint64_t);
1108                 size_t j;
1109
1110                 if (bset_written(b, t)) {
1111                         stats->sets_written++;
1112                         stats->bytes_written += bytes;
1113
1114                         stats->floats += t->size - 1;
1115
1116                         for (j = 1; j < t->size; j++)
1117                                 if (t->tree[j].exponent == 127)
1118                                         stats->failed++;
1119                 } else {
1120                         stats->sets_unwritten++;
1121                         stats->bytes_unwritten += bytes;
1122                 }
1123         }
1124
1125         return MAP_CONTINUE;
1126 }
1127
1128 int bch_bset_print_stats(struct cache_set *c, char *buf)
1129 {
1130         struct bset_stats t;
1131         int ret;
1132
1133         memset(&t, 0, sizeof(struct bset_stats));
1134         bch_btree_op_init(&t.op, -1);
1135
1136         ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
1137         if (ret < 0)
1138                 return ret;
1139
1140         return snprintf(buf, PAGE_SIZE,
1141                         "btree nodes:           %zu\n"
1142                         "written sets:          %zu\n"
1143                         "unwritten sets:                %zu\n"
1144                         "written key bytes:     %zu\n"
1145                         "unwritten key bytes:   %zu\n"
1146                         "floats:                        %zu\n"
1147                         "failed:                        %zu\n",
1148                         t.nodes,
1149                         t.sets_written, t.sets_unwritten,
1150                         t.bytes_written, t.bytes_unwritten,
1151                         t.floats, t.failed);
1152 }