/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */
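/*
 * To illustrate the generation scheme: each pointer embeds a copy of its
 * bucket's 8 bit gen, and reusing a bucket just increments bucket->gen,
 * which instantly invalidates every key still pointing at it. A sketch of
 * the wraparound-safe comparison (the real helpers are gen_after() and
 * ptr_stale() in bcache.h):
 *
 *	static inline uint8_t example_gen_after(uint8_t a, uint8_t b)
 *	{
 *		uint8_t r = a - b;	/* unsigned subtraction wraps mod 256 */
 *
 *		return r > 128U ? 0 : r;
 *	}
 *
 * A pointer is stale if example_gen_after(bucket gen, pointer's copy) is
 * nonzero, i.e. the bucket has been reused since the key was written.
 */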
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}
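/*
 * Heap comparator for sorting by full keys: returns true if l > r, breaking
 * ties by address so that of two equal keys the newer one (the key from the
 * more recently written set) comes off the heap first, mirroring
 * bch_extent_sort_cmp() below.
 */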
static bool bch_key_sort_cmp(struct btree_iter_set l,
			     struct btree_iter_set r)
{
	int64_t c = bkey_cmp(l.k, r.k);

	return c ? c > 0 : l.k < r.k;
}
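/*
 * A pointer is invalid if the data it points at would run past the end of
 * its bucket, or if its bucket number falls outside the range of buckets
 * the cache device actually has.
 */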
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}
bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_bkey_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	return __bch_btree_ptr_invalid(b->c, k);
}
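/*
 * Btree node buckets are allocated at BTREE_PRIO and are marked as metadata
 * by garbage collection, so a btree pointer whose bucket disagrees on either
 * count indicates corruption; check that under bucket_lock when expensive
 * debug checks are enabled.
 */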
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_bkey_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
}
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}
const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= bch_key_sort_cmp,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
};

/* Extents */
/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool bch_extent_sort_cmp(struct btree_iter_set l,
				struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}
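/*
 * The fixup pass runs as keys come off the sort heap: when the next key to
 * be returned (top) overlaps the key behind it (i), the newer key wins and
 * the older key is trimmed around it. For example, if an older extent
 * covers sectors 0-16 and a newer one covers 4-8, the older key is split:
 * the 0-4 fragment is copied into @tmp and returned for immediate output,
 * while the 8-16 remainder stays in the heap.
 */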
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;

		if (iter->used > 2 &&
		    bch_extent_sort_cmp(i[0], i[1]))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			heap_sift(iter, i - top, bch_extent_sort_cmp);
			continue;
		}

		if (top->k > i->k) {
			/* top is the newer key: drop or trim the front of i */
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			heap_sift(iter, i - top, bch_extent_sort_cmp);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				/* i is contained inside top: split top */
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				heap_sift(iter, 0, bch_extent_sort_cmp);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	/* the extent's start (offset - size) would be negative */
	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(b->c, k))
		goto bad;

	return false;
bad:
	bch_bkey_to_text(buf, sizeof(buf), k);
	cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
	return true;
}
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    ((GC_MARK(g) != GC_MARK_DIRTY &&
		      KEY_DIRTY(k)) ||
		     GC_MARK(g) == GC_MARK_METADATA))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_bkey_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
}
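/*
 * A key is bad if any of its pointers is unavailable, or - for clean keys -
 * stale. Dirty keys are otherwise never reported bad, since writeback still
 * needs the data they point to; a stale dirty pointer would be a bug. The
 * cross-checks against the buckets' gc marks only run when expensive debug
 * checks are enabled.
 */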
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	struct bucket *g;
	unsigned i, stale;

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
		return false;

	for (i = 0; i < KEY_PTRS(k); i++) {
		g = PTR_BUCKET(b->c, k, i);
		stale = ptr_stale(b->c, k, i);

		/* 96 == BUCKET_GC_GEN_MAX; gc runs before gens drift further */
		btree_bug_on(stale > 96, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
			     b, "stale dirty pointer");

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}
/*
 * The checksum, when present, is stored in the u64 slot after the last
 * pointer; merging sums the two checksums and keeps the top bit clear.
 */
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}
static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (key_merging_disabled(b->c))
		return false;

	if (KEY_PTRS(l) != KEY_PTRS(r) ||
	    KEY_DIRTY(l) != KEY_DIRTY(r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}
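/*
 * Merge example (hypothetical values): l = KEY(1, 8, 8), covering sectors
 * 0-8 of inode 1, and r = KEY(1, 16, 8), covering sectors 8-16, merge into
 * l = KEY(1, 16, 16) - provided each of r's pointers picks up exactly where
 * the corresponding pointer of l left off, within the same bucket.
 */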
const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.is_extents	= true,
};
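/*
 * Generic bset code dispatches through these ops: bch_ptr_invalid() and
 * bch_ptr_bad() in bset.h are thin wrappers around ops->key_invalid and
 * ops->key_bad, so the same sort and iteration machinery serves both btree
 * node pointers and extents.
 */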