1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
9 * Buckets containing cached data are kept on a heap sorted by priority;
10 * bucket priority is increased on cache hit, and periodically all the buckets
11 * on the heap have their priority scaled down. This currently is just used as
12 * an LRU but in the future should allow for more intelligent heuristics.
14 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15 * counter. Garbage collection is used to remove stale pointers.
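*
* A rough sketch of that staleness test (illustrative only, not code from
* this file; the real helpers are gen_after()/ptr_stale(), and gc keeps the
* gens from wrapping too far):
*
*	static inline bool sketch_ptr_stale(uint8_t bucket_gen, uint8_t key_gen)
*	{
*		return (uint8_t)(bucket_gen - key_gen) > 0;
*	}
*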
17 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18 * as keys are inserted we only sort the pages that have not yet been written.
19 * When garbage collection is run, we resort the entire node.
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 #include <linux/delay.h>
39 #include <trace/events/bcache.h>
43 * register_bcache: Return errors out to userspace correctly
45 * Writeback: don't undirty key until after a cache flush
47 * Create an iterator for key pointers
49 * On btree write error, mark bucket such that it won't be freed from the cache
52 * Check for bad keys in replay
54 * Refcount journal entries in journal_replay
57 * Finish incremental gc
58 * Gc should free old UUIDs, data for invalid UUIDs
60 * Provide a way to list backing device UUIDs we have data cached for, and
61 * probably how long it's been since we've seen them, and a way to invalidate
62 * dirty data for devices that will never be attached again
64 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65 * that based on that and how much dirty data we have we can keep writeback
68 * Add a tracepoint or somesuch to watch for writeback starvation
70 * When btree depth > 1 and splitting an interior node, we have to make sure
71 * alloc_bucket() cannot fail. This should be true but is not completely
76 * If data write is less than hard sector size of ssd, round up offset in open
77 * bucket to the next whole sector
79 * Superblock needs to be fleshed out for multiple cache devices
81 * Add a sysfs tunable for the number of writeback IOs in flight
83 * Add a sysfs tunable for the number of open data buckets
85 * IO tracking: Can we track when one process is doing io on behalf of another?
86 * IO tracking: Don't use just an average, weigh more recent stuff higher
88 * Test module load/unload
91 #define MAX_NEED_GC 64
92 #define MAX_SAVE_PRIO 72
93 #define MAX_GC_TIMES 100
94 #define MIN_GC_NODES 100
95 #define GC_SLEEP_MS 100
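
/*
 * MAX_GC_TIMES, MIN_GC_NODES and GC_SLEEP_MS tune incremental gc: gc works in
 * batches of nodes (sized by btree_gc_min_nodes() below) and sleeps for
 * GC_SLEEP_MS before the next batch when foreground I/O is waiting.
 */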
97 #define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
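
/*
 * PTR_HASH() collapses a key's first pointer (its bucket, i.e. the offset in
 * bucket sized units, together with its gen) into a single value; it is what
 * mca_hash()/mca_find() use to index and compare btree nodes in the in-memory
 * btree cache.
 */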
99 #define PTR_HASH(c, k) \
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
102 static struct workqueue_struct *btree_io_wq;
104 #define insert_lock(s, b) ((b)->level <= (s)->lock)
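
/*
 * insert_lock() is true when this op holds (or should take) a write lock on
 * node b: op->lock is the btree level at which we start taking write locks,
 * so nodes at that level and below are write locked.
 */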
107 static inline struct bset *write_block(struct btree *b)
109 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
112 static void bch_btree_init_next(struct btree *b)
114 /* If not a leaf node, always sort */
115 if (b->level && b->keys.nsets)
116 bch_btree_sort(&b->keys, &b->c->sort);
118 bch_btree_sort_lazy(&b->keys, &b->c->sort);
120 if (b->written < btree_blocks(b))
121 bch_bset_init_next(&b->keys, write_block(b),
122 bset_magic(&b->c->cache->sb));
126 /* Btree key manipulation */
128 void bkey_put(struct cache_set *c, struct bkey *k)
132 for (i = 0; i < KEY_PTRS(k); i++)
133 if (ptr_available(c, k, i))
134 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
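
/*
 * The bset checksum skips the 64 bit csum field itself and is seeded with the
 * node's first pointer, tying each bset's checksum to the node it belongs to.
 */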
139 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
141 uint64_t crc = b->key.ptr[0];
142 void *data = (void *) i + 8, *end = bset_bkey_last(i);
144 crc = crc64_be(crc, data, end - data);
145 return crc ^ 0xffffffffffffffffULL;
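
/*
 * A btree node on disk is a sequence of bsets, each written at block
 * granularity and carrying the same seq as the node's first bset.
 * bch_btree_node_read_done() walks them in order, validating version, magic
 * and checksum, until it hits a block whose seq no longer matches, then sorts
 * everything it found into a single in-memory set.
 */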
148 void bch_btree_node_read_done(struct btree *b)
150 const char *err = "bad btree header";
151 struct bset *i = btree_bset_first(b);
152 struct btree_iter *iter;
155 * c->fill_iter can allocate an iterator with room for more bsets
156 * than the static MAX_BSETS limit.
157 * See the comment around cache_set->fill_iter.
159 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
160 iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
163 #ifdef CONFIG_BCACHE_DEBUG
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
172 i = write_block(b)) {
173 err = "unsupported bset version";
174 if (i->version > BCACHE_BSET_VERSION)
177 err = "bad btree header";
178 if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
183 if (i->magic != bset_magic(&b->c->cache->sb))
186 err = "bad checksum";
187 switch (i->version) {
189 if (i->csum != csum_set(i))
192 case BCACHE_BSET_VERSION:
193 if (i->csum != btree_csum_set(b, i))
199 if (i != b->keys.set[0].data && !i->keys)
202 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
204 b->written += set_blocks(i, block_bytes(b->c->cache));
207 err = "corrupted btree";
208 for (i = write_block(b);
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
210 i = ((void *) i) + block_bytes(b->c->cache))
211 if (i->seq == b->keys.set[0].data->seq)
214 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
216 i = b->keys.set[0].data;
217 err = "short btree key";
218 if (b->keys.set[0].size &&
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
222 if (b->written < btree_blocks(b))
223 bch_bset_init_next(&b->keys, write_block(b),
224 bset_magic(&b->c->cache->sb));
226 mempool_free(iter, &b->c->fill_iter);
229 set_btree_node_io_error(b);
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
231 err, PTR_BUCKET_NR(b->c, &b->key, 0),
232 bset_block_offset(b, i), i->keys);
236 static void btree_node_read_endio(struct bio *bio)
238 struct closure *cl = bio->bi_private;
243 static void bch_btree_node_read(struct btree *b)
245 uint64_t start_time = local_clock();
249 trace_bcache_btree_read(b);
251 closure_init_stack(&cl);
253 bio = bch_bbio_alloc(b->c);
254 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
255 bio->bi_end_io = btree_node_read_endio;
256 bio->bi_private = &cl;
257 bio->bi_opf = REQ_OP_READ | REQ_META;
259 bch_bio_map(bio, b->keys.set[0].data);
261 bch_submit_bbio(bio, b->c, &b->key, 0);
265 set_btree_node_io_error(b);
267 bch_bbio_free(bio, b->c);
269 if (btree_node_io_error(b))
272 bch_btree_node_read_done(b);
273 bch_time_stats_update(&b->c->btree_read_time, start_time);
277 bch_cache_set_error(b->c, "io error reading bucket %zu",
278 PTR_BUCKET_NR(b->c, &b->key, 0));
281 static void btree_complete_write(struct btree *b, struct btree_write *w)
283 if (w->prio_blocked &&
284 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
285 wake_up_allocators(b->c);
288 atomic_dec_bug(w->journal);
289 __closure_wake_up(&b->c->journal.wait);
296 static void btree_node_write_unlock(struct closure *cl)
298 struct btree *b = container_of(cl, struct btree, io);
303 static void __btree_node_write_done(struct closure *cl)
305 struct btree *b = container_of(cl, struct btree, io);
306 struct btree_write *w = btree_prev_write(b);
308 bch_bbio_free(b->bio, b->c);
310 btree_complete_write(b, w);
312 if (btree_node_dirty(b))
313 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
315 closure_return_with_destructor(cl, btree_node_write_unlock);
318 static void btree_node_write_done(struct closure *cl)
320 struct btree *b = container_of(cl, struct btree, io);
322 bio_free_pages(b->bio);
323 __btree_node_write_done(cl);
326 static void btree_node_write_endio(struct bio *bio)
328 struct closure *cl = bio->bi_private;
329 struct btree *b = container_of(cl, struct btree, io);
332 set_btree_node_io_error(b);
334 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
338 static void do_btree_node_write(struct btree *b)
340 struct closure *cl = &b->io;
341 struct bset *i = btree_bset_last(b);
344 i->version = BCACHE_BSET_VERSION;
345 i->csum = btree_csum_set(b, i);
348 b->bio = bch_bbio_alloc(b->c);
350 b->bio->bi_end_io = btree_node_write_endio;
351 b->bio->bi_private = cl;
352 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
353 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
354 bch_bio_map(b->bio, i);
357 * If we're appending to a leaf node, we don't technically need FUA -
358 * this write just needs to be persisted before the next journal write,
359 * which will be marked FLUSH|FUA.
361 * Similarly if we're writing a new btree root - the pointer is going to
362 * be in the next journal entry.
364 * But if we're writing a new btree node (that isn't a root) or
365 * appending to a non leaf btree node, we need either FUA or a flush
366 * when we write the parent with the new pointer. FUA is cheaper than a
367 * flush, and writes appending to leaf nodes aren't blocking anything so
368 * just make all btree node writes FUA to keep things sane.
371 bkey_copy(&k.key, &b->key);
372 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
373 bset_sector_offset(&b->keys, i));
375 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
377 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
378 struct bvec_iter_all iter_all;
380 bio_for_each_segment_all(bv, b->bio, iter_all) {
381 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
385 bch_submit_bbio(b->bio, b->c, &k.key, 0);
387 continue_at(cl, btree_node_write_done, NULL);
390 * No problem for multipage bvec since the bio is
394 bch_bio_map(b->bio, i);
396 bch_submit_bbio(b->bio, b->c, &k.key, 0);
399 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
403 void __bch_btree_node_write(struct btree *b, struct closure *parent)
405 struct bset *i = btree_bset_last(b);
407 lockdep_assert_held(&b->write_lock);
409 trace_bcache_btree_write(b);
411 BUG_ON(current->bio_list);
412 BUG_ON(b->written >= btree_blocks(b));
413 BUG_ON(b->written && !i->keys);
414 BUG_ON(btree_bset_first(b)->seq != i->seq);
415 bch_check_keys(&b->keys, "writing");
417 cancel_delayed_work(&b->work);
419 /* If caller isn't waiting for write, parent refcount is cache set */
421 closure_init(&b->io, parent ?: &b->c->cl);
423 clear_bit(BTREE_NODE_dirty, &b->flags);
424 change_bit(BTREE_NODE_write_idx, &b->flags);
426 do_btree_node_write(b);
428 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
429 &b->c->cache->btree_sectors_written);
431 b->written += set_blocks(i, block_bytes(b->c->cache));
434 void bch_btree_node_write(struct btree *b, struct closure *parent)
436 unsigned int nsets = b->keys.nsets;
438 lockdep_assert_held(&b->lock);
440 __bch_btree_node_write(b, parent);
443 * do verify if there was more than one set initially (i.e. we did a
444 * sort) and we sorted down to a single set:
446 if (nsets && !b->keys.nsets)
449 bch_btree_init_next(b);
452 static void bch_btree_node_write_sync(struct btree *b)
456 closure_init_stack(&cl);
458 mutex_lock(&b->write_lock);
459 bch_btree_node_write(b, &cl);
460 mutex_unlock(&b->write_lock);
465 static void btree_node_write_work(struct work_struct *w)
467 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
469 mutex_lock(&b->write_lock);
470 if (btree_node_dirty(b))
471 __bch_btree_node_write(b, NULL);
472 mutex_unlock(&b->write_lock);
475 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
477 struct bset *i = btree_bset_last(b);
478 struct btree_write *w = btree_current_write(b);
480 lockdep_assert_held(&b->write_lock);
485 if (!btree_node_dirty(b))
486 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
488 set_btree_node_dirty(b);
491 * w->journal is always the oldest journal pin of all bkeys
492 * in the leaf node, to make sure the oldest jset seq won't
493 * be increased before this btree node is flushed.
497 journal_pin_cmp(b->c, w->journal, journal_ref)) {
498 atomic_dec_bug(w->journal);
503 w->journal = journal_ref;
504 atomic_inc(w->journal);
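/*
 * The matching atomic_dec_bug() is in btree_complete_write(), once this node
 * has made it to disk and the journal entries pinning its keys can be
 * reclaimed.
 */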
508 /* Force write if set is too big */
509 if (set_bytes(i) > PAGE_SIZE - 48 &&
511 bch_btree_node_write(b, NULL);
515 * Btree in memory cache - allocation/freeing
516 * mca -> memory cache
519 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
520 ? c->root->level : 1) * 8 + 16)
521 #define mca_can_free(c) \
522 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
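
/*
 * Worked example: with the root at level 2, mca_reserve() is 2 * 8 + 16 = 32
 * btree nodes that bch_mca_scan()/bch_mca_count() will never hand back to the
 * shrinker.
 */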
524 static void mca_data_free(struct btree *b)
526 BUG_ON(b->io_mutex.count != 1);
528 bch_btree_keys_free(&b->keys);
530 b->c->btree_cache_used--;
531 list_move(&b->list, &b->c->btree_cache_freed);
534 static void mca_bucket_free(struct btree *b)
536 BUG_ON(btree_node_dirty(b));
539 hlist_del_init_rcu(&b->hash);
540 list_move(&b->list, &b->c->btree_cache_freeable);
543 static unsigned int btree_order(struct bkey *k)
545 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
548 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
550 if (!bch_btree_keys_alloc(&b->keys,
552 ilog2(b->c->btree_pages),
555 b->c->btree_cache_used++;
556 list_move(&b->list, &b->c->btree_cache);
558 list_move(&b->list, &b->c->btree_cache_freed);
562 static struct btree *mca_bucket_alloc(struct cache_set *c,
563 struct bkey *k, gfp_t gfp)
566 * kzalloc() is necessary here for initialization,
567 * see code comments in bch_btree_keys_init().
569 struct btree *b = kzalloc(sizeof(struct btree), gfp);
574 init_rwsem(&b->lock);
575 lockdep_set_novalidate_class(&b->lock);
576 mutex_init(&b->write_lock);
577 lockdep_set_novalidate_class(&b->write_lock);
578 INIT_LIST_HEAD(&b->list);
579 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
581 sema_init(&b->io_mutex, 1);
583 mca_data_alloc(b, k, gfp);
587 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
591 closure_init_stack(&cl);
592 lockdep_assert_held(&b->c->bucket_lock);
594 if (!down_write_trylock(&b->lock))
597 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
599 if (b->keys.page_order < min_order)
603 if (btree_node_dirty(b))
606 if (down_trylock(&b->io_mutex))
613 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
614 * __bch_btree_node_write(). To avoid an extra flush, acquire
615 * b->write_lock before checking BTREE_NODE_dirty bit.
617 mutex_lock(&b->write_lock);
619 * If this btree node is selected in btree_flush_write() by journal
620 * code, delay and retry until the node is flushed by journal code
621 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
623 if (btree_node_journal_flush(b)) {
624 pr_debug("bnode %p is being flushed by journal, retry\n", b);
625 mutex_unlock(&b->write_lock);
630 if (btree_node_dirty(b))
631 __bch_btree_node_write(b, &cl);
632 mutex_unlock(&b->write_lock);
636 /* wait for any in flight btree write */
646 static unsigned long bch_mca_scan(struct shrinker *shrink,
647 struct shrink_control *sc)
649 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
651 unsigned long i, nr = sc->nr_to_scan;
652 unsigned long freed = 0;
653 unsigned int btree_cache_used;
655 if (c->shrinker_disabled)
658 if (c->btree_cache_alloc_lock)
661 /* Return -1 if we can't do anything right now */
662 if (sc->gfp_mask & __GFP_IO)
663 mutex_lock(&c->bucket_lock);
664 else if (!mutex_trylock(&c->bucket_lock))
668 * It's _really_ critical that we don't free too many btree nodes - we
669 * have to always leave ourselves a reserve. The reserve is how we
670 * guarantee that allocating memory for a new btree node can always
671 * succeed, so that inserting keys into the btree can always succeed and
672 * IO can always make forward progress:
674 nr /= c->btree_pages;
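/*
 * sc->nr_to_scan is in pages; the btree cache is managed in whole nodes of
 * c->btree_pages pages each, hence this conversion and the matching
 * multiplication on the values returned here and in bch_mca_count().
 */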
677 nr = min_t(unsigned long, nr, mca_can_free(c));
680 btree_cache_used = c->btree_cache_used;
681 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
685 if (!mca_reap(b, 0, false)) {
694 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
695 if (nr <= 0 || i >= btree_cache_used)
698 if (!mca_reap(b, 0, false)) {
709 mutex_unlock(&c->bucket_lock);
710 return freed * c->btree_pages;
713 static unsigned long bch_mca_count(struct shrinker *shrink,
714 struct shrink_control *sc)
716 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
718 if (c->shrinker_disabled)
721 if (c->btree_cache_alloc_lock)
724 return mca_can_free(c) * c->btree_pages;
727 void bch_btree_cache_free(struct cache_set *c)
732 closure_init_stack(&cl);
734 if (c->shrink.list.next)
735 unregister_shrinker(&c->shrink);
737 mutex_lock(&c->bucket_lock);
739 #ifdef CONFIG_BCACHE_DEBUG
741 list_move(&c->verify_data->list, &c->btree_cache);
743 free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
746 list_splice(&c->btree_cache_freeable,
749 while (!list_empty(&c->btree_cache)) {
750 b = list_first_entry(&c->btree_cache, struct btree, list);
753 * This function is called by cache_set_free(); there is no I/O
754 * request on the cache at this point, so it is unnecessary to
755 * acquire b->write_lock before clearing BTREE_NODE_dirty here.
757 if (btree_node_dirty(b)) {
758 btree_complete_write(b, btree_current_write(b));
759 clear_bit(BTREE_NODE_dirty, &b->flags);
764 while (!list_empty(&c->btree_cache_freed)) {
765 b = list_first_entry(&c->btree_cache_freed,
768 cancel_delayed_work_sync(&b->work);
772 mutex_unlock(&c->bucket_lock);
775 int bch_btree_cache_alloc(struct cache_set *c)
779 for (i = 0; i < mca_reserve(c); i++)
780 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
783 list_splice_init(&c->btree_cache,
784 &c->btree_cache_freeable);
786 #ifdef CONFIG_BCACHE_DEBUG
787 mutex_init(&c->verify_lock);
789 c->verify_ondisk = (void *)
790 __get_free_pages(GFP_KERNEL|__GFP_COMP,
791 ilog2(meta_bucket_pages(&c->cache->sb)));
792 if (!c->verify_ondisk) {
794 * Don't worry about the mca_reserve() buckets
795 * allocated in previous for-loop, they will be
796 * handled properly in bch_cache_set_unregister().
801 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
803 if (c->verify_data &&
804 c->verify_data->keys.set->data)
805 list_del_init(&c->verify_data->list);
807 c->verify_data = NULL;
810 c->shrink.count_objects = bch_mca_count;
811 c->shrink.scan_objects = bch_mca_scan;
813 c->shrink.batch = c->btree_pages * 2;
815 if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
816 pr_warn("bcache: %s: could not register shrinker\n",
822 /* Btree in memory cache - hash table */
824 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
826 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
829 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
834 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
835 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
843 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
845 spin_lock(&c->btree_cannibalize_lock);
846 if (likely(c->btree_cache_alloc_lock == NULL)) {
847 c->btree_cache_alloc_lock = current;
848 } else if (c->btree_cache_alloc_lock != current) {
850 prepare_to_wait(&c->btree_cache_wait, &op->wait,
851 TASK_UNINTERRUPTIBLE);
852 spin_unlock(&c->btree_cannibalize_lock);
855 spin_unlock(&c->btree_cannibalize_lock);
860 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
865 trace_bcache_btree_cache_cannibalize(c);
867 if (mca_cannibalize_lock(c, op))
868 return ERR_PTR(-EINTR);
870 list_for_each_entry_reverse(b, &c->btree_cache, list)
871 if (!mca_reap(b, btree_order(k), false))
874 list_for_each_entry_reverse(b, &c->btree_cache, list)
875 if (!mca_reap(b, btree_order(k), true))
878 WARN(1, "btree cache cannibalize failed\n");
879 return ERR_PTR(-ENOMEM);
883 * We can only have one thread cannibalizing other cached btree nodes at a time,
884 * or we'll deadlock. We use an open coded mutex to ensure that, taken by
885 * mca_cannibalize_lock(). This means every time we unlock the root of
886 * the btree, we need to release this lock if we have it held.
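*
* A minimal sketch of the pattern from a caller's point of view (hypothetical
* caller, not code from this file): mca_cannibalize_lock() either makes us the
* owner or queues op on btree_cache_wait and fails so we unwind and retry
* later; once we own it we may reap other cached nodes, and
* bch_cannibalize_unlock() clears ownership and wakes the next waiter:
*
*	if (mca_cannibalize_lock(c, op))
*		return ERR_PTR(-EINTR);
*	b = mca_cannibalize(c, op, k);
*	...
*	bch_cannibalize_unlock(c);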
888 static void bch_cannibalize_unlock(struct cache_set *c)
890 spin_lock(&c->btree_cannibalize_lock);
891 if (c->btree_cache_alloc_lock == current) {
892 c->btree_cache_alloc_lock = NULL;
893 wake_up(&c->btree_cache_wait);
895 spin_unlock(&c->btree_cannibalize_lock);
898 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
899 struct bkey *k, int level)
903 BUG_ON(current->bio_list);
905 lockdep_assert_held(&c->bucket_lock);
910 /* btree_free() doesn't free memory; it sticks the node on the end of
911 * the list. Check if there are any freed nodes there:
913 list_for_each_entry(b, &c->btree_cache_freeable, list)
914 if (!mca_reap(b, btree_order(k), false))
917 /* We never free struct btree itself, just the memory that holds the on
918 * disk node. Check the freed list before allocating a new one:
920 list_for_each_entry(b, &c->btree_cache_freed, list)
921 if (!mca_reap(b, 0, false)) {
922 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
923 if (!b->keys.set[0].data)
929 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
933 BUG_ON(!down_write_trylock(&b->lock));
934 if (!b->keys.set->data)
937 BUG_ON(b->io_mutex.count != 1);
939 bkey_copy(&b->key, k);
940 list_move(&b->list, &c->btree_cache);
941 hlist_del_init_rcu(&b->hash);
942 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
944 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
945 b->parent = (void *) ~0UL;
951 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
952 &b->c->expensive_debug_checks);
954 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
955 &b->c->expensive_debug_checks);
962 b = mca_cannibalize(c, op, k);
970 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
971 * in from disk if necessary.
973 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
975 * The btree node will have either a read or a write lock held, depending on
976 * level and op->lock.
978 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
979 struct bkey *k, int level, bool write,
980 struct btree *parent)
990 if (current->bio_list)
991 return ERR_PTR(-EAGAIN);
993 mutex_lock(&c->bucket_lock);
994 b = mca_alloc(c, op, k, level);
995 mutex_unlock(&c->bucket_lock);
1002 bch_btree_node_read(b);
1005 downgrade_write(&b->lock);
1007 rw_lock(write, b, level);
1008 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1009 rw_unlock(write, b);
1012 BUG_ON(b->level != level);
1015 if (btree_node_io_error(b)) {
1016 rw_unlock(write, b);
1017 return ERR_PTR(-EIO);
1020 BUG_ON(!b->written);
1024 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1025 prefetch(b->keys.set[i].tree);
1026 prefetch(b->keys.set[i].data);
1029 for (; i <= b->keys.nsets; i++)
1030 prefetch(b->keys.set[i].data);
1035 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1039 mutex_lock(&parent->c->bucket_lock);
1040 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1041 mutex_unlock(&parent->c->bucket_lock);
1043 if (!IS_ERR_OR_NULL(b)) {
1045 bch_btree_node_read(b);
1052 static void btree_node_free(struct btree *b)
1054 trace_bcache_btree_node_free(b);
1056 BUG_ON(b == b->c->root);
1059 mutex_lock(&b->write_lock);
1061 * If the btree node is selected and being flushed in btree_flush_write(),
1062 * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
1063 * only then is it safe to free the btree node here. Otherwise freeing
1064 * it would race with the journal flush.
1066 if (btree_node_journal_flush(b)) {
1067 mutex_unlock(&b->write_lock);
1068 pr_debug("bnode %p journal_flush set, retry\n", b);
1073 if (btree_node_dirty(b)) {
1074 btree_complete_write(b, btree_current_write(b));
1075 clear_bit(BTREE_NODE_dirty, &b->flags);
1078 mutex_unlock(&b->write_lock);
1080 cancel_delayed_work(&b->work);
1082 mutex_lock(&b->c->bucket_lock);
1083 bch_bucket_free(b->c, &b->key);
1085 mutex_unlock(&b->c->bucket_lock);
1088 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1089 int level, bool wait,
1090 struct btree *parent)
1095 mutex_lock(&c->bucket_lock);
1097 /* return ERR_PTR(-EAGAIN) when it fails */
1098 b = ERR_PTR(-EAGAIN);
1099 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
1102 bkey_put(c, &k.key);
1103 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1105 b = mca_alloc(c, op, &k.key, level);
1111 "Tried to allocate bucket that was in btree cache");
1116 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1118 mutex_unlock(&c->bucket_lock);
1120 trace_bcache_btree_node_alloc(b);
1123 bch_bucket_free(c, &k.key);
1125 mutex_unlock(&c->bucket_lock);
1127 trace_bcache_btree_node_alloc_fail(c);
1131 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1132 struct btree_op *op, int level,
1133 struct btree *parent)
1135 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1138 static struct btree *btree_node_alloc_replacement(struct btree *b,
1139 struct btree_op *op)
1141 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1144 mutex_lock(&n->write_lock);
1145 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1146 bkey_copy_key(&n->key, &b->key);
1147 mutex_unlock(&n->write_lock);
1153 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1157 mutex_lock(&b->c->bucket_lock);
1159 atomic_inc(&b->c->prio_blocked);
1161 bkey_copy(k, &b->key);
1162 bkey_copy_key(k, &ZERO_KEY);
1164 for (i = 0; i < KEY_PTRS(k); i++)
1166 bch_inc_gen(b->c->cache,
1167 PTR_BUCKET(b->c, &b->key, i)));
1169 mutex_unlock(&b->c->bucket_lock);
1172 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1174 struct cache_set *c = b->c;
1175 struct cache *ca = c->cache;
1176 unsigned int reserve = (c->root->level - b->level) * 2 + 1;
1178 mutex_lock(&c->bucket_lock);
1180 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1182 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1183 TASK_UNINTERRUPTIBLE);
1184 mutex_unlock(&c->bucket_lock);
1188 mutex_unlock(&c->bucket_lock);
1190 return mca_cannibalize_lock(b->c, op);
1193 /* Garbage collection */
1195 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1203 * ptr_invalid() can't return true for the keys that mark btree nodes as
1204 * freed, but since ptr_bad() returns true we'll never actually use them
1205 * for anything and thus we don't want to mark their pointers here
1207 if (!bkey_cmp(k, &ZERO_KEY))
1210 for (i = 0; i < KEY_PTRS(k); i++) {
1211 if (!ptr_available(c, k, i))
1214 g = PTR_BUCKET(c, k, i);
1216 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1217 g->last_gc = PTR_GEN(k, i);
1219 if (ptr_stale(c, k, i)) {
1220 stale = max(stale, ptr_stale(c, k, i));
1224 cache_bug_on(GC_MARK(g) &&
1225 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1226 c, "inconsistent ptrs: mark = %llu, level = %i",
1230 SET_GC_MARK(g, GC_MARK_METADATA);
1231 else if (KEY_DIRTY(k))
1232 SET_GC_MARK(g, GC_MARK_DIRTY);
1233 else if (!GC_MARK(g))
1234 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
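/*
 * Net effect: buckets referenced from non-leaf keys (i.e. btree node buckets)
 * are marked METADATA, buckets holding dirty data are marked DIRTY, and
 * buckets holding only clean cached data stay RECLAIMABLE, meaning the
 * allocator is free to invalidate them.
 */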
1236 /* guard against overflow */
1237 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1238 GC_SECTORS_USED(g) + KEY_SIZE(k),
1239 MAX_GC_SECTORS_USED));
1241 BUG_ON(!GC_SECTORS_USED(g));
1247 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1249 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1253 for (i = 0; i < KEY_PTRS(k); i++)
1254 if (ptr_available(c, k, i) &&
1255 !ptr_stale(c, k, i)) {
1256 struct bucket *b = PTR_BUCKET(c, k, i);
1258 b->gen = PTR_GEN(k, i);
1260 if (level && bkey_cmp(k, &ZERO_KEY))
1261 b->prio = BTREE_PRIO;
1262 else if (!level && b->prio == BTREE_PRIO)
1263 b->prio = INITIAL_PRIO;
1266 __bch_btree_mark_key(c, level, k);
1269 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1271 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
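/* e.g. 1000 buckets total with 250 still available gives in_use = 75 (%) */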
1274 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1277 unsigned int keys = 0, good_keys = 0;
1279 struct btree_iter iter;
1280 struct bset_tree *t;
1284 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1285 stale = max(stale, btree_mark_key(b, k));
1288 if (bch_ptr_bad(&b->keys, k))
1291 gc->key_bytes += bkey_u64s(k);
1295 gc->data += KEY_SIZE(k);
1298 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1299 btree_bug_on(t->size &&
1300 bset_written(&b->keys, t) &&
1301 bkey_cmp(&b->key, &t->end) < 0,
1302 b, "found short btree key in gc");
1304 if (b->c->gc_always_rewrite)
1310 if ((keys - good_keys) * 2 > keys)
1316 #define GC_MERGE_NODES 4U
1318 struct gc_merge_info {
1323 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1324 struct keylist *insert_keys,
1325 atomic_t *journal_ref,
1326 struct bkey *replace_key);
1328 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1329 struct gc_stat *gc, struct gc_merge_info *r)
1331 unsigned int i, nodes = 0, keys = 0, blocks;
1332 struct btree *new_nodes[GC_MERGE_NODES];
1333 struct keylist keylist;
1337 bch_keylist_init(&keylist);
1339 if (btree_check_reserve(b, NULL))
1342 memset(new_nodes, 0, sizeof(new_nodes));
1343 closure_init_stack(&cl);
1345 while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
1346 keys += r[nodes++].keys;
1348 blocks = btree_default_blocks(b->c) * 2 / 3;
1351 __set_blocks(b->keys.set[0].data, keys,
1352 block_bytes(b->c->cache)) > blocks * (nodes - 1))
1355 for (i = 0; i < nodes; i++) {
1356 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1357 if (IS_ERR(new_nodes[i]))
1358 goto out_nocoalesce;
1362 * We have to check the reserve here, after we've allocated our new
1363 * nodes, to make sure the insert below will succeed - we also check
1364 * before as an optimization to potentially avoid a bunch of expensive
1367 if (btree_check_reserve(b, NULL))
1368 goto out_nocoalesce;
1370 for (i = 0; i < nodes; i++)
1371 mutex_lock(&new_nodes[i]->write_lock);
1373 for (i = nodes - 1; i > 0; --i) {
1374 struct bset *n1 = btree_bset_first(new_nodes[i]);
1375 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1376 struct bkey *k, *last = NULL;
1382 k < bset_bkey_last(n2);
1384 if (__set_blocks(n1, n1->keys + keys +
1386 block_bytes(b->c->cache)) > blocks)
1390 keys += bkey_u64s(k);
1394 * Last node we're not getting rid of - we're getting
1395 * rid of the node at r[0]. Have to try and fit all of
1396 * the remaining keys into this node; we can't ensure
1397 * they will always fit due to rounding and variable
1398 * length keys (shouldn't be possible in practice,
1401 if (__set_blocks(n1, n1->keys + n2->keys,
1402 block_bytes(b->c->cache)) >
1403 btree_blocks(new_nodes[i]))
1404 goto out_unlock_nocoalesce;
1407 /* Take the key of the node we're getting rid of */
1411 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1412 btree_blocks(new_nodes[i]));
1415 bkey_copy_key(&new_nodes[i]->key, last);
1417 memcpy(bset_bkey_last(n1),
1419 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1422 r[i].keys = n1->keys;
1425 bset_bkey_idx(n2, keys),
1426 (void *) bset_bkey_last(n2) -
1427 (void *) bset_bkey_idx(n2, keys));
1431 if (__bch_keylist_realloc(&keylist,
1432 bkey_u64s(&new_nodes[i]->key)))
1433 goto out_unlock_nocoalesce;
1435 bch_btree_node_write(new_nodes[i], &cl);
1436 bch_keylist_add(&keylist, &new_nodes[i]->key);
1439 for (i = 0; i < nodes; i++)
1440 mutex_unlock(&new_nodes[i]->write_lock);
1444 /* We emptied out this node */
1445 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1446 btree_node_free(new_nodes[0]);
1447 rw_unlock(true, new_nodes[0]);
1448 new_nodes[0] = NULL;
1450 for (i = 0; i < nodes; i++) {
1451 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1452 goto out_nocoalesce;
1454 make_btree_freeing_key(r[i].b, keylist.top);
1455 bch_keylist_push(&keylist);
1458 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1459 BUG_ON(!bch_keylist_empty(&keylist));
1461 for (i = 0; i < nodes; i++) {
1462 btree_node_free(r[i].b);
1463 rw_unlock(true, r[i].b);
1465 r[i].b = new_nodes[i];
1468 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1469 r[nodes - 1].b = ERR_PTR(-EINTR);
1471 trace_bcache_btree_gc_coalesce(nodes);
1474 bch_keylist_free(&keylist);
1476 /* Invalidated our iterator */
1479 out_unlock_nocoalesce:
1480 for (i = 0; i < nodes; i++)
1481 mutex_unlock(&new_nodes[i]->write_lock);
1486 while ((k = bch_keylist_pop(&keylist)))
1487 if (!bkey_cmp(k, &ZERO_KEY))
1488 atomic_dec(&b->c->prio_blocked);
1489 bch_keylist_free(&keylist);
1491 for (i = 0; i < nodes; i++)
1492 if (!IS_ERR(new_nodes[i])) {
1493 btree_node_free(new_nodes[i]);
1494 rw_unlock(true, new_nodes[i]);
1499 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1500 struct btree *replace)
1502 struct keylist keys;
1505 if (btree_check_reserve(b, NULL))
1508 n = btree_node_alloc_replacement(replace, NULL);
1510 /* recheck reserve after allocating replacement node */
1511 if (btree_check_reserve(b, NULL)) {
1517 bch_btree_node_write_sync(n);
1519 bch_keylist_init(&keys);
1520 bch_keylist_add(&keys, &n->key);
1522 make_btree_freeing_key(replace, keys.top);
1523 bch_keylist_push(&keys);
1525 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1526 BUG_ON(!bch_keylist_empty(&keys));
1528 btree_node_free(replace);
1531 /* Invalidated our iterator */
1535 static unsigned int btree_gc_count_keys(struct btree *b)
1538 struct btree_iter iter;
1539 unsigned int ret = 0;
1541 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1542 ret += bkey_u64s(k);
1547 static size_t btree_gc_min_nodes(struct cache_set *c)
1552 * Incremental GC pauses for 100ms (GC_SLEEP_MS) whenever front
1553 * side I/O arrives. If GC only processed a constant number (100)
1554 * of nodes per batch, then on a cache with many btree nodes GC
1555 * would take a very long time, the front side I/Os would run
1556 * out of buckets (since no new bucket can be allocated during
1557 * GC) and be blocked again.
1558 * So instead of a constant batch size, scale the batch with the
1559 * number of btree nodes: divide GC into a constant number
1560 * (MAX_GC_TIMES = 100) of batches, so that a cache with many
1561 * btree nodes processes more nodes per batch and a small one
1562 * processes fewer, but never fewer than MIN_GC_NODES per
1563 * batch.
1565 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1566 if (min_nodes < MIN_GC_NODES)
1567 min_nodes = MIN_GC_NODES;
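/*
 * e.g. a cache with 200000 btree nodes gets batches of 200000 / MAX_GC_TIMES =
 * 2000 nodes, while a small cache falls back to the MIN_GC_NODES floor of 100.
 */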
1573 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1574 struct closure *writes, struct gc_stat *gc)
1577 bool should_rewrite;
1579 struct btree_iter iter;
1580 struct gc_merge_info r[GC_MERGE_NODES];
1581 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1583 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1585 for (i = r; i < r + ARRAY_SIZE(r); i++)
1586 i->b = ERR_PTR(-EINTR);
1589 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1591 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1594 ret = PTR_ERR(r->b);
1598 r->keys = btree_gc_count_keys(r->b);
1600 ret = btree_gc_coalesce(b, op, gc, r);
1608 if (!IS_ERR(last->b)) {
1609 should_rewrite = btree_gc_mark_node(last->b, gc);
1610 if (should_rewrite) {
1611 ret = btree_gc_rewrite_node(b, op, last->b);
1616 if (last->b->level) {
1617 ret = btree_gc_recurse(last->b, op, writes, gc);
1622 bkey_copy_key(&b->c->gc_done, &last->b->key);
1625 * Must flush leaf nodes before gc ends, since replace
1626 * operations aren't journalled
1628 mutex_lock(&last->b->write_lock);
1629 if (btree_node_dirty(last->b))
1630 bch_btree_node_write(last->b, writes);
1631 mutex_unlock(&last->b->write_lock);
1632 rw_unlock(true, last->b);
1635 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1638 if (atomic_read(&b->c->search_inflight) &&
1639 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1640 gc->nodes_pre = gc->nodes;
1645 if (need_resched()) {
1651 for (i = r; i < r + ARRAY_SIZE(r); i++)
1652 if (!IS_ERR_OR_NULL(i->b)) {
1653 mutex_lock(&i->b->write_lock);
1654 if (btree_node_dirty(i->b))
1655 bch_btree_node_write(i->b, writes);
1656 mutex_unlock(&i->b->write_lock);
1657 rw_unlock(true, i->b);
1663 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1664 struct closure *writes, struct gc_stat *gc)
1666 struct btree *n = NULL;
1668 bool should_rewrite;
1670 should_rewrite = btree_gc_mark_node(b, gc);
1671 if (should_rewrite) {
1672 n = btree_node_alloc_replacement(b, NULL);
1675 bch_btree_node_write_sync(n);
1677 bch_btree_set_root(n);
1685 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1688 ret = btree_gc_recurse(b, op, writes, gc);
1693 bkey_copy_key(&b->c->gc_done, &b->key);
1698 static void btree_gc_start(struct cache_set *c)
1703 if (!c->gc_mark_valid)
1706 mutex_lock(&c->bucket_lock);
1708 c->gc_mark_valid = 0;
1709 c->gc_done = ZERO_KEY;
1712 for_each_bucket(b, ca) {
1713 b->last_gc = b->gen;
1714 if (!atomic_read(&b->pin)) {
1716 SET_GC_SECTORS_USED(b, 0);
1720 mutex_unlock(&c->bucket_lock);
1723 static void bch_btree_gc_finish(struct cache_set *c)
1730 mutex_lock(&c->bucket_lock);
1733 c->gc_mark_valid = 1;
1736 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1737 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1740 /* don't reclaim buckets to which writeback keys point */
1742 for (i = 0; i < c->devices_max_used; i++) {
1743 struct bcache_device *d = c->devices[i];
1744 struct cached_dev *dc;
1745 struct keybuf_key *w, *n;
1747 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1749 dc = container_of(d, struct cached_dev, disk);
1751 spin_lock(&dc->writeback_keys.lock);
1752 rbtree_postorder_for_each_entry_safe(w, n,
1753 &dc->writeback_keys.keys, node)
1754 for (j = 0; j < KEY_PTRS(&w->key); j++)
1755 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1757 spin_unlock(&dc->writeback_keys.lock);
1761 c->avail_nbuckets = 0;
1764 ca->invalidate_needs_gc = 0;
1766 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1767 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1769 for (k = ca->prio_buckets;
1770 k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1771 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1773 for_each_bucket(b, ca) {
1774 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1776 if (atomic_read(&b->pin))
1779 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1781 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1782 c->avail_nbuckets++;
1785 mutex_unlock(&c->bucket_lock);
1788 static void bch_btree_gc(struct cache_set *c)
1791 struct gc_stat stats;
1792 struct closure writes;
1794 uint64_t start_time = local_clock();
1796 trace_bcache_gc_start(c);
1798 memset(&stats, 0, sizeof(struct gc_stat));
1799 closure_init_stack(&writes);
1800 bch_btree_op_init(&op, SHRT_MAX);
1804 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1806 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1807 closure_sync(&writes);
1811 schedule_timeout_interruptible(msecs_to_jiffies
1814 pr_warn("gc failed!\n");
1815 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1817 bch_btree_gc_finish(c);
1818 wake_up_allocators(c);
1820 bch_time_stats_update(&c->btree_gc_time, start_time);
1822 stats.key_bytes *= sizeof(uint64_t);
1824 bch_update_bucket_in_use(c, &stats);
1825 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1827 trace_bcache_gc_end(c);
1832 static bool gc_should_run(struct cache_set *c)
1834 struct cache *ca = c->cache;
1836 if (ca->invalidate_needs_gc)
1839 if (atomic_read(&c->sectors_to_gc) < 0)
1845 static int bch_gc_thread(void *arg)
1847 struct cache_set *c = arg;
1850 wait_event_interruptible(c->gc_wait,
1851 kthread_should_stop() ||
1852 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1855 if (kthread_should_stop() ||
1856 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1863 wait_for_kthread_stop();
1867 int bch_gc_thread_start(struct cache_set *c)
1869 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1870 return PTR_ERR_OR_ZERO(c->gc_thread);
1873 /* Initial partial gc */
1875 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1878 struct bkey *k, *p = NULL;
1879 struct btree_iter iter;
1881 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1882 bch_initial_mark_key(b->c, b->level, k);
1884 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1887 bch_btree_iter_init(&b->keys, &iter, NULL);
1890 k = bch_btree_iter_next_filter(&iter, &b->keys,
1893 btree_node_prefetch(b, k);
1895 * initialize c->gc_stats.nodes
1896 * for incremental GC
1898 b->c->gc_stats.nodes++;
1902 ret = bcache_btree(check_recurse, p, b, op);
1905 } while (p && !ret);
1912 static int bch_btree_check_thread(void *arg)
1915 struct btree_check_info *info = arg;
1916 struct btree_check_state *check_state = info->state;
1917 struct cache_set *c = check_state->c;
1918 struct btree_iter iter;
1920 int cur_idx, prev_idx, skip_nr;
1923 cur_idx = prev_idx = 0;
1926 /* root node keys are checked before the threads are created */
1927 bch_btree_iter_init(&c->root->keys, &iter, NULL);
1928 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1934 * Fetch a root node key index, skip the keys which
1935 * should be fetched by other threads, then check the
1936 * sub-tree indexed by the fetched key.
1938 spin_lock(&check_state->idx_lock);
1939 cur_idx = check_state->key_idx;
1940 check_state->key_idx++;
1941 spin_unlock(&check_state->idx_lock);
1943 skip_nr = cur_idx - prev_idx;
1946 k = bch_btree_iter_next_filter(&iter,
1953 * No more keys to check in the root node;
1954 * the currently running check threads are enough,
1955 * stop creating more.
1957 atomic_set(&check_state->enough, 1);
1958 /* Update check_state->enough earlier */
1959 smp_mb__after_atomic();
1969 btree_node_prefetch(c->root, p);
1970 c->gc_stats.nodes++;
1971 bch_btree_op_init(&op, 0);
1972 ret = bcache_btree(check_recurse, p, c->root, &op);
1983 /* update check_state->started among all CPUs */
1984 smp_mb__before_atomic();
1985 if (atomic_dec_and_test(&check_state->started))
1986 wake_up(&check_state->wait);
1993 static int bch_btree_chkthread_nr(void)
1995 int n = num_online_cpus()/2;
1999 else if (n > BCH_BTR_CHKTHREAD_MAX)
2000 n = BCH_BTR_CHKTHREAD_MAX;
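/* e.g. 16 online CPUs -> 8 check threads, capped at BCH_BTR_CHKTHREAD_MAX */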
2005 int bch_btree_check(struct cache_set *c)
2009 struct bkey *k = NULL;
2010 struct btree_iter iter;
2011 struct btree_check_state check_state;
2013 /* check and mark root node keys */
2014 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2015 bch_initial_mark_key(c, c->root->level, k);
2017 bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
2019 if (c->root->level == 0)
2022 memset(&check_state, 0, sizeof(struct btree_check_state));
2024 check_state.total_threads = bch_btree_chkthread_nr();
2025 check_state.key_idx = 0;
2026 spin_lock_init(&check_state.idx_lock);
2027 atomic_set(&check_state.started, 0);
2028 atomic_set(&check_state.enough, 0);
2029 init_waitqueue_head(&check_state.wait);
2031 rw_lock(0, c->root, c->root->level);
2033 * Run multiple threads to check btree nodes in parallel;
2034 * if check_state.enough is non-zero, it means the currently
2035 * running check threads are enough, and it is unnecessary to create
2038 for (i = 0; i < check_state.total_threads; i++) {
2039 /* fetch latest check_state.enough earlier */
2040 smp_mb__before_atomic();
2041 if (atomic_read(&check_state.enough))
2044 check_state.infos[i].result = 0;
2045 check_state.infos[i].state = &check_state;
2047 check_state.infos[i].thread =
2048 kthread_run(bch_btree_check_thread,
2049 &check_state.infos[i],
2050 "bch_btrchk[%d]", i);
2051 if (IS_ERR(check_state.infos[i].thread)) {
2052 pr_err("failed to run thread bch_btrchk[%d]\n", i);
2053 for (--i; i >= 0; i--)
2054 kthread_stop(check_state.infos[i].thread);
2058 atomic_inc(&check_state.started);
2062 * Must wait for all threads to stop.
2064 wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
2066 for (i = 0; i < check_state.total_threads; i++) {
2067 if (check_state.infos[i].result) {
2068 ret = check_state.infos[i].result;
2074 rw_unlock(0, c->root);
2078 void bch_initial_gc_finish(struct cache_set *c)
2080 struct cache *ca = c->cache;
2083 bch_btree_gc_finish(c);
2085 mutex_lock(&c->bucket_lock);
2088 * We need to put some unused buckets directly on the prio freelist in
2089 * order to get the allocator thread started - it needs freed buckets in
2090 * order to rewrite the prios and gens, and it needs to rewrite prios
2091 * and gens in order to free buckets.
2093 * This is only safe for buckets that have no live data in them, which
2094 * there should always be some of.
2096 for_each_bucket(b, ca) {
2097 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2098 fifo_full(&ca->free[RESERVE_BTREE]))
2101 if (bch_can_invalidate_bucket(ca, b) &&
2103 __bch_invalidate_one_bucket(ca, b);
2104 if (!fifo_push(&ca->free[RESERVE_PRIO],
2106 fifo_push(&ca->free[RESERVE_BTREE],
2111 mutex_unlock(&c->bucket_lock);
2114 /* Btree insertion */
2116 static bool btree_insert_key(struct btree *b, struct bkey *k,
2117 struct bkey *replace_key)
2119 unsigned int status;
2121 BUG_ON(bkey_cmp(k, &b->key) > 0);
2123 status = bch_btree_insert_key(&b->keys, k, replace_key);
2124 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2125 bch_check_keys(&b->keys, "%u for %s", status,
2126 replace_key ? "replace" : "insert");
2128 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2135 static size_t insert_u64s_remaining(struct btree *b)
2137 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2140 * Might land in the middle of an existing extent and have to split it
2142 if (b->keys.ops->is_extents)
2143 ret -= KEY_MAX_U64S;
2145 return max(ret, 0L);
2148 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2149 struct keylist *insert_keys,
2150 struct bkey *replace_key)
2153 int oldsize = bch_count_data(&b->keys);
2155 while (!bch_keylist_empty(insert_keys)) {
2156 struct bkey *k = insert_keys->keys;
2158 if (bkey_u64s(k) > insert_u64s_remaining(b))
2161 if (bkey_cmp(k, &b->key) <= 0) {
2165 ret |= btree_insert_key(b, k, replace_key);
2166 bch_keylist_pop_front(insert_keys);
2167 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
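/*
 * Key straddles the end of this node: insert the part that fits here
 * (temp, cut back to b->key) and leave the rest of the key on the
 * keylist, front-cut, for the next node.
 */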
2168 BKEY_PADDED(key) temp;
2169 bkey_copy(&temp.key, insert_keys->keys);
2171 bch_cut_back(&b->key, &temp.key);
2172 bch_cut_front(&b->key, insert_keys->keys);
2174 ret |= btree_insert_key(b, &temp.key, replace_key);
2182 op->insert_collision = true;
2184 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2186 BUG_ON(bch_count_data(&b->keys) < oldsize);
2190 static int btree_split(struct btree *b, struct btree_op *op,
2191 struct keylist *insert_keys,
2192 struct bkey *replace_key)
2195 struct btree *n1, *n2 = NULL, *n3 = NULL;
2196 uint64_t start_time = local_clock();
2198 struct keylist parent_keys;
2200 closure_init_stack(&cl);
2201 bch_keylist_init(&parent_keys);
2203 if (btree_check_reserve(b, op)) {
2207 WARN(1, "insufficient reserve for split\n");
2210 n1 = btree_node_alloc_replacement(b, op);
2214 split = set_blocks(btree_bset_first(n1),
2215 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2218 unsigned int keys = 0;
2220 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2222 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2227 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2232 mutex_lock(&n1->write_lock);
2233 mutex_lock(&n2->write_lock);
2235 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2238 * Has to be a linear search because we don't have an auxiliary
2242 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2243 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2246 bkey_copy_key(&n1->key,
2247 bset_bkey_idx(btree_bset_first(n1), keys));
2248 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2250 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2251 btree_bset_first(n1)->keys = keys;
2253 memcpy(btree_bset_first(n2)->start,
2254 bset_bkey_last(btree_bset_first(n1)),
2255 btree_bset_first(n2)->keys * sizeof(uint64_t));
2257 bkey_copy_key(&n2->key, &b->key);
2259 bch_keylist_add(&parent_keys, &n2->key);
2260 bch_btree_node_write(n2, &cl);
2261 mutex_unlock(&n2->write_lock);
2262 rw_unlock(true, n2);
2264 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2266 mutex_lock(&n1->write_lock);
2267 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2270 bch_keylist_add(&parent_keys, &n1->key);
2271 bch_btree_node_write(n1, &cl);
2272 mutex_unlock(&n1->write_lock);
2275 /* Depth increases, make a new root */
2276 mutex_lock(&n3->write_lock);
2277 bkey_copy_key(&n3->key, &MAX_KEY);
2278 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2279 bch_btree_node_write(n3, &cl);
2280 mutex_unlock(&n3->write_lock);
2283 bch_btree_set_root(n3);
2284 rw_unlock(true, n3);
2285 } else if (!b->parent) {
2286 /* Root filled up but didn't need to be split */
2288 bch_btree_set_root(n1);
2290 /* Split a non root node */
2292 make_btree_freeing_key(b, parent_keys.top);
2293 bch_keylist_push(&parent_keys);
2295 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2296 BUG_ON(!bch_keylist_empty(&parent_keys));
2300 rw_unlock(true, n1);
2302 bch_time_stats_update(&b->c->btree_split_time, start_time);
2306 bkey_put(b->c, &n2->key);
2307 btree_node_free(n2);
2308 rw_unlock(true, n2);
2310 bkey_put(b->c, &n1->key);
2311 btree_node_free(n1);
2312 rw_unlock(true, n1);
2314 WARN(1, "bcache: btree split failed (level %u)", b->level);
2316 if (n3 == ERR_PTR(-EAGAIN) ||
2317 n2 == ERR_PTR(-EAGAIN) ||
2318 n1 == ERR_PTR(-EAGAIN))
2324 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2325 struct keylist *insert_keys,
2326 atomic_t *journal_ref,
2327 struct bkey *replace_key)
2331 BUG_ON(b->level && replace_key);
2333 closure_init_stack(&cl);
2335 mutex_lock(&b->write_lock);
2337 if (write_block(b) != btree_bset_last(b) &&
2338 b->keys.last_set_unwritten)
2339 bch_btree_init_next(b); /* just wrote a set */
2341 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2342 mutex_unlock(&b->write_lock);
2346 BUG_ON(write_block(b) != btree_bset_last(b));
2348 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2350 bch_btree_leaf_dirty(b, journal_ref);
2352 bch_btree_node_write(b, &cl);
2355 mutex_unlock(&b->write_lock);
2357 /* wait for btree node write if necessary, after unlock */
2362 if (current->bio_list) {
2363 op->lock = b->c->root->level + 1;
2365 } else if (op->lock <= b->c->root->level) {
2366 op->lock = b->c->root->level + 1;
2369 /* Invalidated all iterators */
2370 int ret = btree_split(b, op, insert_keys, replace_key);
2372 if (bch_keylist_empty(insert_keys))
2380 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2381 struct bkey *check_key)
2384 uint64_t btree_ptr = b->key.ptr[0];
2385 unsigned long seq = b->seq;
2386 struct keylist insert;
2387 bool upgrade = op->lock == -1;
2389 bch_keylist_init(&insert);
2392 rw_unlock(false, b);
2393 rw_lock(true, b, b->level);
2395 if (b->key.ptr[0] != btree_ptr ||
2396 b->seq != seq + 1) {
2397 op->lock = b->level;
2402 SET_KEY_PTRS(check_key, 1);
2403 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2405 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2407 bch_keylist_add(&insert, check_key);
2409 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2411 BUG_ON(!ret && !bch_keylist_empty(&insert));
2414 downgrade_write(&b->lock);
2418 struct btree_insert_op {
2420 struct keylist *keys;
2421 atomic_t *journal_ref;
2422 struct bkey *replace_key;
2425 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2427 struct btree_insert_op *op = container_of(b_op,
2428 struct btree_insert_op, op);
2430 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2431 op->journal_ref, op->replace_key);
2432 if (ret && !bch_keylist_empty(op->keys))
2438 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2439 atomic_t *journal_ref, struct bkey *replace_key)
2441 struct btree_insert_op op;
2444 BUG_ON(current->bio_list);
2445 BUG_ON(bch_keylist_empty(keys));
2447 bch_btree_op_init(&op.op, 0);
2449 op.journal_ref = journal_ref;
2450 op.replace_key = replace_key;
2452 while (!ret && !bch_keylist_empty(keys)) {
2454 ret = bch_btree_map_leaf_nodes(&op.op, c,
2455 &START_KEY(keys->keys),
2462 pr_err("error %i\n", ret);
2464 while ((k = bch_keylist_pop(keys)))
2466 } else if (op.op.insert_collision)
2472 void bch_btree_set_root(struct btree *b)
2477 closure_init_stack(&cl);
2479 trace_bcache_btree_set_root(b);
2481 BUG_ON(!b->written);
2483 for (i = 0; i < KEY_PTRS(&b->key); i++)
2484 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2486 mutex_lock(&b->c->bucket_lock);
2487 list_del_init(&b->list);
2488 mutex_unlock(&b->c->bucket_lock);
2492 bch_journal_meta(b->c, &cl);
2496 /* Map across nodes or keys */
2498 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2500 btree_map_nodes_fn *fn, int flags)
2502 int ret = MAP_CONTINUE;
2506 struct btree_iter iter;
2508 bch_btree_iter_init(&b->keys, &iter, from);
2510 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2512 ret = bcache_btree(map_nodes_recurse, k, b,
2513 op, from, fn, flags);
2516 if (ret != MAP_CONTINUE)
2521 if (!b->level || flags == MAP_ALL_NODES)
2527 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2528 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2530 return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
2533 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2534 struct bkey *from, btree_map_keys_fn *fn,
2537 int ret = MAP_CONTINUE;
2539 struct btree_iter iter;
2541 bch_btree_iter_init(&b->keys, &iter, from);
2543 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2546 : bcache_btree(map_keys_recurse, k,
2547 b, op, from, fn, flags);
2550 if (ret != MAP_CONTINUE)
2554 if (!b->level && (flags & MAP_END_KEY))
2555 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2556 KEY_OFFSET(&b->key), 0));
2561 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2562 struct bkey *from, btree_map_keys_fn *fn, int flags)
2564 return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
2569 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2571 /* Overlapping keys compare equal */
2572 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2574 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2579 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2580 struct keybuf_key *r)
2582 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2587 unsigned int nr_found;
2590 keybuf_pred_fn *pred;
2593 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2596 struct refill *refill = container_of(op, struct refill, op);
2597 struct keybuf *buf = refill->buf;
2598 int ret = MAP_CONTINUE;
2600 if (bkey_cmp(k, refill->end) > 0) {
2605 if (!KEY_SIZE(k)) /* end key */
2608 if (refill->pred(buf, k)) {
2609 struct keybuf_key *w;
2611 spin_lock(&buf->lock);
2613 w = array_alloc(&buf->freelist);
2615 spin_unlock(&buf->lock);
2620 bkey_copy(&w->key, k);
2622 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2623 array_free(&buf->freelist, w);
2627 if (array_freelist_empty(&buf->freelist))
2630 spin_unlock(&buf->lock);
2633 buf->last_scanned = *k;
2637 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2638 struct bkey *end, keybuf_pred_fn *pred)
2640 struct bkey start = buf->last_scanned;
2641 struct refill refill;
2645 bch_btree_op_init(&refill.op, -1);
2646 refill.nr_found = 0;
2651 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2652 refill_keybuf_fn, MAP_END_KEY);
2654 trace_bcache_keyscan(refill.nr_found,
2655 KEY_INODE(&start), KEY_OFFSET(&start),
2656 KEY_INODE(&buf->last_scanned),
2657 KEY_OFFSET(&buf->last_scanned));
2659 spin_lock(&buf->lock);
2661 if (!RB_EMPTY_ROOT(&buf->keys)) {
2662 struct keybuf_key *w;
2664 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2665 buf->start = START_KEY(&w->key);
2667 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2670 buf->start = MAX_KEY;
2674 spin_unlock(&buf->lock);
2677 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2679 rb_erase(&w->node, &buf->keys);
2680 array_free(&buf->freelist, w);
2683 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2685 spin_lock(&buf->lock);
2686 __bch_keybuf_del(buf, w);
2687 spin_unlock(&buf->lock);
2690 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2694 struct keybuf_key *p, *w, s;
2698 if (bkey_cmp(end, &buf->start) <= 0 ||
2699 bkey_cmp(start, &buf->end) >= 0)
2702 spin_lock(&buf->lock);
2703 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2705 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2707 w = RB_NEXT(w, node);
2712 __bch_keybuf_del(buf, p);
2715 spin_unlock(&buf->lock);
2719 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2721 struct keybuf_key *w;
2723 spin_lock(&buf->lock);
2725 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2727 while (w && w->private)
2728 w = RB_NEXT(w, node);
2731 w->private = ERR_PTR(-EINTR);
2733 spin_unlock(&buf->lock);
2737 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2740 keybuf_pred_fn *pred)
2742 struct keybuf_key *ret;
2745 ret = bch_keybuf_next(buf);
2749 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2750 pr_debug("scan finished\n");
2754 bch_refill_keybuf(c, buf, end, pred);
2760 void bch_keybuf_init(struct keybuf *buf)
2762 buf->last_scanned = MAX_KEY;
2763 buf->keys = RB_ROOT;
2765 spin_lock_init(&buf->lock);
2766 array_allocator_init(&buf->freelist);
2769 void bch_btree_exit(void)
2772 destroy_workqueue(btree_io_wq);
2775 int __init bch_btree_init(void)
2777 btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);