// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * since been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is
 * a smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

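/*
 * Worked example of the gen scheme described above: if a bucket currently has
 * gen 3, every valid btree pointer into it carries gen 3. Invalidating the
 * bucket bumps its gen to 4, which instantly stales all existing pointers
 * (ptr_stale() will see the mismatch); once gen 4 has been written to disk,
 * the bucket's contents can safely be overwritten.
 */
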
#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

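/*
 * Bump @b's gen, staling any pointers into the bucket, and record in
 * ca->set->need_gc how far gens have drifted so that garbage collection
 * runs before bucket_gc_gen() can exceed BUCKET_GC_GEN_MAX.
 */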
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

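/*
 * Decay bucket priorities: once enough sectors have gone by (one decay per
 * nbuckets * bucket_size / 1024 sectors, tracked in c->rescale), drop every
 * unpinned non-btree bucket's prio by one, so that prio behaves as a rough
 * LRU clock for invalidate_buckets_lru().
 */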
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned int i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

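/*
 * Invalidate @b in memory: bump its gen (so existing pointers go stale) and
 * reset its prio; the new gen still has to reach disk before the bucket can
 * safely be reused.
 */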
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities as well.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

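/*
 * LRU replacement: scan every bucket, keeping the best (smallest
 * bucket_prio()) candidates in a bounded heap, then invalidate them in
 * smallest-prio-first order until free_inc is full or the heap runs dry.
 */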
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

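/*
 * FIFO replacement: sweep the device in bucket order starting from
 * fifo_last_bucket, invalidating whatever is reusable; if a full pass
 * doesn't fill free_inc, ask gc for help.
 */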
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

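/*
 * Random replacement: probe buckets at random; give up and ask gc for help
 * after checking half the device without filling free_inc.
 */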
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

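/*
 * Sleep until @cond becomes true, dropping bucket_lock around the wait.
 * Bails out via "goto out" when the kthread should stop or the cache set
 * has gone read-only, so the caller must provide an out: label.
 */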
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static int bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

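/*
 * Main loop of the per-cache allocator thread: drain free_inc onto the free
 * lists (discarding first, if enabled), then invalidate more buckets and
 * write out the updated prios/gens, repeating forever.
 */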
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if (!fifo_pop(&ca->free_inc, bucket))
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */
retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here
			 * we're waiting on garbage collection before we
			 * invalidate and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			if (bch_prio_write(ca, false) < 0) {
				ca->invalidate_needs_gc = 1;
				wake_up_gc(ca->set);
			}
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

/* Allocation */

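/*
 * Allocate one bucket from @ca's freelists for @reserve. If @wait is set,
 * sleeps on bucket_wait until the allocator thread has refilled the
 * freelists; returns a bucket index, or -1 on failure.
 */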
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
		return -1;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

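/*
 * Clear @b's gc mark and live-sector count and update the available-bucket
 * accounting; the bucket becomes reusable through the normal invalidate path.
 */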
void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

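/*
 * Allocate @n buckets, one from each of the first @n caches in allocation
 * order, and record a pointer to each in @k; on failure, release whatever
 * was already allocated.
 */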
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
				     bucket_to_sector(c, b),
				     ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache
 * at the same time, you'll get better cache utilization if you try to
 * segregate their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable; if
 * they get mixed into buckets with dirty sectors of a cached device, those
 * buckets stay marked as dirty and won't be reclaimed, even after the cached
 * device's dirty data has been written back to the backing device.
 *
 * And say you're starting Firefox at the same time you're copying a bunch of
 * files. Firefox will likely end up being fairly hot and stay in the cache a
 * while, but the data you copied might not be; if you wrote all that data to
 * the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, updates k to point to the
 * newly allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point
 * to the end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */
	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

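/*
 * Start the per-cache allocator thread; called when a cache device is
 * registered.
 */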
int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k;

	/*
	 * In case the previous btree check operation occupied too much
	 * system memory for the bcache btree node cache, the registering
	 * process may be selected by the OOM killer. Here we just ignore
	 * a SIGKILL sent by the OOM killer, if there is one, to avoid
	 * kthread_run() failing due to pending signals. The bcache
	 * registering process will exit after the registration is done.
	 */
	if (signal_pending(current))
		flush_signals(current);

	k = kthread_run(bch_allocator_thread, ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}