// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

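/*
 * Writeback can leave the cache fragmented. When automatic gc after
 * writeback is enabled (BCH_ENABLE_AUTO_GC) and the cache is dirtier than
 * BCH_AUTO_GC_DIRTY_THRESHOLD, flag BCH_DO_AUTO_GC here; the writeback
 * thread checks the flag and wakes the gc thread once all dirty data has
 * been flushed.
 */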
static void update_gc_after_writeback(struct cache_set *c)
{
	if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
	    c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
		return;

	c->gc_after_writeback |= BCH_DO_AUTO_GC;
}

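/*
 * Rate limiting: compute how many dirty sectors this backing device may
 * keep in the cache. Illustrative numbers (matching the comment inside
 * __calc_target_rate() below): with writeback_percent = 10 and five
 * equally sized backing devices sharing the cache, each device's
 * bdev_share is about 1/5 in WRITEBACK_SHARE_SHIFT fixed point, so its
 * dirty target works out to roughly 2% of the usable cache sectors.
 */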
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				atomic_long_read(&c->flash_dev_dirty_sectors);

	/*
	 * Unfortunately there is no control of global dirty data. If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated. The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values. These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_term_inverse value of 10000 means that
	 * 1/10000th of the error is accumulated in the integral term
	 * per second. This acts as a slow, long-term average that is
	 * not subject to variations in usage like the p term.
	 */
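	/*
	 * Worked example with the defaults from bch_cached_dev_writeback_init()
	 * (illustrative numbers): if a device is 40000 sectors over target,
	 * the proportional term alone requests 40000 / 40 = 1000 sectors/s,
	 * i.e. it would retire the excess in about 40 seconds. If that same
	 * error persists for 100 seconds, the integral term has accumulated
	 * roughly 4,000,000 and contributes a further 4,000,000 / 10000 =
	 * 400 sectors/s.
	 */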
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	int32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero. Only increase the integral term if the device
		 * is keeping up. (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}

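/*
 * When the cache set has been idle for a while (no new I/O for roughly six
 * update_writeback_rate() periods per attached device), crank the writeback
 * rate up to the maximum so dirty data drains as fast as possible. Returns
 * true if the maximum rate was applied and the regular PI update can be
 * skipped.
 */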
static bool set_at_max_writeback_rate(struct cache_set *c,
				      struct cached_dev *dc)
{
	/* Don't set max writeback rate if gc is running */
	if (!c->gc_mark_valid)
		return false;
	/*
	 * idle_counter is increased every time update_writeback_rate() is
	 * called. If all backing devices attached to the same cache set have
	 * identical dc->writeback_rate_update_seconds values, it is about 6
	 * rounds of update_writeback_rate() on each backing device before
	 * c->at_max_writeback_rate is set to 1, and then the max writeback
	 * rate is set for each dc->writeback_rate.rate.
	 * In order to avoid the extra locking cost of counting the exact
	 * number of dirty cached devices, c->attached_dev_nr is used to
	 * calculate the idle threshold. It might be bigger if not all cached
	 * devices are in writeback mode, but it still works well with a
	 * limited number of extra rounds of update_writeback_rate().
	 */
	if (atomic_inc_return(&c->idle_counter) <
	    atomic_read(&c->attached_dev_nr) * 6)
		return false;

	if (atomic_read(&c->at_max_writeback_rate) != 1)
		atomic_set(&c->at_max_writeback_rate, 1);

	atomic_long_set(&dc->writeback_rate.rate, INT_MAX);

	/* keep writeback_rate_target as existing value */
	dc->writeback_rate_proportional = 0;
	dc->writeback_rate_integral_scaled = 0;
	dc->writeback_rate_change = 0;

	/*
	 * Check c->idle_counter and c->at_max_writeback_rate again in case
	 * new I/O arrives before set_at_max_writeback_rate() returns.
	 * Then the writeback rate is set to 1, and its new value should be
	 * decided via __update_writeback_rate().
	 */
	if ((atomic_read(&c->idle_counter) <
	     atomic_read(&c->attached_dev_nr) * 6) ||
	    !atomic_read(&c->at_max_writeback_rate))
		return false;

	return true;
}

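/*
 * Delayed-work callback: recomputes the writeback rate and re-arms itself
 * every writeback_rate_update_seconds. BCACHE_DEV_RATE_DW_RUNNING is set
 * while the callback runs so that teardown code knows when it is safe to
 * call cancel_delayed_work_sync() on writeback_rate_update.
 */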
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);
	struct cache_set *c = dc->disk.c;

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb();
		return;
	}

	if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
		/*
		 * If the whole cache set is idle, set_at_max_writeback_rate()
		 * will set the writeback rate to a maximum number. Then it is
		 * unnecessary to update the writeback rate for an idle cache
		 * set that is already writing back at the maximum rate.
		 */
		if (!set_at_max_writeback_rate(c, dc)) {
			down_read(&dc->writeback_lock);
			__update_writeback_rate(dc);
			update_gc_after_writeback(c);
			up_read(&dc->writeback_lock);
		}
	}

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
	    !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();
}

static unsigned int writeback_delay(struct cached_dev *dc,
				    unsigned int sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

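/*
 * State for one dirty key being written back: the closure that ties the
 * cache read and backing-device write together, the owning cached_dev, the
 * sequence number used to keep backing-device writes in key order, and the
 * bio (with inline bio_vecs allocated right after the struct) that is used
 * first for the read and then reused for the write.
 */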
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	kfree(io);
}

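/*
 * Final stage of a writeback: the data is on the backing device, so clear
 * the dirty bit on the key in the btree (the buckets holding the cached
 * copy are pinned across the insert so they cannot be reused while the
 * update is in flight), then release the keybuf slot and the in_flight
 * semaphore.
 */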
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned int i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}

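/*
 * Backing-device writes are issued in key order: each dirty_io carries a
 * sequence number, and a write whose turn has not come yet parks itself on
 * writeback_ordering_wait until writeback_sequence_next catches up. This
 * keeps the stream of writes to the backing device sequential.
 */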
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case-- it happened in indeterminate order
			 * relative to when we were added to the wait list.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device. Instead, immediately go to write_dirty_finish
	 * to clean it up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		/* I/O request sent to backing device */
		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(io->dc->disk.c, &io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

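/*
 * Main writeback pipeline for one pass: batch up to MAX_WRITEBACKS_IN_PASS
 * contiguous dirty keys (bounded by MAX_WRITESIZE_IN_PASS sectors), read
 * each one from the cache device, and hand the completed reads to
 * write_dirty(), which writes them to the backing device and then clears
 * the dirty bits in the btree. The rate limiter decides how long to sleep
 * between batches.
 */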
static void read_dirty(struct cached_dev *dc)
{
	unsigned int delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */
	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
	       next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;
			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;
			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command merging.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(sizeof(struct dirty_io) +
				     sizeof(struct bio_vec) *
				     DIV_ROUND_UP(KEY_SIZE(&w->key),
						  PAGE_SECTORS),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence	= sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);
			down(&dc->in_flight);
			/*
			 * We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		while (!kthread_should_stop() &&
		       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
		       delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned int stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	if (UUID_FLASH_ONLY(&c->uuids[inode]))
		atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned int, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf,
					     struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

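/*
 * For devices where partial stripe writes are expensive (e.g. a RAID5/6
 * backing device), prefer to write back stripes that are completely dirty:
 * walk the full_dirty_stripes bitmap, starting from the last scanned
 * position and wrapping around once, and refill the keybuf only from those
 * stripes.
 */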
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned int start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

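/*
 * The per-device writeback kthread: repeatedly refill the keybuf with
 * dirty keys and write them back via read_dirty(). When a full scan of the
 * index finds no dirty data left, mark the backing device clean (and stop
 * if it is being detached, or kick gc if auto-gc after writeback was
 * requested); otherwise sleep according to writeback_delay and the rate
 * limiter before the next pass.
 */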
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	struct cache_set *c = dc->disk.c;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback. Otherwise, if no dirty data on cache,
		 * or there is dirty data on cache but writeback is disabled,
		 * the writeback thread should sleep here and wait for others
		 * to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop() ||
			    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If the bcache device is detaching via the sysfs
			 * interface, the writeback thread should stop once
			 * there is no dirty data on the cache.
			 * BCACHE_DEV_DETACHING is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
				up_write(&dc->writeback_lock);
				break;
			}

			/*
			 * When the dirty data ratio is high (e.g. 50%+),
			 * there might be heavy bucket fragmentation after
			 * writeback finishes, which hurts subsequent write
			 * performance. If users really care about write
			 * performance they may set BCH_ENABLE_AUTO_GC via
			 * sysfs; then, when BCH_DO_AUTO_GC is set, the
			 * garbage collection thread is woken up here. After
			 * moving gc, the shrunk btree and the discarded free
			 * bucket space on the SSD may help subsequent write
			 * requests.
			 */
			if (c->gc_after_writeback ==
			    (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
				c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
				force_wake_up_gc(c);
			}
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned int delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	cached_dev_put(dc);
	wait_for_kthread_stop();

	return 0;
}

/* Init */
#define INIT_KEYS_EACH_TIME	500000
#define INIT_KEYS_SLEEP_MS	100

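/*
 * State for the initial dirty-sector count performed when a device is
 * registered: a btree op plus the device's inode number, a counter of keys
 * visited, and the key to resume from when the walk yields with -EAGAIN
 * because foreground searches are in flight.
 */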
struct sectors_dirty_init {
	struct btree_op	op;
	unsigned int	inode;
	size_t		count;
	struct bkey	start;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	op->count++;
	if (atomic_read(&b->c->search_inflight) &&
	    !(op->count % INIT_KEYS_EACH_TIME)) {
		bkey_copy_key(&op->start, k);
		return -EAGAIN;
	}

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;
	int ret;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;
	op.count = 0;
	op.start = KEY(op.inode, 0, 0);

	do {
		ret = bch_btree_map_keys(&op.op, d->c, &op.start,
					 sectors_dirty_init_fn, 0);
		if (ret == -EAGAIN)
			schedule_timeout_interruptible(
				msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
		else if (ret < 0) {
			pr_warn("sectors dirty init failed, ret=%d!", ret);
			break;
		}
	} while (ret == -EAGAIN);
}

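/*
 * Set the writeback defaults for a newly initialized cached_dev: up to 64
 * writebacks in flight, a 10% dirty target, a 30 second delay between full
 * index scans, an initial rate of 1024 sectors/s with a floor of 8, and
 * the PI controller terms used by __update_writeback_rate().
 */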
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= false;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	atomic_long_set(&dc->writeback_rate.rate, 1024);
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		return PTR_ERR(dc->writeback_thread);
	}
	dc->writeback_running = true;

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}