bcache: Convert writeback to a kthread
drivers/md/bcache/writeback.c
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */

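/*
 * The writeback rate is set by a PD (proportional-derivative)
 * controller.  A sketch of the arithmetic, assuming the defaults from
 * bch_cached_dev_writeback_init() (writeback_percent = 10,
 * p_term_inverse = 64, d_term = 16, d_smooth = 8):
 *
 *   target = 10% of the cache's sectors, scaled by this backing
 *            device's share of cached_dev_sectors
 *   error  = (dirty + derivative - target) / target, in 256ths
 *   change = rate * error / 64
 *
 * where the derivative is the per-period change in the dirty sector
 * count, smoothed with an exponentially weighted moving average.  The
 * rate is clamped to [1, NSEC_PER_MSEC], and is never raised while the
 * device is failing to keep up (writeback_rate.next more than 10ms
 * behind local_clock()).
 */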
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int change = 0;
	int64_t error;
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;

	dc->disk.sectors_dirty_last = dirty;

	derivative *= dc->writeback_rate_d_term;
	derivative = clamp(derivative, -dirty, dirty);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      dc->writeback_rate_d_smooth, 0);

	/* Avoid divide by zero */
	if (!target)
		goto out;

	error = div64_s64((dirty + derivative - target) << 8, target);

	change = div_s64((dc->writeback_rate.rate * error) >> 8,
			 dc->writeback_rate_p_term_inverse);

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);
out:
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

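/*
 * Translate the current rate into a sleep, in jiffies, to be taken
 * after writing back @sectors.  The 10000000ULL weighting is assumed
 * here to match bch_next_delay()'s internal unit convention (done /
 * rate yielding nanoseconds); see bch_next_delay() in util.c.  The
 * result is capped at HZ so the thread never sleeps for more than a
 * second, and no throttling happens while detaching or when
 * writeback_percent is 0.
 */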
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	uint64_t ret;

	if (atomic_read(&dc->disk.detaching) ||
	    !dc->writeback_percent)
		return 0;

	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);

	return min_t(uint64_t, ret, HZ);
}

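/*
 * One dirty_io is allocated per dirty key being written back.  The
 * closure sequences the read -> write -> btree-update pipeline, and
 * the embedded bio (with inline bio_vecs, sized at allocation time in
 * read_dirty()) is reinitialized by dirty_init() and reused for both
 * the cache read and the backing device write.
 */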
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

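/*
 * Last stage of the pipeline: record in the btree that the data is now
 * clean.  BTREE_REPLACE makes the insert conditional on the key being
 * unchanged: if a foreground write redirtied this extent while it was
 * in flight, the insert collides, the sectors stay dirty, and the
 * failure is counted in writeback_keys_failed.
 */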
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		unsigned i;
		struct btree_op op;
		struct keylist keys;

		bch_btree_op_init_stack(&op);
		bch_keylist_init(&keys);

		op.type = BTREE_REPLACE;
		bkey_copy(&op.replace, &w->key);

		SET_KEY_DIRTY(&w->key, false);
		bch_keylist_add(&keys, &w->key);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		bch_btree_insert(&op, dc->disk.c, &keys);
		closure_sync(&op.cl);

		if (op.insert_collision)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(op.insert_collision
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

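/*
 * Note the error convention (the "dumb way of signalling errors"
 * above): clearing KEY_DIRTY on the in-memory copy of the key makes
 * write_dirty_finish() skip the btree update, so the extent stays
 * dirty in the btree and will be retried on a later pass.
 */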
static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_sector	= KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, system_wq);
}

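/*
 * Main writeback loop.  For each dirty key pulled off the keybuf: read
 * the data from the cache device (here), write it to the backing
 * device (write_dirty()), then clear the dirty bit in the btree
 * (write_dirty_finish()).  Keys contiguous with the previous one
 * (KEY_START == dc->last_read) skip the rate-limit sleep unless more
 * than 50ms of delay has accumulated, and dc->in_flight caps the
 * pipeline at 64 keys in flight.
 */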
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

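/*
 * Dirty sector counts are tracked per stripe so that
 * dirty_full_stripe_pred() below can cheaply ask whether a stripe is
 * entirely dirty.  @nr_sectors may be negative to subtract (when
 * sectors are written back); stripe_size must be a power of two, since
 * the offset within a stripe is computed with a mask.
 */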
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset;
	uint64_t stripe = offset;

	if (!d)
		return;

	do_div(stripe, d->stripe_size);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		atomic_add(s, d->stripe_sectors_dirty + stripe);
		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

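/*
 * Stricter predicate used when partial_stripes_expensive is set: only
 * take keys that touch at least one completely dirty stripe.  The
 * intended case (an assumption - it isn't spelled out in this file) is
 * a backing device where sub-stripe writes force a read-modify-write,
 * such as a RAID 5/6 array.
 */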
static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
{
	uint64_t stripe = KEY_START(k);
	unsigned nr_sectors = KEY_SIZE(k);
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	if (!KEY_DIRTY(k))
		return false;

	do_div(stripe, dc->disk.stripe_size);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
		    dc->disk.stripe_size)
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	bool searched_from_start = false;
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	if (dc->partial_stripes_expensive) {
		uint64_t i;

		for (i = 0; i < dc->disk.nr_stripes; i++)
			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
			    dc->disk.stripe_size)
				goto full_stripes;

		goto normal_refill;
full_stripes:
		searched_from_start = false;	/* not searching entire btree */
		bch_refill_keybuf(dc->disk.c, buf, &end,
				  dirty_full_stripe_pred);
	} else {
normal_refill:
		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
	}

	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
}

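/*
 * The writeback kthread: sleep while there is nothing to do, refill
 * the keybuf from the btree, then push the keys through read_dirty().
 * When a scan of the full index turns up nothing dirty, the device is
 * marked clean (has_dirty cleared, BDEV_STATE_CLEAN written to the
 * superblock) and the reference taken when it first went dirty is
 * dropped via cached_dev_put().
 */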
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!atomic_read(&dc->disk.detaching) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			try_to_freeze();
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !atomic_read(&dc->disk.detaching))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}

/* Init */

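/*
 * The per-stripe dirty counters are not persisted; rebuild them at
 * startup by walking every key belonging to this device, recursing
 * into child nodes via the btree() macro on non-leaf levels.
 */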
static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
					struct cached_dev *dc)
{
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
		if (!b->level) {
			if (KEY_INODE(k) > dc->disk.id)
				break;

			if (KEY_DIRTY(k))
				bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
							     KEY_START(k),
							     KEY_SIZE(k));
		} else {
			btree(sectors_dirty_init, k, b, op, dc);
			if (KEY_INODE(k) > dc->disk.id)
				break;

			cond_resched();
		}

	return 0;
}

void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct btree_op op;

	bch_btree_op_init_stack(&op);
	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
}

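/*
 * Defaults: target 10% of the cache dirty, re-tune the rate every 30
 * seconds, at most 64 writeback IOs in flight (the in_flight
 * semaphore).  The kthread is created with kthread_create() rather
 * than kthread_run(), so it is not woken here; presumably it is woken
 * later (wake_up_process()) once the device is fully attached.
 */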
int bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 30;
	dc->writeback_rate_d_term	= 16;
	dc->writeback_rate_p_term_inverse = 64;
	dc->writeback_rate_d_smooth	= 8;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	return 0;
}