drivers/md/bcache/journal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
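/*
 * The entry points for this are bch_journal_read(), bch_journal_mark()
 * and bch_journal_replay() below, used in that order.
 */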

static void journal_read_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
                               unsigned int bucket_index)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->bio;

        struct journal_replay *i;
        struct jset *j, *data = ca->set->journal.w[0].data;
        struct closure cl;
        unsigned int len, left, offset = 0;
        int ret = 0;
        sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

        closure_init_stack(&cl);

        pr_debug("reading %u", bucket_index);

        while (offset < ca->sb.bucket_size) {
reread:         left = ca->sb.bucket_size - offset;
                len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

                bio_reset(bio);
                bio->bi_iter.bi_sector  = bucket + offset;
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size    = len << 9;

                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bch_bio_map(bio, data);

                closure_bio_submit(ca->set, bio, &cl);
                closure_sync(&cl);

                /* This function could be simpler now since we no longer write
                 * journal entries that overlap bucket boundaries; this means
                 * the start of a bucket will always have a valid journal entry
                 * if it has any journal entries at all.
                 */

                j = data;
                while (len) {
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);

                        if (j->magic != jset_magic(&ca->sb)) {
                                pr_debug("%u: bad magic", bucket_index);
                                return ret;
                        }

                        if (bytes > left << 9 ||
                            bytes > PAGE_SIZE << JSET_BITS) {
                                pr_info("%u: too big, %zu bytes, offset %u",
                                        bucket_index, bytes, offset);
                                return ret;
                        }

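                        /*
                         * The next jset extends past the end of what we read
                         * into the buffer: go back and re-read starting at
                         * the current offset so the whole entry is in memory
                         * before we validate its checksum.
                         */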
                        if (bytes > len << 9)
                                goto reread;

                        if (j->csum != csum_set(j)) {
                                pr_info("%u: bad csum, %zu bytes, offset %u",
                                        bucket_index, bytes, offset);
                                return ret;
                        }

                        blocks = set_blocks(j, block_bytes(ca->set));

                        /*
                         * Nodes in 'list' are in linear increasing order of
                         * i->j.seq, the node on head has the smallest (oldest)
                         * journal seq, the node on tail has the biggest
                         * (latest) journal seq.
                         */

                        /*
                         * Check from the oldest jset for last_seq. If
                         * i->j.seq < j->last_seq, it means the oldest jset
                         * in the list is expired and useless; remove it from
                         * the list. Otherwise, j is a candidate jset for
                         * the following checks.
                         */
                        while (!list_empty(list)) {
                                i = list_first_entry(list,
                                        struct journal_replay, list);
                                if (i->j.seq >= j->last_seq)
                                        break;
                                list_del(&i->list);
                                kfree(i);
                        }

                        /* iterate list in reverse order (from latest jset) */
                        list_for_each_entry_reverse(i, list, list) {
                                if (j->seq == i->j.seq)
                                        goto next_set;

                                /*
                                 * if j->seq is less than any i->j.last_seq
                                 * in list, j is an expired and useless jset.
                                 */
                                if (j->seq < i->j.last_seq)
                                        goto next_set;

                                /*
                                 * 'where' points to the first jset in the
                                 * list which is older than j.
                                 */
                                if (j->seq > i->j.seq) {
                                        where = &i->list;
                                        goto add;
                                }
                        }

                        where = list;
add:
                        i = kmalloc(offsetof(struct journal_replay, j) +
                                    bytes, GFP_KERNEL);
                        if (!i)
                                return -ENOMEM;
                        memcpy(&i->j, j, bytes);
                        /* Add to the location after 'where' points to */
                        list_add(&i->list, where);
                        ret = 1;

                        if (j->seq > ja->seq[bucket_index])
                                ja->seq[bucket_index] = j->seq;
next_set:
                        offset  += blocks * ca->sb.block_size;
                        len     -= blocks * ca->sb.block_size;
                        j = ((void *) j) + blocks * block_bytes(ca);
                }
        }

        return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
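/*
 * Read one journal bucket, remember in 'bitmap' that it has been checked,
 * and abort bch_journal_read() on error. Evaluates to nonzero if the
 * bucket contained any journal entries.
 */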
#define read_bucket(b)                                                  \
        ({                                                              \
                ret = journal_read_bucket(ca, list, b);                 \
                __set_bit(b, bitmap);                                   \
                if (ret < 0)                                            \
                        return ret;                                     \
                ret;                                                    \
        })

        struct cache *ca;
        unsigned int iter;
        int ret = 0;

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
                unsigned int i, l, r, m;
                uint64_t seq;

                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);

                /*
                 * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
                        /*
                         * We must try index l == 0 first for correctness:
                         * the journal buckets form a circular buffer which
                         * might have wrapped around, and multiplying by the
                         * golden ratio constant maps i == 0 to l == 0.
                         */
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;

                        if (test_bit(l, bitmap))
                                break;

                        if (read_bucket(l))
                                goto bsearch;
                }

                /*
                 * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");

                for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
                     l < ca->sb.njournal_buckets;
                     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
                                            l + 1))
                        if (read_bucket(l))
                                goto bsearch;

                /* no journal entries on this device? */
                if (l == ca->sb.njournal_buckets)
                        continue;
bsearch:
                BUG_ON(list_empty(list));

                /* Binary search */
                m = l;
                r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
                pr_debug("starting binary search, l %u r %u", l, r);
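                /*
                 * Invariant: bucket l is known to contain journal entries,
                 * bucket r has already been checked. If reading bucket m
                 * changes the newest seq at the list tail, bucket m added
                 * entries and the boundary lies above it; otherwise it lies
                 * at or below m.
                 */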

                while (l + 1 < r) {
                        seq = list_entry(list->prev, struct journal_replay,
                                         list)->j.seq;

                        m = (l + r) >> 1;
                        read_bucket(m);

                        if (seq != list_entry(list->prev, struct journal_replay,
                                              list)->j.seq)
                                l = m;
                        else
                                r = m;
                }

                /*
                 * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
                pr_debug("finishing up: m %u njournal_buckets %u",
                         m, ca->sb.njournal_buckets);
                l = m;

                while (1) {
                        if (!l--)
                                l = ca->sb.njournal_buckets - 1;

                        if (l == m)
                                break;

                        if (test_bit(l, bitmap))
                                continue;

                        if (!read_bucket(l))
                                break;
                }

                seq = 0;

                for (i = 0; i < ca->sb.njournal_buckets; i++)
                        if (ja->seq[i] > seq) {
                                seq = ja->seq[i];
                                /*
                                 * When journal_reclaim() goes to allocate for
                                 * the first time, it'll use the bucket after
                                 * ja->cur_idx
                                 */
                                ja->cur_idx = i;
                                ja->last_idx = ja->discard_idx = (i + 1) %
                                        ca->sb.njournal_buckets;

                        }
        }

        if (!list_empty(list))
                c->journal.seq = list_entry(list->prev,
                                            struct journal_replay,
                                            list)->j.seq;

        return 0;
#undef read_bucket
}

void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
        atomic_t p = { 0 };
        struct bkey *k;
        struct journal_replay *i;
        struct journal *j = &c->journal;
        uint64_t last = j->seq;

        /*
         * journal.pin should never fill up - we never write a journal
         * entry when it would fill up. But if for some reason it does, we
         * iterate over the list in reverse order so that we can just skip that
         * refcount instead of bugging.
         */

        list_for_each_entry_reverse(i, list, list) {
                BUG_ON(last < i->j.seq);
                i->pin = NULL;

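                /*
                 * Reserve a zeroed pin slot for every sequence number between
                 * this entry and the newer one handled in the previous
                 * iteration, so the fifo stays in step with j->seq.
                 */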
                while (last-- != i->j.seq)
                        if (fifo_free(&j->pin) > 1) {
                                fifo_push_front(&j->pin, p);
                                atomic_set(&fifo_front(&j->pin), 0);
                        }

                if (fifo_free(&j->pin) > 1) {
                        fifo_push_front(&j->pin, p);
                        i->pin = &fifo_front(&j->pin);
                        atomic_set(i->pin, 1);
                }

                for (k = i->j.start;
                     k < bset_bkey_last(&i->j);
                     k = bkey_next(k))
                        if (!__bch_extent_invalid(c, k)) {
                                unsigned int j;

                                for (j = 0; j < KEY_PTRS(k); j++)
                                        if (ptr_available(c, k, j))
                                                atomic_inc(&PTR_BUCKET(c, k, j)->pin);

                                bch_initial_mark_key(c, 0, k);
                        }
        }
}

static bool is_discard_enabled(struct cache_set *s)
{
        struct cache *ca;
        unsigned int i;

        for_each_cache(ca, s, i)
                if (ca->discard)
                        return true;

        return false;
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
        int ret = 0, keys = 0, entries = 0;
        struct bkey *k;
        struct journal_replay *i =
                list_entry(list->prev, struct journal_replay, list);

        uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
        struct keylist keylist;

        list_for_each_entry(i, list, list) {
                BUG_ON(i->pin && atomic_read(i->pin) != 1);

                if (n != i->j.seq) {
                        if (n == start && is_discard_enabled(s))
                                pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
                                        n, i->j.seq - 1, start, end);
                        else {
                                pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
                                        n, i->j.seq - 1, start, end);
                                ret = -EIO;
                                goto err;
                        }
                }

                for (k = i->j.start;
                     k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);

                        bch_keylist_init_single(&keylist, k);

                        ret = bch_btree_insert(s, &keylist, i->pin, NULL);
                        if (ret)
                                goto err;

                        BUG_ON(!bch_keylist_empty(&keylist));
                        keys++;

                        cond_resched();
                }

                if (i->pin)
                        atomic_dec(i->pin);
                n = i->j.seq + 1;
                entries++;
        }

        pr_info("journal replay done, %i keys in %i entries, seq %llu",
                keys, entries, end);
err:
        while (!list_empty(list)) {
                i = list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kfree(i);
        }

        return ret;
}

/* Journalling */

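/*
 * Number of fifo slots between the front of the journal pin fifo and
 * entry 'p'; 0 means 'p' is the oldest journal entry still pinned.
 */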
#define nr_to_fifo_front(p, front_p, mask)      (((p) - (front_p)) & (mask))

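/*
 * Flush out the dirty btree nodes that pin the oldest journal entry, so
 * that entry's refcount can drop to zero and its bucket be reclaimed.
 */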
static void btree_flush_write(struct cache_set *c)
{
        struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
        unsigned int i, nr, ref_nr;
        atomic_t *fifo_front_p, *now_fifo_front_p;
        size_t mask;

        if (c->journal.btree_flushing)
                return;

        spin_lock(&c->journal.flush_write_lock);
        if (c->journal.btree_flushing) {
                spin_unlock(&c->journal.flush_write_lock);
                return;
        }
        c->journal.btree_flushing = true;
        spin_unlock(&c->journal.flush_write_lock);

        /* get the oldest journal entry and check its refcount */
        spin_lock(&c->journal.lock);
        fifo_front_p = &fifo_front(&c->journal.pin);
        ref_nr = atomic_read(fifo_front_p);
        if (ref_nr <= 0) {
                /*
                 * do nothing if no btree node references
                 * the oldest journal entry
                 */
                spin_unlock(&c->journal.lock);
                goto out;
        }
        spin_unlock(&c->journal.lock);

        mask = c->journal.pin.mask;
        nr = 0;
        atomic_long_inc(&c->flush_write);
        memset(btree_nodes, 0, sizeof(btree_nodes));

        mutex_lock(&c->bucket_lock);
        list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
                /*
                 * It is safe to get now_fifo_front_p without holding
                 * c->journal.lock here, because we don't need the exact
                 * value, only to check whether the front pointer of
                 * c->journal.pin has changed.
                 */
                now_fifo_front_p = &fifo_front(&c->journal.pin);
                /*
                 * If the oldest journal entry is reclaimed and the front
                 * pointer of c->journal.pin changes, there is no need to
                 * scan c->btree_cache any further; just quit the loop and
                 * flush out what we already have.
                 */
                if (now_fifo_front_p != fifo_front_p)
                        break;
                /*
                 * quit this loop if all matching btree nodes are
                 * scanned and recorded in btree_nodes[] already.
                 */
                ref_nr = atomic_read(fifo_front_p);
                if (nr >= ref_nr)
                        break;

                if (btree_node_journal_flush(b))
                        pr_err("BUG: flush_write bit should not be set here!");

                mutex_lock(&b->write_lock);

                if (!btree_node_dirty(b)) {
                        mutex_unlock(&b->write_lock);
                        continue;
                }

                if (!btree_current_write(b)->journal) {
                        mutex_unlock(&b->write_lock);
                        continue;
                }

                /*
                 * Only select the btree node which exactly references
                 * the oldest journal entry.
                 *
                 * If the journal entry pointed to by fifo_front_p is
                 * reclaimed in parallel, don't worry:
                 * - the list_for_each_xxx loop will quit when checking
                 *   the next now_fifo_front_p.
                 * - If there are matched nodes recorded in btree_nodes[],
                 *   they are clean now (this is why and how the oldest
                 *   journal entry can be reclaimed). These selected nodes
                 *   will be ignored and skipped in the following for-loop.
                 */
                if (nr_to_fifo_front(btree_current_write(b)->journal,
                                     fifo_front_p,
                                     mask) != 0) {
                        mutex_unlock(&b->write_lock);
                        continue;
                }

                set_btree_node_journal_flush(b);

                mutex_unlock(&b->write_lock);

                btree_nodes[nr++] = b;
                /*
                 * To avoid holding c->bucket_lock for too long, scan for at
                 * most BTREE_FLUSH_NR matching btree nodes. If more btree
                 * nodes reference the oldest journal entry, they will be
                 * flushed the next time btree_flush_write() is called.
                 */
                if (nr == BTREE_FLUSH_NR)
                        break;
        }
        mutex_unlock(&c->bucket_lock);

        for (i = 0; i < nr; i++) {
                b = btree_nodes[i];
                if (!b) {
                        pr_err("BUG: btree_nodes[%d] is NULL", i);
                        continue;
                }

                /* safe to check without holding b->write_lock */
                if (!btree_node_journal_flush(b)) {
                        pr_err("BUG: bnode %p: journal_flush bit cleaned", b);
                        continue;
                }

                mutex_lock(&b->write_lock);
                if (!btree_current_write(b)->journal) {
                        clear_bit(BTREE_NODE_journal_flush, &b->flags);
                        mutex_unlock(&b->write_lock);
                        pr_debug("bnode %p: written by others", b);
                        continue;
                }

                if (!btree_node_dirty(b)) {
                        clear_bit(BTREE_NODE_journal_flush, &b->flags);
                        mutex_unlock(&b->write_lock);
                        pr_debug("bnode %p: dirty bit cleaned by others", b);
                        continue;
                }

                __bch_btree_node_write(b, NULL);
                clear_bit(BTREE_NODE_journal_flush, &b->flags);
                mutex_unlock(&b->write_lock);
        }

out:
        spin_lock(&c->journal.flush_write_lock);
        c->journal.btree_flushing = false;
        spin_unlock(&c->journal.flush_write_lock);
}

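/*
 * Sequence number of the oldest journal entry still tracked in the pin
 * fifo; anything older no longer pins a journal bucket.
 */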
#define last_seq(j)     ((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
        struct journal_device *ja =
                container_of(bio, struct journal_device, discard_bio);
        struct cache *ca = container_of(ja, struct cache, journal);

        atomic_set(&ja->discard_in_flight, DISCARD_DONE);

        closure_wake_up(&ca->set->journal.wait);
        closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
        struct journal_device *ja =
                container_of(work, struct journal_device, discard_work);

        submit_bio(&ja->discard_bio);
}

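/*
 * Per-device discard state machine: once a journal bucket is no longer
 * needed (discard_idx lags behind last_idx), issue an asynchronous
 * discard for it, one bucket at a time.
 */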
static void do_journal_discard(struct cache *ca)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->discard_bio;

        if (!ca->discard) {
                ja->discard_idx = ja->last_idx;
                return;
        }

        switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;

        case DISCARD_DONE:
                ja->discard_idx = (ja->discard_idx + 1) %
                        ca->sb.njournal_buckets;

                atomic_set(&ja->discard_in_flight, DISCARD_READY);
                /* fallthrough */

        case DISCARD_READY:
                if (ja->discard_idx == ja->last_idx)
                        return;

                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

                bio_init(bio, bio->bi_inline_vecs, 1);
                bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
                bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size    = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;

                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
                queue_work(bch_journal_wq, &ja->discard_work);
        }
}

static void journal_reclaim(struct cache_set *c)
{
        struct bkey *k = &c->journal.key;
        struct cache *ca;
        uint64_t last_seq;
        unsigned int iter, n = 0;
        atomic_t p __maybe_unused;

        atomic_long_inc(&c->reclaim);

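        /* Pop journal entries whose refcount has dropped to zero */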
        while (!atomic_read(&fifo_front(&c->journal.pin)))
                fifo_pop(&c->journal.pin, p);

        last_seq = last_seq(&c->journal);

        /* Update last_idx */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (ja->last_idx != ja->cur_idx &&
                       ja->seq[ja->last_idx] < last_seq)
                        ja->last_idx = (ja->last_idx + 1) %
                                ca->sb.njournal_buckets;
        }

        for_each_cache(ca, c, iter)
                do_journal_discard(ca);

        if (c->journal.blocks_free)
                goto out;

        /*
         * Allocate:
         * XXX: Sort by free journal space
         */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

                /* No space available on this device */
                if (next == ja->discard_idx)
                        continue;

                ja->cur_idx = next;
                k->ptr[n++] = MAKE_PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
                atomic_long_inc(&c->reclaimed_journal_buckets);
        }

        if (n) {
                bkey_init(k);
                SET_KEY_PTRS(k, n);
                c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
        }
out:
        if (!journal_full(&c->journal))
                __closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
        atomic_t p = { 1 };

        j->cur = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for last_seq() to be calculated correctly
         */
        BUG_ON(!fifo_push(&j->pin, p));
        atomic_set(&fifo_back(&j->pin), 1);

        j->cur->data->seq       = ++j->seq;
        j->cur->dirty           = false;
        j->cur->need_write      = false;
        j->cur->data->keys      = 0;

        if (fifo_full(&j->pin))
                pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
        struct journal_write *w = bio->bi_private;

        cache_set_err_on(bio->bi_status, w->c, "journal io error");
        closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

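/*
 * Completion for a journal write: the buffer that just finished is the
 * one j->cur does *not* point at, since bch_journal_next() switched cur
 * before the bio was submitted.
 */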
static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct journal_write *w = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        __closure_wake_up(&w->wait);
        continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
        __releases(&c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        c->journal.io_in_flight = 0;
        spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);
        struct cache *ca;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
        unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
                c->sb.block_size;

        struct bio *bio;
        struct bio_list list;

        bio_list_init(&list);

        if (!w->need_write) {
                closure_return_with_destructor(cl, journal_write_unlock);
                return;
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);

                btree_flush_write(c);
                continue_at(cl, journal_write, bch_journal_wq);
                return;
        }

        c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

        w->data->btree_level = c->root->level;

        bkey_copy(&w->data->btree_root, &c->root->key);
        bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

        for_each_cache(ca, c, i)
                w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

        w->data->magic          = jset_magic(&c->sb);
        w->data->version        = BCACHE_JSET_VERSION;
        w->data->last_seq       = last_seq(&c->journal);
        w->data->csum           = csum_set(w->data);

        for (i = 0; i < KEY_PTRS(k); i++) {
                ca = PTR_CACHE(c, k, i);
                bio = &ca->journal.bio;

                atomic_long_add(sectors, &ca->meta_sectors_written);

                bio_reset(bio);
                bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size = sectors << 9;

                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
                bio_set_op_attrs(bio, REQ_OP_WRITE,
                                 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
                bch_bio_map(bio, w->data);

                trace_bcache_journal_write(bio, w->data->keys);
                bio_list_add(&list, bio);

                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

                ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
        }

        /* If KEY_PTRS(k) == 0, this jset would be lost in thin air */
        BUG_ON(i == 0);

        atomic_dec_bug(&fifo_back(&c->journal.pin));
        bch_journal_next(&c->journal);
        journal_reclaim(c);

        spin_unlock(&c->journal.lock);

        while ((bio = bio_list_pop(&list)))
                closure_bio_submit(c, bio, cl);

        continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        spin_lock(&c->journal.lock);
        journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io;
        struct journal_write *w = c->journal.cur;

        w->need_write = true;

        if (!c->journal.io_in_flight) {
                c->journal.io_in_flight = 1;
                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
        } else {
                spin_unlock(&c->journal.lock);
        }
}

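/*
 * Return the current in-memory journal write with room for 'nkeys' more
 * keys, kicking off journal writes and reclaim as needed until space is
 * available. Returns with c->journal.lock held.
 */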
static struct journal_write *journal_wait_for_write(struct cache_set *c,
                                                    unsigned int nkeys)
        __acquires(&c->journal.lock)
{
        size_t sectors;
        struct closure cl;
        bool wait = false;

        closure_init_stack(&cl);

        spin_lock(&c->journal.lock);

        while (1) {
                struct journal_write *w = c->journal.cur;

                sectors = __set_blocks(w->data, w->data->keys + nkeys,
                                       block_bytes(c)) * c->sb.block_size;

                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * c->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;

                if (wait)
                        closure_wait(&c->journal.wait, &cl);

                if (!journal_full(&c->journal)) {
                        if (wait)
                                trace_bcache_journal_entry_full(c);

                        /*
                         * XXX: If we were inserting so many keys that they
                         * won't fit in an _empty_ journal write, we'll
                         * deadlock. For now, handle this in
                         * bch_keylist_realloc() - but something to think about.
                         */
                        BUG_ON(!w->data->keys);

                        journal_try_write(c); /* unlocks */
                } else {
                        if (wait)
                                trace_bcache_journal_full(c);

                        journal_reclaim(c);
                        spin_unlock(&c->journal.lock);

                        btree_flush_write(c);
                }

                closure_sync(&cl);
                spin_lock(&c->journal.lock);
                wait = true;
        }
}

static void journal_write_work(struct work_struct *work)
{
        struct cache_set *c = container_of(to_delayed_work(work),
                                           struct cache_set,
                                           journal.work);
        spin_lock(&c->journal.lock);
        if (c->journal.cur->dirty)
                journal_try_write(c);
        else
                spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
                      struct keylist *keys,
                      struct closure *parent)
{
        struct journal_write *w;
        atomic_t *ret;

        /* No journaling if CACHE_SET_IO_DISABLE set already */
        if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
                return NULL;

        if (!CACHE_SYNC(&c->sb))
                return NULL;

        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

        memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);

        ret = &fifo_back(&c->journal.pin);
        atomic_inc(ret);

        if (parent) {
                closure_wait(&w->wait, parent);
                journal_try_write(c);
        } else if (!w->dirty) {
                w->dirty = true;
                schedule_delayed_work(&c->journal.work,
                                      msecs_to_jiffies(c->journal_delay_ms));
                spin_unlock(&c->journal.lock);
        } else {
                spin_unlock(&c->journal.lock);
        }

        return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
        struct keylist keys;
        atomic_t *ref;

        bch_keylist_init(&keys);

        ref = bch_journal(c, &keys, cl);
        if (ref)
                atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
        free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
        free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
        free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
        struct journal *j = &c->journal;

        spin_lock_init(&j->lock);
        spin_lock_init(&j->flush_write_lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);

        c->journal_delay_ms = 100;

        j->w[0].c = c;
        j->w[1].c = c;

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
                return -ENOMEM;

        return 0;
}