2 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 #include <linux/kernel.h>
15 #include <linux/wait.h>
16 #include <linux/blkdev.h>
17 #include <linux/slab.h>
18 #include <linux/raid/md_p.h>
19 #include <linux/crc32c.h>
20 #include <linux/random.h>
/*
 * metadata/data are stored on disk in 4k units (blocks) regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
28 #define BLOCK_SECTORS (8)
/*
 * reclaim runs once reclaimable space reaches 1/4 of the disk size or 10G,
 * whichever is smaller. This prevents recovery from having to scan a very
 * long log.
 */
34 #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
35 #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
38 * We only need 2 bios per I/O unit to make progress, but ensure we
39 * have a few more available to not get too tight.
41 #define R5L_POOL_SIZE 4
struct r5l_log {
struct md_rdev *rdev;

u32 uuid_checksum;

sector_t device_size; /* log device size, rounded to BLOCK_SECTORS */
sector_t max_free_space; /* reclaim runs if free space is at this size */
sector_t last_checkpoint; /* log tail: where recovery scanning starts */
55 u64 last_cp_seq; /* log tail sequence */
57 sector_t log_start; /* log head. where new data appends */
58 u64 seq; /* log head sequence */
60 sector_t next_checkpoint;
63 struct mutex io_mutex;
64 struct r5l_io_unit *current_io; /* current io_unit accepting new data */
66 spinlock_t io_list_lock;
struct list_head running_ios; /* io_units which are still running,
* and have not yet been completely
* written to the log */
struct list_head io_end_ios; /* io_units which have been completely
* written to the log but not yet
* written to the RAID */
struct list_head flushing_ios; /* io_units which are waiting for a log
* cache flush */
struct list_head finished_ios; /* io_units which have settled down in
* the log disk */
struct bio flush_bio;
78 struct list_head no_mem_stripes; /* pending stripes, -ENOMEM */
80 struct kmem_cache *io_kc;
85 struct md_thread *reclaim_thread;
unsigned long reclaim_target; /* amount of space that needs to be
* reclaimed. if it's 0, reclaim space
* used by io_units which are in
* IO_UNIT_STRIPE_END state (i.e. reclaim
* doesn't wait for a specific io_unit
* to switch to IO_UNIT_STRIPE_END state) */
93 wait_queue_head_t iounit_wait;
95 struct list_head no_space_stripes; /* pending stripes, log has no space */
96 spinlock_t no_space_stripes_lock;
98 bool need_cache_flush;
};

/*
 * an IO range starts at a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. An io_unit is written to the log disk with normal writes: since we
 * always flush the log disk before moving data to the raid disks, there is
 * no need to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
struct r5l_log *log;
112 struct page *meta_page; /* store meta block */
113 int meta_offset; /* current offset in meta_page */
115 struct bio *current_bio;/* current_bio accepting new data */
117 atomic_t pending_stripe;/* how many stripes not flushed to raid */
118 u64 seq; /* seq number of the metablock */
119 sector_t log_start; /* where the io_unit starts */
120 sector_t log_end; /* where the io_unit ends */
121 struct list_head log_sibling; /* log->running_ios */
struct list_head stripe_list; /* stripes added to the io_unit */

int state;
bool need_split_bio;
};
128 /* r5l_io_unit state */
129 enum r5l_io_unit_state {
130 IO_UNIT_RUNNING = 0, /* accepting new IO */
IO_UNIT_IO_START = 1, /* io_unit bio has started writing to the log,
* no longer accepting new bios */
IO_UNIT_IO_END = 2, /* io_unit bio has finished writing to the log */
IO_UNIT_STRIPE_END = 3, /* stripe data has finished writing to raid */
};
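/*
 * The log device is used as a ring buffer: positions are sector offsets
 * that wrap at log->device_size. The helpers below advance a position and
 * compute the distance between two positions on that ring.
 */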
137 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
140 if (start >= log->device_size)
141 start = start - log->device_size;
145 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
151 return end + log->device_size - start;
154 static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
158 used_size = r5l_ring_distance(log, log->last_checkpoint,
161 return log->device_size > used_size + size;
164 static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
165 enum r5l_io_unit_state state)
if (WARN_ON(io->state >= state))
return;
io->state = state;
172 static void r5l_io_run_stripes(struct r5l_io_unit *io)
174 struct stripe_head *sh, *next;
176 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
177 list_del_init(&sh->log_list);
178 set_bit(STRIPE_HANDLE, &sh->state);
179 raid5_release_stripe(sh);
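/*
 * Walk running_ios in order and, for each io_unit that has finished its
 * log write, move it to finished_ios and release its stripes so they can
 * be written to the raid disks. Stop at the first io_unit that is still
 * being written, to preserve ordering.
 */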
183 static void r5l_log_run_stripes(struct r5l_log *log)
185 struct r5l_io_unit *io, *next;
187 assert_spin_locked(&log->io_list_lock);
189 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
190 /* don't change list order */
191 if (io->state < IO_UNIT_IO_END)
194 list_move_tail(&io->log_sibling, &log->finished_ios);
195 r5l_io_run_stripes(io);
199 static void r5l_move_to_end_ios(struct r5l_log *log)
201 struct r5l_io_unit *io, *next;
203 assert_spin_locked(&log->io_list_lock);
205 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
206 /* don't change list order */
207 if (io->state < IO_UNIT_IO_END)
209 list_move_tail(&io->log_sibling, &log->io_end_ios);
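/*
 * Completion handler for an io_unit's log write. If the log device has a
 * volatile write cache, the io_unit is parked on io_end_ios and raid5d is
 * woken to issue a cache flush; otherwise its stripes can be handed to the
 * raid disks immediately.
 */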
213 static void r5l_log_endio(struct bio *bio)
215 struct r5l_io_unit *io = bio->bi_private;
216 struct r5l_log *log = io->log;
220 md_error(log->rdev->mddev, log->rdev);
223 mempool_free(io->meta_page, log->meta_pool);
225 spin_lock_irqsave(&log->io_list_lock, flags);
226 __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
227 if (log->need_cache_flush)
r5l_move_to_end_ios(log);
else
r5l_log_run_stripes(log);
231 spin_unlock_irqrestore(&log->io_list_lock, flags);
233 if (log->need_cache_flush)
234 md_wakeup_thread(log->rdev->mddev->thread);
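/*
 * Close the current io_unit: record the final meta_size, checksum the meta
 * block, mark the io_unit IO_UNIT_IO_START and submit its bio to the log
 * device.
 */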
237 static void r5l_submit_current_io(struct r5l_log *log)
239 struct r5l_io_unit *io = log->current_io;
240 struct r5l_meta_block *block;
247 block = page_address(io->meta_page);
248 block->meta_size = cpu_to_le32(io->meta_offset);
249 crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
250 block->checksum = cpu_to_le32(crc);
252 log->current_io = NULL;
253 spin_lock_irqsave(&log->io_list_lock, flags);
254 __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
255 spin_unlock_irqrestore(&log->io_list_lock, flags);
257 submit_bio(io->current_bio);
260 static struct bio *r5l_bio_alloc(struct r5l_log *log)
262 struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
264 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
265 bio->bi_bdev = log->rdev->bdev;
266 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
271 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
273 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
/*
 * If we filled up the log device, start from the beginning again,
 * which will require a new bio.
 *
 * Note: for this to work properly the log size needs to be a multiple
 * of BLOCK_SECTORS.
 */
282 if (log->log_start == 0)
283 io->need_split_bio = true;
285 io->log_end = log->log_start;
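/*
 * Start a new io_unit: take one from the mempool, fill in the meta block
 * header (magic, version, seq, position), attach a fresh bio whose first
 * page is the meta page, reserve the meta block's space in the log and
 * link the io_unit onto running_ios.
 */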
288 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
290 struct r5l_io_unit *io;
291 struct r5l_meta_block *block;
293 io = mempool_alloc(log->io_pool, GFP_ATOMIC);
296 memset(io, 0, sizeof(*io));
299 INIT_LIST_HEAD(&io->log_sibling);
300 INIT_LIST_HEAD(&io->stripe_list);
301 io->state = IO_UNIT_RUNNING;
303 io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
304 block = page_address(io->meta_page);
306 block->magic = cpu_to_le32(R5LOG_MAGIC);
307 block->version = R5LOG_VERSION;
308 block->seq = cpu_to_le64(log->seq);
309 block->position = cpu_to_le64(log->log_start);
311 io->log_start = log->log_start;
312 io->meta_offset = sizeof(struct r5l_meta_block);
313 io->seq = log->seq++;
315 io->current_bio = r5l_bio_alloc(log);
316 io->current_bio->bi_end_io = r5l_log_endio;
317 io->current_bio->bi_private = io;
318 bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
320 r5_reserve_log_entry(log, io);
322 spin_lock_irq(&log->io_list_lock);
323 list_add_tail(&io->log_sibling, &log->running_ios);
324 spin_unlock_irq(&log->io_list_lock);
329 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
331 if (log->current_io &&
332 log->current_io->meta_offset + payload_size > PAGE_SIZE)
333 r5l_submit_current_io(log);
335 if (!log->current_io) {
336 log->current_io = r5l_new_meta(log);
337 if (!log->current_io)
344 static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
346 u32 checksum1, u32 checksum2,
347 bool checksum2_valid)
349 struct r5l_io_unit *io = log->current_io;
350 struct r5l_payload_data_parity *payload;
352 payload = page_address(io->meta_page) + io->meta_offset;
353 payload->header.type = cpu_to_le16(type);
354 payload->header.flags = cpu_to_le16(0);
355 payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
357 payload->location = cpu_to_le64(location);
358 payload->checksum[0] = cpu_to_le32(checksum1);
360 payload->checksum[1] = cpu_to_le32(checksum2);
362 io->meta_offset += sizeof(struct r5l_payload_data_parity) +
363 sizeof(__le32) * (1 + !!checksum2_valid);
366 static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
368 struct r5l_io_unit *io = log->current_io;
370 if (io->need_split_bio) {
371 struct bio *prev = io->current_bio;
373 io->current_bio = r5l_bio_alloc(log);
374 bio_chain(io->current_bio, prev);
379 if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
382 r5_reserve_log_entry(log, io);
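/*
 * Append one stripe to the current io_unit: a data payload (descriptor,
 * checksum and page) for every data disk with R5_Wantwrite set, followed
 * by a single parity payload covering P, or P and Q for RAID6.
 */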
385 static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
386 int data_pages, int parity_pages)
391 struct r5l_io_unit *io;
394 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
396 sizeof(struct r5l_payload_data_parity) +
397 sizeof(__le32) * parity_pages;
399 ret = r5l_get_meta(log, meta_size);
403 io = log->current_io;
405 for (i = 0; i < sh->disks; i++) {
406 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
408 if (i == sh->pd_idx || i == sh->qd_idx)
410 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
411 raid5_compute_blocknr(sh, i, 0),
412 sh->dev[i].log_checksum, 0, false);
413 r5l_append_payload_page(log, sh->dev[i].page);
416 if (sh->qd_idx >= 0) {
417 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
418 sh->sector, sh->dev[sh->pd_idx].log_checksum,
419 sh->dev[sh->qd_idx].log_checksum, true);
420 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
421 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
423 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
424 sh->sector, sh->dev[sh->pd_idx].log_checksum,
426 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
429 list_add_tail(&sh->log_list, &io->stripe_list);
430 atomic_inc(&io->pending_stripe);
436 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
438 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
439 * data from log to raid disks), so we shouldn't wait for reclaim here
441 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
444 int data_pages, parity_pages;
452 /* Don't support stripe batch */
453 if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
454 test_bit(STRIPE_SYNCING, &sh->state)) {
455 /* the stripe is written to log, we start writing it to raid */
456 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
460 for (i = 0; i < sh->disks; i++) {
463 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
466 /* checksum is already calculated in last run */
467 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
469 addr = kmap_atomic(sh->dev[i].page);
470 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
474 parity_pages = 1 + !!(sh->qd_idx >= 0);
475 data_pages = write_disks - parity_pages;
478 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
480 sizeof(struct r5l_payload_data_parity) +
481 sizeof(__le32) * parity_pages;
482 /* Doesn't work with very big raid array */
483 if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
486 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
/*
 * The stripe must enter the state machine again to finish the write, so
 * don't delay.
 */
491 clear_bit(STRIPE_DELAYED, &sh->state);
492 atomic_inc(&sh->count);
494 mutex_lock(&log->io_mutex);
496 reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
497 if (!r5l_has_free_space(log, reserve)) {
498 spin_lock(&log->no_space_stripes_lock);
499 list_add_tail(&sh->log_list, &log->no_space_stripes);
500 spin_unlock(&log->no_space_stripes_lock);
502 r5l_wake_reclaim(log, reserve);
504 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
506 spin_lock_irq(&log->io_list_lock);
507 list_add_tail(&sh->log_list, &log->no_mem_stripes);
508 spin_unlock_irq(&log->io_list_lock);
512 mutex_unlock(&log->io_mutex);
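/* submit the current, partially filled io_unit (if any) to the log device */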
516 void r5l_write_stripe_run(struct r5l_log *log)
520 mutex_lock(&log->io_mutex);
521 r5l_submit_current_io(log);
522 mutex_unlock(&log->io_mutex);
525 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
/*
 * we flush the log disk cache first, then write stripe data to the raid
 * disks. So if this bio is finished, the log disk cache has already been
 * flushed. Recovery guarantees we can recover the bio from the log disk,
 * so we don't need to flush again.
 */
535 if (bio->bi_iter.bi_size == 0) {
539 bio->bi_opf &= ~REQ_PREFLUSH;
543 /* This will run after log space is reclaimed */
544 static void r5l_run_no_space_stripes(struct r5l_log *log)
546 struct stripe_head *sh;
548 spin_lock(&log->no_space_stripes_lock);
549 while (!list_empty(&log->no_space_stripes)) {
550 sh = list_first_entry(&log->no_space_stripes,
551 struct stripe_head, log_list);
552 list_del_init(&sh->log_list);
553 set_bit(STRIPE_HANDLE, &sh->state);
554 raid5_release_stripe(sh);
556 spin_unlock(&log->no_space_stripes_lock);
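/*
 * The region between last_checkpoint and next_checkpoint only backs
 * io_units whose stripes have already reached the raid disks, so it is
 * safe to reclaim.
 */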
559 static sector_t r5l_reclaimable_space(struct r5l_log *log)
561 return r5l_ring_distance(log, log->last_checkpoint,
562 log->next_checkpoint);
565 static void r5l_run_no_mem_stripe(struct r5l_log *log)
567 struct stripe_head *sh;
569 assert_spin_locked(&log->io_list_lock);
571 if (!list_empty(&log->no_mem_stripes)) {
572 sh = list_first_entry(&log->no_mem_stripes,
573 struct stripe_head, log_list);
574 list_del_init(&sh->log_list);
575 set_bit(STRIPE_HANDLE, &sh->state);
576 raid5_release_stripe(sh);
580 static bool r5l_complete_finished_ios(struct r5l_log *log)
582 struct r5l_io_unit *io, *next;
585 assert_spin_locked(&log->io_list_lock);
587 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
588 /* don't change list order */
589 if (io->state < IO_UNIT_STRIPE_END)
592 log->next_checkpoint = io->log_start;
593 log->next_cp_seq = io->seq;
595 list_del(&io->log_sibling);
596 mempool_free(io, log->io_pool);
597 r5l_run_no_mem_stripe(log);
605 static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
607 struct r5l_log *log = io->log;
610 spin_lock_irqsave(&log->io_list_lock, flags);
611 __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
613 if (!r5l_complete_finished_ios(log)) {
614 spin_unlock_irqrestore(&log->io_list_lock, flags);
618 if (r5l_reclaimable_space(log) > log->max_free_space)
619 r5l_wake_reclaim(log, 0);
621 spin_unlock_irqrestore(&log->io_list_lock, flags);
622 wake_up(&log->iounit_wait);
625 void r5l_stripe_write_finished(struct stripe_head *sh)
627 struct r5l_io_unit *io;
632 if (io && atomic_dec_and_test(&io->pending_stripe))
633 __r5l_stripe_write_finished(io);
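/*
 * Completion handler for the log-device cache flush: every io_unit that
 * was waiting on the flush now has its data safely on media, so its
 * stripes can be written to the raid disks.
 */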
636 static void r5l_log_flush_endio(struct bio *bio)
638 struct r5l_log *log = container_of(bio, struct r5l_log,
641 struct r5l_io_unit *io;
644 md_error(log->rdev->mddev, log->rdev);
646 spin_lock_irqsave(&log->io_list_lock, flags);
647 list_for_each_entry(io, &log->flushing_ios, log_sibling)
648 r5l_io_run_stripes(io);
649 list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
650 spin_unlock_irqrestore(&log->io_list_lock, flags);
/*
 * Start dispatching IO to raid.
 * The log consists of a sequence of io_units, each headed by a meta block.
 * There is one situation we want to avoid: a broken meta block in the
 * middle of the log would stop recovery from finding any meta block after
 * it. So if an operation requires the meta block at the head of the log to
 * be persistent in the log, every meta block before it must be persistent
 * too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe's data/parity must be persistent in the log
 * before we do the write to the raid disks.
 *
 * The solution is to strictly maintain the io_unit list order. We only
 * write the stripes of an io_unit to the raid disks once every io_unit up
 * to and including it has its data/parity in the log.
 */
667 void r5l_flush_stripe_to_raid(struct r5l_log *log)
671 if (!log || !log->need_cache_flush)
674 spin_lock_irq(&log->io_list_lock);
675 /* flush bio is running */
676 if (!list_empty(&log->flushing_ios)) {
677 spin_unlock_irq(&log->io_list_lock);
680 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
681 do_flush = !list_empty(&log->flushing_ios);
682 spin_unlock_irq(&log->io_list_lock);
686 bio_reset(&log->flush_bio);
687 log->flush_bio.bi_bdev = log->rdev->bdev;
688 log->flush_bio.bi_end_io = r5l_log_flush_endio;
689 bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
690 submit_bio(&log->flush_bio);
693 static void r5l_write_super(struct r5l_log *log, sector_t cp);
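/*
 * Record the new log tail in the superblock, then discard the reclaimed
 * region of the log device (in two pieces if it wraps around the end of
 * the device).
 */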
694 static void r5l_write_super_and_discard_space(struct r5l_log *log,
697 struct block_device *bdev = log->rdev->bdev;
700 r5l_write_super(log, end);
702 if (!blk_queue_discard(bdev_get_queue(bdev)))
705 mddev = log->rdev->mddev;
/*
 * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
 * waits for this thread to finish. This thread waits for
 * MD_CHANGE_PENDING to be cleared, which is supposed to be done in
 * md_check_recovery(). md_check_recovery() tries to get
 * reconfig_mutex. Since r5l_quiesce already holds the mutex,
 * md_check_recovery() fails, so PENDING never gets cleared. The
 * in_teardown check works around this issue.
 */
715 if (!log->in_teardown) {
716 set_mask_bits(&mddev->flags, 0,
717 BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
718 md_wakeup_thread(mddev->thread);
719 wait_event(mddev->sb_wait,
720 !test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
 * r5l_quiesce could run after the in_teardown check and take the
 * mutex first. The superblock might get updated twice.
726 if (log->in_teardown)
727 md_update_sb(mddev, 1);
729 WARN_ON(!mddev_is_locked(mddev));
730 md_update_sb(mddev, 1);
733 /* discard IO error really doesn't matter, ignore it */
734 if (log->last_checkpoint < end) {
735 blkdev_issue_discard(bdev,
736 log->last_checkpoint + log->rdev->data_offset,
737 end - log->last_checkpoint, GFP_NOIO, 0);
} else {
blkdev_issue_discard(bdev,
log->last_checkpoint + log->rdev->data_offset,
log->device_size - log->last_checkpoint,
GFP_NOIO, 0);
blkdev_issue_discard(bdev, log->rdev->data_offset, end,
GFP_NOIO, 0);
}
749 static void r5l_do_reclaim(struct r5l_log *log)
751 sector_t reclaim_target = xchg(&log->reclaim_target, 0);
752 sector_t reclaimable;
753 sector_t next_checkpoint;
756 spin_lock_irq(&log->io_list_lock);
/*
 * Move the proper io_units to the reclaim list. We should not change the
 * order: reclaimable and unreclaimable io_units can be mixed in the list,
 * and we shouldn't reuse the space of an unreclaimable io_unit.
 */
763 reclaimable = r5l_reclaimable_space(log);
764 if (reclaimable >= reclaim_target ||
765 (list_empty(&log->running_ios) &&
766 list_empty(&log->io_end_ios) &&
767 list_empty(&log->flushing_ios) &&
768 list_empty(&log->finished_ios)))
771 md_wakeup_thread(log->rdev->mddev->thread);
772 wait_event_lock_irq(log->iounit_wait,
773 r5l_reclaimable_space(log) > reclaimable,
777 next_checkpoint = log->next_checkpoint;
778 next_cp_seq = log->next_cp_seq;
779 spin_unlock_irq(&log->io_list_lock);
781 BUG_ON(reclaimable < 0);
782 if (reclaimable == 0)
/*
 * write_super will flush the cache of each raid disk. We must write the
 * super here, because the log area might be reused soon and we don't want
 * to confuse recovery.
 */
790 r5l_write_super_and_discard_space(log, next_checkpoint);
792 mutex_lock(&log->io_mutex);
793 log->last_checkpoint = next_checkpoint;
794 log->last_cp_seq = next_cp_seq;
795 mutex_unlock(&log->io_mutex);
797 r5l_run_no_space_stripes(log);
800 static void r5l_reclaim_thread(struct md_thread *thread)
802 struct mddev *mddev = thread->mddev;
803 struct r5conf *conf = mddev->private;
804 struct r5l_log *log = conf->log;
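/*
 * Ask the reclaim thread to free at least @space sectors. The target is
 * published with a cmpxchg loop so concurrent callers don't race; the
 * reclaim thread consumes it with xchg() in r5l_do_reclaim().
 */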
811 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
813 unsigned long target;
814 unsigned long new = (unsigned long)space; /* overflow in theory */
817 target = log->reclaim_target;
820 } while (cmpxchg(&log->reclaim_target, target, new) != target);
821 md_wakeup_thread(log->reclaim_thread);
824 void r5l_quiesce(struct r5l_log *log, int state)
827 if (!log || state == 2)
830 log->in_teardown = 0;
/*
 * This is a special case for hotadd. In suspend, the array has no
 * journal. In resume, the journal is initialized as well as the
 * reclaim thread.
 */
836 if (log->reclaim_thread)
838 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
839 log->rdev->mddev, "reclaim");
840 } else if (state == 1) {
842 * at this point all stripes are finished, so io_unit is at
843 * least in STRIPE_END state
845 log->in_teardown = 1;
846 /* make sure r5l_write_super_and_discard_space exits */
847 mddev = log->rdev->mddev;
848 wake_up(&mddev->sb_wait);
849 r5l_wake_reclaim(log, -1L);
850 md_unregister_thread(&log->reclaim_thread);
855 bool r5l_log_disk_error(struct r5conf *conf)
859 /* don't allow write if journal disk is missing */
861 log = rcu_dereference(conf->log);
if (!log)
ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
else
ret = test_bit(Faulty, &log->rdev->flags);
871 struct r5l_recovery_ctx {
872 struct page *meta_page; /* current meta */
873 sector_t meta_total_blocks; /* total size of current meta and data */
874 sector_t pos; /* recovery position */
u64 seq; /* recovery position seq */
};
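/*
 * Read the meta block at ctx->pos and validate its magic, version, seq,
 * position and CRC before any of its payload entries are trusted.
 */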
878 static int r5l_read_meta_block(struct r5l_log *log,
879 struct r5l_recovery_ctx *ctx)
881 struct page *page = ctx->meta_page;
882 struct r5l_meta_block *mb;
885 if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
889 mb = page_address(page);
890 stored_crc = le32_to_cpu(mb->checksum);
893 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
894 le64_to_cpu(mb->seq) != ctx->seq ||
895 mb->version != R5LOG_VERSION ||
896 le64_to_cpu(mb->position) != ctx->pos)
899 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
900 if (stored_crc != crc)
903 if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
906 ctx->meta_total_blocks = BLOCK_SECTORS;
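/*
 * Replay one stripe described by the current meta block: read its data and
 * parity pages back from the log, verify their checksums, then write them
 * to the member disks (and their replacements, if any).
 */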
911 static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
912 struct r5l_recovery_ctx *ctx,
913 sector_t stripe_sect,
914 int *offset, sector_t *log_offset)
916 struct r5conf *conf = log->rdev->mddev->private;
917 struct stripe_head *sh;
918 struct r5l_payload_data_parity *payload;
921 sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
923 payload = page_address(ctx->meta_page) + *offset;
925 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
926 raid5_compute_sector(conf,
927 le64_to_cpu(payload->location), 0,
930 sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
931 sh->dev[disk_index].page, REQ_OP_READ, 0,
933 sh->dev[disk_index].log_checksum =
934 le32_to_cpu(payload->checksum[0]);
935 set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
936 ctx->meta_total_blocks += BLOCK_SECTORS;
938 disk_index = sh->pd_idx;
939 sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
940 sh->dev[disk_index].page, REQ_OP_READ, 0,
942 sh->dev[disk_index].log_checksum =
943 le32_to_cpu(payload->checksum[0]);
944 set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
946 if (sh->qd_idx >= 0) {
947 disk_index = sh->qd_idx;
948 sync_page_io(log->rdev,
949 r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
950 PAGE_SIZE, sh->dev[disk_index].page,
951 REQ_OP_READ, 0, false);
952 sh->dev[disk_index].log_checksum =
953 le32_to_cpu(payload->checksum[1]);
954 set_bit(R5_Wantwrite,
955 &sh->dev[disk_index].flags);
957 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
960 *log_offset = r5l_ring_add(log, *log_offset,
961 le32_to_cpu(payload->size));
962 *offset += sizeof(struct r5l_payload_data_parity) +
964 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
965 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
969 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
973 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
975 addr = kmap_atomic(sh->dev[disk_index].page);
976 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
978 if (checksum != sh->dev[disk_index].log_checksum)
982 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
983 struct md_rdev *rdev, *rrdev;
985 if (!test_and_clear_bit(R5_Wantwrite,
986 &sh->dev[disk_index].flags))
989 /* in case device is broken */
990 rdev = rcu_dereference(conf->disks[disk_index].rdev);
992 sync_page_io(rdev, stripe_sect, PAGE_SIZE,
993 sh->dev[disk_index].page, REQ_OP_WRITE, 0,
995 rrdev = rcu_dereference(conf->disks[disk_index].replacement);
997 sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
998 sh->dev[disk_index].page, REQ_OP_WRITE, 0,
1001 raid5_release_stripe(sh);
1005 for (disk_index = 0; disk_index < sh->disks; disk_index++)
1006 sh->dev[disk_index].flags = 0;
1007 raid5_release_stripe(sh);
1011 static int r5l_recovery_flush_one_meta(struct r5l_log *log,
1012 struct r5l_recovery_ctx *ctx)
1014 struct r5conf *conf = log->rdev->mddev->private;
1015 struct r5l_payload_data_parity *payload;
1016 struct r5l_meta_block *mb;
1018 sector_t log_offset;
1019 sector_t stripe_sector;
1021 mb = page_address(ctx->meta_page);
1022 offset = sizeof(struct r5l_meta_block);
1023 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
1025 while (offset < le32_to_cpu(mb->meta_size)) {
1028 payload = (void *)mb + offset;
1029 stripe_sector = raid5_compute_sector(conf,
1030 le64_to_cpu(payload->location), 0, &dd, NULL);
1031 if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
1032 &offset, &log_offset))
1038 /* copy data/parity from log to raid disks */
1039 static void r5l_recovery_flush_log(struct r5l_log *log,
1040 struct r5l_recovery_ctx *ctx)
1043 if (r5l_read_meta_block(log, ctx))
1045 if (r5l_recovery_flush_one_meta(log, ctx))
1048 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
1052 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1056 struct r5l_meta_block *mb;
1059 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1062 mb = page_address(page);
1063 mb->magic = cpu_to_le32(R5LOG_MAGIC);
1064 mb->version = R5LOG_VERSION;
1065 mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1066 mb->seq = cpu_to_le64(seq);
1067 mb->position = cpu_to_le64(pos);
1068 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1069 mb->checksum = cpu_to_le32(crc);
1071 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
1072 WRITE_FUA, false)) {
1080 static int r5l_recovery_log(struct r5l_log *log)
1082 struct r5l_recovery_ctx ctx;
1084 ctx.pos = log->last_checkpoint;
1085 ctx.seq = log->last_cp_seq;
1086 ctx.meta_page = alloc_page(GFP_KERNEL);
1090 r5l_recovery_flush_log(log, &ctx);
1091 __free_page(ctx.meta_page);
/*
 * we did a recovery. Now ctx.pos points to an invalid meta block. The new
 * log will start here, but we can't let the superblock point to the last
 * valid meta block. The log might look like:
 * | meta 1| meta 2| meta 3|
 * meta 1 is valid, meta 2 is invalid and meta 3 could be valid. If the
 * superblock points to meta 1 and we write a new valid meta 2n, then if a
 * crash happens again, the new recovery will start from meta 1. Since meta
 * 2n is valid now, recovery will think meta 3 is valid too, which is wrong.
 * The solution is to create a new meta block at meta 2's position with
 * seq == meta 1's seq + 10 and let the superblock point to it. The same
 * recovery will not treat meta 3 as a valid meta block, because its seq
 * doesn't match.
 */
1106 if (ctx.seq > log->last_cp_seq + 1) {
1109 ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
1112 log->seq = ctx.seq + 11;
1113 log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
1114 r5l_write_super(log, ctx.pos);
1116 log->log_start = ctx.pos;
1122 static void r5l_write_super(struct r5l_log *log, sector_t cp)
1124 struct mddev *mddev = log->rdev->mddev;
1126 log->rdev->journal_tail = cp;
1127 set_bit(MD_CHANGE_DEVS, &mddev->flags);
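/*
 * Read the meta block that the superblock's journal_tail points at. If it
 * is absent or invalid, start a fresh log with a random sequence number;
 * otherwise resume from that checkpoint and run recovery.
 */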
1130 static int r5l_load_log(struct r5l_log *log)
1132 struct md_rdev *rdev = log->rdev;
1134 struct r5l_meta_block *mb;
1135 sector_t cp = log->rdev->journal_tail;
1136 u32 stored_crc, expected_crc;
1137 bool create_super = false;
1140 /* Make sure it's valid */
1141 if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
1143 page = alloc_page(GFP_KERNEL);
1147 if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
1151 mb = page_address(page);
1153 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1154 mb->version != R5LOG_VERSION) {
1155 create_super = true;
1158 stored_crc = le32_to_cpu(mb->checksum);
1160 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1161 if (stored_crc != expected_crc) {
1162 create_super = true;
1165 if (le64_to_cpu(mb->position) != cp) {
1166 create_super = true;
1171 log->last_cp_seq = prandom_u32();
/*
 * Make sure the super points to the correct address. The log might have
 * data very soon. If the super doesn't have the correct log tail address,
 * recovery can't find the log.
 */
1178 r5l_write_super(log, cp);
1180 log->last_cp_seq = le64_to_cpu(mb->seq);
1182 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
1183 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
1184 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
1185 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
1186 log->last_checkpoint = cp;
1190 return r5l_recovery_log(log);
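/*
 * Set up journaling for an array: allocate the io_unit cache, mempools and
 * bioset, start the reclaim thread, load (or create) the on-disk log and
 * only then publish conf->log.
 */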
1196 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
1198 struct request_queue *q = bdev_get_queue(rdev->bdev);
1199 struct r5l_log *log;
1201 if (PAGE_SIZE != 4096)
1203 log = kzalloc(sizeof(*log), GFP_KERNEL);
1208 log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
1210 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
1211 sizeof(rdev->mddev->uuid));
1213 mutex_init(&log->io_mutex);
1215 spin_lock_init(&log->io_list_lock);
1216 INIT_LIST_HEAD(&log->running_ios);
1217 INIT_LIST_HEAD(&log->io_end_ios);
1218 INIT_LIST_HEAD(&log->flushing_ios);
1219 INIT_LIST_HEAD(&log->finished_ios);
1220 bio_init(&log->flush_bio);
1222 log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
1226 log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
1230 log->bs = bioset_create(R5L_POOL_SIZE, 0);
1234 log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
1235 if (!log->meta_pool)
1238 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
1239 log->rdev->mddev, "reclaim");
1240 if (!log->reclaim_thread)
1241 goto reclaim_thread;
1242 init_waitqueue_head(&log->iounit_wait);
1244 INIT_LIST_HEAD(&log->no_mem_stripes);
1246 INIT_LIST_HEAD(&log->no_space_stripes);
1247 spin_lock_init(&log->no_space_stripes_lock);
1249 if (r5l_load_log(log))
1252 rcu_assign_pointer(conf->log, log);
1253 set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1257 md_unregister_thread(&log->reclaim_thread);
1259 mempool_destroy(log->meta_pool);
1261 bioset_free(log->bs);
1263 mempool_destroy(log->io_pool);
1265 kmem_cache_destroy(log->io_kc);
1271 void r5l_exit_log(struct r5l_log *log)
1273 md_unregister_thread(&log->reclaim_thread);
1274 mempool_destroy(log->meta_pool);
1275 bioset_free(log->bs);
1276 mempool_destroy(log->io_pool);
1277 kmem_cache_destroy(log->io_kc);