2 * Partial Parity Log for closing the RAID5 write hole
3 * Copyright (c) 2017, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #include <linux/kernel.h>
16 #include <linux/blkdev.h>
17 #include <linux/slab.h>
18 #include <linux/crc32c.h>
19 #include <linux/flex_array.h>
20 #include <linux/async_tx.h>
21 #include <linux/raid/md_p.h>
26 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
27 * partial parity data. The header contains an array of entries
28 * (struct ppl_header_entry) which describe the logged write requests.
29 * Partial parity for the entries comes after the header, written in the same
30 * sequence as the entries:
41 * An entry describes one or more consecutive stripe_heads, up to a full
42 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
43 * number of stripe_heads in the entry and n is the number of modified data
44 * disks. Every stripe_head in the entry must write to the same data disks.
45 * An example of a valid case described by a single entry (writes to the first
46 * stripe of a 4 disk array, 16k chunk size):
48 * sh->sector dd0 dd1 dd2 ppl
50 * 0 | --- | --- | --- | +----+
51 * 8 | -W- | -W- | --- | | pp | data_sector = 8
52 * 16 | -W- | -W- | --- | | pp | data_size = 3 * 2 * 4k
53 * 24 | -W- | -W- | --- | | pp | pp_size = 3 * 4k
54 * +-----+-----+-----+ +----+
56 * data_sector is the first raid sector of the modified data, data_size is the
57 * total size of modified data and pp_size is the size of partial parity for
58 * this entry. Entries for full stripe writes contain no partial parity
59 * (pp_size = 0); they only mark the stripes for which parity should be
60 * recalculated after an unclean shutdown. Every entry holds a checksum of its
61 * partial parity, and the header also has a checksum of the header itself.
63 * A write request is always logged to the PPL instance stored on the parity
64 * disk of the corresponding stripe. For each member disk there is one ppl_log
65 * used to handle logging for this disk, independently of the others. They are
66 * grouped in the child_logs array in struct ppl_conf, which is assigned to
67 * r5conf->log_private.
69 * ppl_io_unit represents a full PPL write; its header_page contains the ppl_header.
70 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
71 * can be appended to the last entry if it meets the conditions for a valid
72 * entry described above, otherwise a new entry is added. Checksums of entries
73 * are calculated incrementally as stripes containing partial parity are being
74 * added. ppl_submit_iounit() calculates the checksum of the header and submits
75 * a bio containing the header page and partial parity pages (sh->ppl_page) for
76 * all stripes of the io_unit. When the PPL write completes, the stripes
77 * associated with the io_unit are released and raid5d starts writing their data
78 * and parity. When all stripes are written, the io_unit is freed and the next one can be submitted.
81 * An io_unit is used to gather stripes until it is submitted or becomes full
82 * (if the maximum number of entries or size of PPL is reached). Another io_unit
83 * can't be submitted until the previous has completed (PPL and stripe
84 * data+parity is written). The log->io_list tracks all io_units of a log
85 * (for a single member disk). New io_units are added to the end of the list
86 * and the first io_unit is submitted, if it is not submitted already.
87 * The current io_unit accepting new stripes is always at the end of the list.
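 *
 * For reference, a rough sketch of the on-disk structures described above.
 * The authoritative definitions live in <linux/raid/md_p.h> (included
 * above); the field widths follow how this file reads them, but the exact
 * field order and padding shown here are assumptions:
 *
 *   struct ppl_header_entry {
 *           __le64 data_sector;   // raid sector of the first modified block
 *           __le32 pp_size;       // length of partial parity for this entry
 *           __le32 data_size;     // total length of the modified data
 *           __le32 parity_disk;   // index of the parity member disk
 *           __le32 checksum;      // ~crc32c of the partial parity
 *   };
 *
 *   struct ppl_header {
 *           __u8   reserved[PPL_HDR_RESERVED];  // filled with 0xff
 *           __le32 signature;                   // raid array identifier
 *           __le64 generation;                  // log write sequence number
 *           __le32 entries_count;               // entries used in this header
 *           __le32 checksum;                    // ~crc32c of the 4KB header
 *           struct ppl_header_entry entries[PPL_HDR_MAX_ENTRIES];
 *   };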
90 #define PPL_SPACE_SIZE (128 * 1024)
95 /* array of child logs, one for each raid disk */
96 struct ppl_log *child_logs;
99 int block_size; /* the logical block size used for data_sector
100 * in ppl_header_entry */
101 u32 signature; /* raid array identifier */
102 atomic64_t seq; /* current log write sequence number */
104 struct kmem_cache *io_kc;
108 /* used only for recovery */
109 int recovered_entries;
112 /* stripes to retry if failed to allocate io_unit */
113 struct list_head no_mem_stripes;
114 spinlock_t no_mem_stripes_lock;
118 struct ppl_conf *ppl_conf; /* shared between all log instances */
120 struct md_rdev *rdev; /* array member disk associated with
121 * this log instance */
122 struct mutex io_mutex;
123 struct ppl_io_unit *current_io; /* current io_unit accepting new data
124 * always at the end of io_list */
125 spinlock_t io_list_lock;
126 struct list_head io_list; /* all io_units of this log */
128 sector_t next_io_sector;
129 unsigned int entry_space;
133 #define PPL_IO_INLINE_BVECS 32
138 struct page *header_page; /* for ppl_header */
140 unsigned int entries_count; /* number of entries in ppl_header */
141 unsigned int pp_size; /* current total size of partial parity */
143 u64 seq; /* sequence number of this log write */
144 struct list_head log_sibling; /* log->io_list */
146 struct list_head stripe_list; /* stripes added to the io_unit */
147 atomic_t pending_stripes; /* how many stripes not written to raid */
149 bool submitted; /* true if write to log started */
151 /* inline bio and its biovec for submitting the iounit */
153 struct bio_vec biovec[PPL_IO_INLINE_BVECS];
156 struct dma_async_tx_descriptor *
157 ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
158 struct dma_async_tx_descriptor *tx)
160 int disks = sh->disks;
161 struct page **srcs = flex_array_get(percpu->scribble, 0);
162 int count = 0, pd_idx = sh->pd_idx, i;
163 struct async_submit_ctl submit;
165 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
168 * Partial parity is the XOR of stripe data chunks that are not changed
169 * during the write request. Depending on available data
170 * (read-modify-write vs. reconstruct-write case) we calculate it differently.
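 *
 * E.g. (assumed 3+1 layout, only dd1 being written): rmw uses
 * old parity ^ old dd1, already computed by ops_run_prexor5(), while
 * rcw xors dd0 ^ dd2 directly; both equal the XOR of the data chunks
 * that are not being written, which is exactly the partial parity.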
173 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
175 * rmw: xor old data and parity from updated disks
176 * This is calculated earlier by ops_run_prexor5() so just copy
177 * the parity dev page.
179 srcs[count++] = sh->dev[pd_idx].page;
180 } else if (sh->reconstruct_state == reconstruct_state_drain_run) {
181 /* rcw: xor data from all disks that are not updated */
182 for (i = disks; i--;) {
183 struct r5dev *dev = &sh->dev[i];
184 if (test_bit(R5_UPTODATE, &dev->flags))
185 srcs[count++] = dev->page;
191 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
192 NULL, sh, flex_array_get(percpu->scribble, 0)
193 + sizeof(struct page *) * (sh->disks + 2));
196 tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
199 tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
205 static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
207 struct kmem_cache *kc = pool_data;
208 struct ppl_io_unit *io;
210 io = kmem_cache_alloc(kc, gfp_mask);
214 io->header_page = alloc_page(gfp_mask);
215 if (!io->header_page) {
216 kmem_cache_free(kc, io);
223 static void ppl_io_pool_free(void *element, void *pool_data)
225 struct kmem_cache *kc = pool_data;
226 struct ppl_io_unit *io = element;
228 __free_page(io->header_page);
229 kmem_cache_free(kc, io);
232 static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
233 struct stripe_head *sh)
235 struct ppl_conf *ppl_conf = log->ppl_conf;
236 struct ppl_io_unit *io;
237 struct ppl_header *pplhdr;
238 struct page *header_page;
240 io = mempool_alloc(ppl_conf->io_pool, GFP_NOWAIT);
244 header_page = io->header_page;
245 memset(io, 0, sizeof(*io));
246 io->header_page = header_page;
249 INIT_LIST_HEAD(&io->log_sibling);
250 INIT_LIST_HEAD(&io->stripe_list);
251 atomic_set(&io->pending_stripes, 0);
252 bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
254 pplhdr = page_address(io->header_page);
256 memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
257 pplhdr->signature = cpu_to_le32(ppl_conf->signature);
259 io->seq = atomic64_add_return(1, &ppl_conf->seq);
260 pplhdr->generation = cpu_to_le64(io->seq);
265 static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
267 struct ppl_io_unit *io = log->current_io;
268 struct ppl_header_entry *e = NULL;
269 struct ppl_header *pplhdr;
271 sector_t data_sector = 0;
273 struct r5conf *conf = sh->raid_conf;
275 pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);
277 /* check if current io_unit is full */
278 if (io && (io->pp_size == log->entry_space ||
279 io->entries_count == PPL_HDR_MAX_ENTRIES)) {
280 pr_debug("%s: add io_unit blocked by seq: %llu\n",
285 /* add a new unit if there is none or the current is full */
287 io = ppl_new_iounit(log, sh);
290 spin_lock_irq(&log->io_list_lock);
291 list_add_tail(&io->log_sibling, &log->io_list);
292 spin_unlock_irq(&log->io_list_lock);
294 log->current_io = io;
297 for (i = 0; i < sh->disks; i++) {
298 struct r5dev *dev = &sh->dev[i];
300 if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
301 if (!data_disks || dev->sector < data_sector)
302 data_sector = dev->sector;
308 pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
309 io->seq, (unsigned long long)data_sector, data_disks);
311 pplhdr = page_address(io->header_page);
313 if (io->entries_count > 0) {
314 struct ppl_header_entry *last =
315 &pplhdr->entries[io->entries_count - 1];
316 struct stripe_head *sh_last = list_last_entry(
317 &io->stripe_list, struct stripe_head, log_list);
318 u64 data_sector_last = le64_to_cpu(last->data_sector);
319 u32 data_size_last = le32_to_cpu(last->data_size);
322 * Check if we can append the stripe to the last entry. It must
323 * be just after the last logged stripe and write to the same
324 * disks. Use bit shift and logarithm to avoid 64-bit division.
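 *
 * A worked example, assuming the 4-disk, 16k-chunk layout from the
 * comment at the top of this file (chunk_sectors = 32,
 * STRIPE_SECTORS = 8): if the last entry logged the stripe at sector 8
 * writing to dd0 and dd1 (data_sector_last = 8, data_size_last = 2 * 4k,
 * i.e. data_size_last >> 9 = 16), then the stripe at sector 16 writing
 * to the same two disks can be appended, because 16 == 8 + STRIPE_SECTORS,
 * both sectors fall in the same chunk (16 >> 5 == 8 >> 5) and
 * (16 - 8) * data_disks == 16.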
326 if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
327 (data_sector >> ilog2(conf->chunk_sectors) ==
328 data_sector_last >> ilog2(conf->chunk_sectors)) &&
329 ((data_sector - data_sector_last) * data_disks ==
330 data_size_last >> 9))
335 e = &pplhdr->entries[io->entries_count++];
336 e->data_sector = cpu_to_le64(data_sector);
337 e->parity_disk = cpu_to_le32(sh->pd_idx);
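/*
 * ~0 below is the crc32c seed; the running checksum is extended as
 * partial parity pages are added and is bit-inverted in
 * ppl_submit_iounit() before the header is written.
 */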
338 e->checksum = cpu_to_le32(~0);
341 le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);
343 /* don't write any PP if full stripe write */
344 if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
345 le32_add_cpu(&e->pp_size, PAGE_SIZE);
346 io->pp_size += PAGE_SIZE;
347 e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
348 page_address(sh->ppl_page),
352 list_add_tail(&sh->log_list, &io->stripe_list);
353 atomic_inc(&io->pending_stripes);
359 int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
361 struct ppl_conf *ppl_conf = conf->log_private;
362 struct ppl_io_unit *io = sh->ppl_io;
365 if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
366 !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
367 !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
368 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
372 log = &ppl_conf->child_logs[sh->pd_idx];
374 mutex_lock(&log->io_mutex);
376 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
377 mutex_unlock(&log->io_mutex);
381 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
382 clear_bit(STRIPE_DELAYED, &sh->state);
383 atomic_inc(&sh->count);
385 if (ppl_log_stripe(log, sh)) {
386 spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
387 list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
388 spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
391 mutex_unlock(&log->io_mutex);
396 static void ppl_log_endio(struct bio *bio)
398 struct ppl_io_unit *io = bio->bi_private;
399 struct ppl_log *log = io->log;
400 struct ppl_conf *ppl_conf = log->ppl_conf;
401 struct stripe_head *sh, *next;
403 pr_debug("%s: seq: %llu\n", __func__, io->seq);
406 md_error(ppl_conf->mddev, log->rdev);
408 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
409 list_del_init(&sh->log_list);
411 set_bit(STRIPE_HANDLE, &sh->state);
412 raid5_release_stripe(sh);
416 static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
418 char b[BDEVNAME_SIZE];
420 pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
421 __func__, io->seq, bio->bi_iter.bi_size,
422 (unsigned long long)bio->bi_iter.bi_sector,
423 bio_devname(bio, b));
428 static void ppl_submit_iounit(struct ppl_io_unit *io)
430 struct ppl_log *log = io->log;
431 struct ppl_conf *ppl_conf = log->ppl_conf;
432 struct ppl_header *pplhdr = page_address(io->header_page);
433 struct bio *bio = &io->bio;
434 struct stripe_head *sh;
437 bio->bi_private = io;
439 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
444 for (i = 0; i < io->entries_count; i++) {
445 struct ppl_header_entry *e = &pplhdr->entries[i];
447 pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
448 __func__, io->seq, i, le64_to_cpu(e->data_sector),
449 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));
451 e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
452 ilog2(ppl_conf->block_size >> 9));
453 e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
456 pplhdr->entries_count = cpu_to_le32(io->entries_count);
457 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
459 /* Rewind the buffer if the current PPL is larger than the remaining space */
460 if (log->use_multippl &&
461 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
462 (PPL_HEADER_SIZE + io->pp_size) >> 9)
463 log->next_io_sector = log->rdev->ppl.sector;
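/*
 * Illustrative numbers (assumed): with a 1 MiB PPL area (2048 sectors)
 * and io_units of a 4 KiB header plus 60 KiB of partial parity
 * (128 sectors each), the 17th io_unit wraps back to rdev->ppl.sector,
 * overwriting the oldest PPLs, which are stale once their stripes have
 * been written to the raid disks.
 */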
466 bio->bi_end_io = ppl_log_endio;
467 bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
468 bio_set_dev(bio, log->rdev->bdev);
469 bio->bi_iter.bi_sector = log->next_io_sector;
470 bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
472 pr_debug("%s: log->current_io_sector: %llu\n", __func__,
473 (unsigned long long)log->next_io_sector);
475 if (log->use_multippl)
476 log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
478 list_for_each_entry(sh, &io->stripe_list, log_list) {
479 /* entries for full stripe writes have no partial parity */
480 if (test_bit(STRIPE_FULL_WRITE, &sh->state))
483 if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
484 struct bio *prev = bio;
486 bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
488 bio->bi_opf = prev->bi_opf;
489 bio_copy_dev(bio, prev);
490 bio->bi_iter.bi_sector = bio_end_sector(prev);
491 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
493 bio_chain(bio, prev);
494 ppl_submit_iounit_bio(io, prev);
498 ppl_submit_iounit_bio(io, bio);
501 static void ppl_submit_current_io(struct ppl_log *log)
503 struct ppl_io_unit *io;
505 spin_lock_irq(&log->io_list_lock);
507 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
509 if (io && io->submitted)
512 spin_unlock_irq(&log->io_list_lock);
515 io->submitted = true;
517 if (io == log->current_io)
518 log->current_io = NULL;
520 ppl_submit_iounit(io);
524 void ppl_write_stripe_run(struct r5conf *conf)
526 struct ppl_conf *ppl_conf = conf->log_private;
530 for (i = 0; i < ppl_conf->count; i++) {
531 log = &ppl_conf->child_logs[i];
533 mutex_lock(&log->io_mutex);
534 ppl_submit_current_io(log);
535 mutex_unlock(&log->io_mutex);
539 static void ppl_io_unit_finished(struct ppl_io_unit *io)
541 struct ppl_log *log = io->log;
542 struct ppl_conf *ppl_conf = log->ppl_conf;
545 pr_debug("%s: seq: %llu\n", __func__, io->seq);
547 local_irq_save(flags);
549 spin_lock(&log->io_list_lock);
550 list_del(&io->log_sibling);
551 spin_unlock(&log->io_list_lock);
553 mempool_free(io, ppl_conf->io_pool);
555 spin_lock(&ppl_conf->no_mem_stripes_lock);
556 if (!list_empty(&ppl_conf->no_mem_stripes)) {
557 struct stripe_head *sh;
559 sh = list_first_entry(&ppl_conf->no_mem_stripes,
560 struct stripe_head, log_list);
561 list_del_init(&sh->log_list);
562 set_bit(STRIPE_HANDLE, &sh->state);
563 raid5_release_stripe(sh);
565 spin_unlock(&ppl_conf->no_mem_stripes_lock);
567 local_irq_restore(flags);
570 void ppl_stripe_write_finished(struct stripe_head *sh)
572 struct ppl_io_unit *io;
577 if (io && atomic_dec_and_test(&io->pending_stripes))
578 ppl_io_unit_finished(io);
581 static void ppl_xor(int size, struct page *page1, struct page *page2)
583 struct async_submit_ctl submit;
584 struct dma_async_tx_descriptor *tx;
585 struct page *xor_srcs[] = { page1, page2 };
587 init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
588 NULL, NULL, NULL, NULL);
589 tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);
591 async_tx_quiesce(&tx);
595 * PPL recovery strategy: xor partial parity and data from all modified data
596 * disks within a stripe and write the result as the new stripe parity. If all
597 * stripe data disks are modified (full stripe write), no partial parity is
598 * available, so just xor the data disks.
600 * Recovery of a PPL entry shall occur only if all modified data disks are
601 * available and read from all of them succeeds.
603 * A PPL entry applies to a stripe; the partial parity size for an entry is at most
604 * the size of the chunk. Examples of possible cases for a single entry:
606 * case 0: single data disk write:
607 * data0 data1 data2 ppl parity
608 * +--------+--------+--------+ +--------------------+
609 * | ------ | ------ | ------ | +----+ | (no change) |
610 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp |
611 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp |
612 * | ------ | ------ | ------ | +----+ | (no change) |
613 * +--------+--------+--------+ +--------------------+
614 * pp_size = data_size
616 * case 1: more than one data disk write:
617 * data0 data1 data2 ppl parity
618 * +--------+--------+--------+ +--------------------+
619 * | ------ | ------ | ------ | +----+ | (no change) |
620 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
621 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
622 * | ------ | ------ | ------ | +----+ | (no change) |
623 * +--------+--------+--------+ +--------------------+
624 * pp_size = data_size / modified_data_disks
626 * case 2: write to all data disks (also full stripe write):
627 * data0 data1 data2 parity
628 * +--------+--------+--------+ +--------------------+
629 * | ------ | ------ | ------ | | (no change) |
630 * | -data- | -data- | -data- | --------> | xor all data |
631 * | ------ | ------ | ------ | --------> | (no change) |
632 * | ------ | ------ | ------ | | (no change) |
633 * +--------+--------+--------+ +--------------------+
636 * The following cases are possible only in other implementations. The recovery
637 * code can handle them, but they are not generated at runtime because they can
638 * be reduced to cases 0, 1 and 2:
641 * data0 data1 data2 ppl parity
642 * +--------+--------+--------+ +----+ +--------------------+
643 * | ------ | -data- | -data- | | pp | | data1 ^ data2 ^ pp |
644 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
645 * | -data- | -data- | -data- | | -- | -> | xor all data |
646 * | -data- | -data- | ------ | | pp | | data0 ^ data1 ^ pp |
647 * +--------+--------+--------+ +----+ +--------------------+
648 * pp_size = chunk_size
651 * data0 data1 data2 ppl parity
652 * +--------+--------+--------+ +----+ +--------------------+
653 * | ------ | -data- | ------ | | pp | | data1 ^ pp |
654 * | ------ | ------ | ------ | | -- | -> | (no change) |
655 * | ------ | ------ | ------ | | -- | -> | (no change) |
656 * | -data- | ------ | ------ | | pp | | data0 ^ pp |
657 * +--------+--------+--------+ +----+ +--------------------+
658 * pp_size = chunk_size
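 *
 * A worked example for case 1 (numbers assumed): with a 16k chunk
 * (chunk_sectors = 32), a 512-byte block_size and two data disks each
 * writing 8k, the entry has pp_size = 8k and data_size = 16k, so
 * ppl_recover_entry() computes data_disks = 16k / 8k = 2,
 * strip_sectors = 8k >> 9 = 16 and
 * r_sector_last = r_sector_first + (2 - 1) * 32 + 16.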
660 static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
663 struct ppl_conf *ppl_conf = log->ppl_conf;
664 struct mddev *mddev = ppl_conf->mddev;
665 struct r5conf *conf = mddev->private;
666 int block_size = ppl_conf->block_size;
669 sector_t r_sector_first;
670 sector_t r_sector_last;
675 char b[BDEVNAME_SIZE];
676 unsigned int pp_size = le32_to_cpu(e->pp_size);
677 unsigned int data_size = le32_to_cpu(e->data_size);
679 page1 = alloc_page(GFP_KERNEL);
680 page2 = alloc_page(GFP_KERNEL);
682 if (!page1 || !page2) {
687 r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);
689 if ((pp_size >> 9) < conf->chunk_sectors) {
691 data_disks = data_size / pp_size;
692 strip_sectors = pp_size >> 9;
694 data_disks = conf->raid_disks - conf->max_degraded;
695 strip_sectors = (data_size >> 9) / data_disks;
697 r_sector_last = r_sector_first +
698 (data_disks - 1) * conf->chunk_sectors +
701 data_disks = conf->raid_disks - conf->max_degraded;
702 strip_sectors = conf->chunk_sectors;
703 r_sector_last = r_sector_first + (data_size >> 9);
706 pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
707 (unsigned long long)r_sector_first,
708 (unsigned long long)r_sector_last);
710 /* if start and end are 4k aligned, use a 4k block */
711 if (block_size == 512 &&
712 (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
713 (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
714 block_size = STRIPE_SIZE;
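/*
 * E.g. (values assumed): r_sector_first = 16 and r_sector_last = 48 are
 * both multiples of STRIPE_SECTORS (8), so each iteration below handles
 * one 4k block instead of one 512-byte sector.
 */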
716 /* iterate through blocks in strip */
717 for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
718 bool update_parity = false;
719 sector_t parity_sector;
720 struct md_rdev *parity_rdev;
721 struct stripe_head sh;
725 pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
728 memset(page_address(page1), 0, PAGE_SIZE);
730 /* iterate through data member disks */
731 for (disk = 0; disk < data_disks; disk++) {
733 struct md_rdev *rdev;
735 sector_t r_sector = r_sector_first + i +
736 (disk * conf->chunk_sectors);
738 pr_debug("%s:%*s data member disk %d start\n",
739 __func__, indent, "", disk);
742 if (r_sector >= r_sector_last) {
743 pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
744 __func__, indent, "",
745 (unsigned long long)r_sector);
750 update_parity = true;
752 /* map raid sector to member disk */
753 sector = raid5_compute_sector(conf, r_sector, 0,
755 pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
756 __func__, indent, "",
757 (unsigned long long)r_sector, dd_idx,
758 (unsigned long long)sector);
760 rdev = conf->disks[dd_idx].rdev;
761 if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
762 sector >= rdev->recovery_offset)) {
763 pr_debug("%s:%*s data member disk %d missing\n",
764 __func__, indent, "", dd_idx);
765 update_parity = false;
769 pr_debug("%s:%*s reading data member disk %s sector %llu\n",
770 __func__, indent, "", bdevname(rdev->bdev, b),
771 (unsigned long long)sector);
772 if (!sync_page_io(rdev, sector, block_size, page2,
773 REQ_OP_READ, 0, false)) {
774 md_error(mddev, rdev);
775 pr_debug("%s:%*s read failed!\n", __func__,
781 ppl_xor(block_size, page1, page2);
790 pr_debug("%s:%*s reading pp disk sector %llu\n",
791 __func__, indent, "",
792 (unsigned long long)(ppl_sector + i));
793 if (!sync_page_io(log->rdev,
794 ppl_sector - log->rdev->data_offset + i,
795 block_size, page2, REQ_OP_READ, 0,
797 pr_debug("%s:%*s read failed!\n", __func__,
799 md_error(mddev, log->rdev);
804 ppl_xor(block_size, page1, page2);
807 /* map raid sector to parity disk */
808 parity_sector = raid5_compute_sector(conf, r_sector_first + i,
810 BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
811 parity_rdev = conf->disks[sh.pd_idx].rdev;
813 BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
814 pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
815 __func__, indent, "",
816 (unsigned long long)parity_sector,
817 bdevname(parity_rdev->bdev, b));
818 if (!sync_page_io(parity_rdev, parity_sector, block_size,
819 page1, REQ_OP_WRITE, 0, false)) {
820 pr_debug("%s:%*s parity write error!\n", __func__,
822 md_error(mddev, parity_rdev);
835 static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
838 struct ppl_conf *ppl_conf = log->ppl_conf;
839 struct md_rdev *rdev = log->rdev;
840 struct mddev *mddev = rdev->mddev;
841 sector_t ppl_sector = rdev->ppl.sector + offset +
842 (PPL_HEADER_SIZE >> 9);
847 page = alloc_page(GFP_KERNEL);
851 /* iterate through all PPL entries saved */
852 for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
853 struct ppl_header_entry *e = &pplhdr->entries[i];
854 u32 pp_size = le32_to_cpu(e->pp_size);
855 sector_t sector = ppl_sector;
856 int ppl_entry_sectors = pp_size >> 9;
859 pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
860 __func__, rdev->raid_disk, i,
861 (unsigned long long)ppl_sector, pp_size);
864 crc_stored = le32_to_cpu(e->checksum);
866 /* read partial parity for this entry and calculate its checksum */
868 int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
870 if (!sync_page_io(rdev, sector - rdev->data_offset,
871 s, page, REQ_OP_READ, 0, false)) {
872 md_error(mddev, rdev);
877 crc = crc32c_le(crc, page_address(page), s);
885 if (crc != crc_stored) {
887 * Don't recover this entry if the checksum does not
888 * match, but keep going and try to recover other entries.
891 pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
892 __func__, crc_stored, crc);
893 ppl_conf->mismatch_count++;
895 ret = ppl_recover_entry(log, e, ppl_sector);
898 ppl_conf->recovered_entries++;
901 ppl_sector += ppl_entry_sectors;
904 /* flush the disk cache after recovery if necessary */
905 ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
911 static int ppl_write_empty_header(struct ppl_log *log)
914 struct ppl_header *pplhdr;
915 struct md_rdev *rdev = log->rdev;
918 pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
919 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
921 page = alloc_page(GFP_NOIO | __GFP_ZERO);
925 pplhdr = page_address(page);
926 /* zero out PPL space to avoid collision with old PPLs */
927 blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
928 log->rdev->ppl.size, GFP_NOIO, 0);
929 memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
930 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
931 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
933 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
934 PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
935 REQ_FUA, 0, false)) {
936 md_error(rdev->mddev, rdev);
944 static int ppl_load_distributed(struct ppl_log *log)
946 struct ppl_conf *ppl_conf = log->ppl_conf;
947 struct md_rdev *rdev = log->rdev;
948 struct mddev *mddev = rdev->mddev;
949 struct page *page, *page2, *tmp;
950 struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
954 sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;
956 pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
957 /* read the PPL headers and find the most recent one */
958 page = alloc_page(GFP_KERNEL);
962 page2 = alloc_page(GFP_KERNEL);
968 /* search the PPL area for the latest PPL */
969 while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
970 if (!sync_page_io(rdev,
971 rdev->ppl.sector - rdev->data_offset +
972 pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
974 md_error(mddev, rdev);
976 /* if not able to read - don't recover any PPL */
980 pplhdr = page_address(page);
982 /* check header validity */
983 crc_stored = le32_to_cpu(pplhdr->checksum);
984 pplhdr->checksum = 0;
985 crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
987 if (crc_stored != crc) {
988 pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
989 __func__, crc_stored, crc,
990 (unsigned long long)pplhdr_offset);
991 pplhdr = prev_pplhdr;
992 pplhdr_offset = prev_pplhdr_offset;
996 signature = le32_to_cpu(pplhdr->signature);
998 if (mddev->external) {
1000 * For external metadata the header signature is set and
1001 * validated in userspace.
1003 ppl_conf->signature = signature;
1004 } else if (ppl_conf->signature != signature) {
1005 pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
1006 __func__, signature, ppl_conf->signature,
1007 (unsigned long long)pplhdr_offset);
1008 pplhdr = prev_pplhdr;
1009 pplhdr_offset = prev_pplhdr_offset;
1013 if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
1014 le64_to_cpu(pplhdr->generation)) {
1015 /* previous was newest */
1016 pplhdr = prev_pplhdr;
1017 pplhdr_offset = prev_pplhdr_offset;
1021 prev_pplhdr_offset = pplhdr_offset;
1022 prev_pplhdr = pplhdr;
1028 /* calculate next potential ppl offset */
1029 for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
1031 le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
1032 pplhdr_offset += PPL_HEADER_SIZE >> 9;
1035 /* no valid ppl found */
1037 ppl_conf->mismatch_count++;
1039 pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
1040 __func__, (unsigned long long)pplhdr_offset,
1041 le64_to_cpu(pplhdr->generation));
1043 /* attempt to recover from log if we are starting a dirty array */
1044 if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
1045 ret = ppl_recover(log, pplhdr, pplhdr_offset);
1047 /* write empty header if we are starting the array */
1048 if (!ret && !mddev->pers)
1049 ret = ppl_write_empty_header(log);
1054 pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1055 __func__, ret, ppl_conf->mismatch_count,
1056 ppl_conf->recovered_entries);
1060 static int ppl_load(struct ppl_conf *ppl_conf)
1064 bool signature_set = false;
1067 for (i = 0; i < ppl_conf->count; i++) {
1068 struct ppl_log *log = &ppl_conf->child_logs[i];
1070 /* skip missing drive */
1074 ret = ppl_load_distributed(log);
1079 * For external metadata we can't check if the signature is
1080 * correct on a single drive, but we can check if it is the same for all drives.
1083 if (ppl_conf->mddev->external) {
1084 if (!signature_set) {
1085 signature = ppl_conf->signature;
1086 signature_set = true;
1087 } else if (signature != ppl_conf->signature) {
1088 pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
1089 mdname(ppl_conf->mddev));
1096 pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1097 __func__, ret, ppl_conf->mismatch_count,
1098 ppl_conf->recovered_entries);
1102 static void __ppl_exit_log(struct ppl_conf *ppl_conf)
1104 clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1105 clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
1107 kfree(ppl_conf->child_logs);
1110 bioset_free(ppl_conf->bs);
1111 mempool_destroy(ppl_conf->io_pool);
1112 kmem_cache_destroy(ppl_conf->io_kc);
1117 void ppl_exit_log(struct r5conf *conf)
1119 struct ppl_conf *ppl_conf = conf->log_private;
1122 __ppl_exit_log(ppl_conf);
1123 conf->log_private = NULL;
1127 static int ppl_validate_rdev(struct md_rdev *rdev)
1129 char b[BDEVNAME_SIZE];
1130 int ppl_data_sectors;
1134 * The configured PPL size must be enough to store
1135 * the header and (at the very least) partial parity
1136 * for one stripe. Round it down to ensure the data
1137 * space is cleanly divisible by stripe size.
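 *
 * For example (sizes assumed): with rdev->ppl.size = 266 sectors, the
 * 8-sector header leaves 258 data sectors, which round down to 256,
 * giving an effective ppl.size of 264 sectors.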
1139 ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
1141 if (ppl_data_sectors > 0)
1142 ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);
1144 if (ppl_data_sectors <= 0) {
1145 pr_warn("md/raid:%s: PPL space too small on %s\n",
1146 mdname(rdev->mddev), bdevname(rdev->bdev, b));
1150 ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);
1152 if ((rdev->ppl.sector < rdev->data_offset &&
1153 rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
1154 (rdev->ppl.sector >= rdev->data_offset &&
1155 rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
1156 pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
1157 mdname(rdev->mddev), bdevname(rdev->bdev, b));
1161 if (!rdev->mddev->external &&
1162 ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
1163 (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
1164 pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
1165 mdname(rdev->mddev), bdevname(rdev->bdev, b));
1169 rdev->ppl.size = ppl_size_new;
1174 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1176 if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
1177 PPL_HEADER_SIZE) * 2) {
1178 log->use_multippl = true;
1179 set_bit(MD_HAS_MULTIPLE_PPLS,
1180 &log->ppl_conf->mddev->flags);
1181 log->entry_space = PPL_SPACE_SIZE;
1183 log->use_multippl = false;
1184 log->entry_space = (log->rdev->ppl.size << 9) -
1187 log->next_io_sector = rdev->ppl.sector;
1190 int ppl_init_log(struct r5conf *conf)
1192 struct ppl_conf *ppl_conf;
1193 struct mddev *mddev = conf->mddev;
1196 bool need_cache_flush = false;
1198 pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
1199 mdname(conf->mddev));
1201 if (PAGE_SIZE != 4096)
1204 if (mddev->level != 5) {
1205 pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
1206 mdname(mddev), mddev->level);
1210 if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
1211 pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
1216 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1217 pr_warn("md/raid:%s PPL is not compatible with journal\n",
1222 ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
1226 ppl_conf->mddev = mddev;
1228 ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
1229 if (!ppl_conf->io_kc) {
1234 ppl_conf->io_pool = mempool_create(conf->raid_disks, ppl_io_pool_alloc,
1235 ppl_io_pool_free, ppl_conf->io_kc);
1236 if (!ppl_conf->io_pool) {
1241 ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
1242 if (!ppl_conf->bs) {
1247 ppl_conf->count = conf->raid_disks;
1248 ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
1250 if (!ppl_conf->child_logs) {
1255 atomic64_set(&ppl_conf->seq, 0);
1256 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1257 spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1259 if (!mddev->external) {
1260 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
1261 ppl_conf->block_size = 512;
1263 ppl_conf->block_size = queue_logical_block_size(mddev->queue);
1266 for (i = 0; i < ppl_conf->count; i++) {
1267 struct ppl_log *log = &ppl_conf->child_logs[i];
1268 struct md_rdev *rdev = conf->disks[i].rdev;
1270 mutex_init(&log->io_mutex);
1271 spin_lock_init(&log->io_list_lock);
1272 INIT_LIST_HEAD(&log->io_list);
1274 log->ppl_conf = ppl_conf;
1278 struct request_queue *q;
1280 ret = ppl_validate_rdev(rdev);
1284 q = bdev_get_queue(rdev->bdev);
1285 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
1286 need_cache_flush = true;
1287 ppl_init_child_log(log, rdev);
1291 if (need_cache_flush)
1292 pr_warn("md/raid:%s: Volatile write-back cache should be disabled on all member drives when using PPL!\n",
1295 /* load and possibly recover the logs from the member disks */
1296 ret = ppl_load(ppl_conf);
1300 } else if (!mddev->pers &&
1301 mddev->recovery_cp == 0 && !mddev->degraded &&
1302 ppl_conf->recovered_entries > 0 &&
1303 ppl_conf->mismatch_count == 0) {
1305 * If we are starting a dirty array and the recovery succeeds
1306 * without any issues, set the array as clean.
1308 mddev->recovery_cp = MaxSector;
1309 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
1310 } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
1311 /* no mismatch allowed when enabling PPL for a running array */
1316 conf->log_private = ppl_conf;
1317 set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1321 __ppl_exit_log(ppl_conf);
1325 int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
1327 struct ppl_conf *ppl_conf = conf->log_private;
1328 struct ppl_log *log;
1330 char b[BDEVNAME_SIZE];
1335 pr_debug("%s: disk: %d operation: %s dev: %s\n",
1336 __func__, rdev->raid_disk, add ? "add" : "remove",
1337 bdevname(rdev->bdev, b));
1339 if (rdev->raid_disk < 0)
1342 if (rdev->raid_disk >= ppl_conf->count)
1345 log = &ppl_conf->child_logs[rdev->raid_disk];
1347 mutex_lock(&log->io_mutex);
1349 ret = ppl_validate_rdev(rdev);
1352 ret = ppl_write_empty_header(log);
1353 ppl_init_child_log(log, rdev);
1358 mutex_unlock(&log->io_mutex);