/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_H
#define _RAID5_H
5 #include <linux/raid/xor.h>
6 #include <linux/dmaengine.h>
7 #include <linux/local_lock.h>
/*
 * Each stripe contains one buffer per device.  Each buffer can be in
12 * one of a number of states stored in "flags". Changes between
13 * these states happen *almost* exclusively under the protection of the
14 * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and
15 * these are not protected by STRIPE_ACTIVE.
17 * The flag bits that are used to represent these states are:
18 * R5_UPTODATE and R5_LOCKED
20 * State Empty == !UPTODATE, !LOCK
21 * We have no data, and there is no active request
22 * State Want == !UPTODATE, LOCK
23 * A read request is being submitted for this block
24 * State Dirty == UPTODATE, LOCK
25 * Some new data is in this buffer, and it is being written out
26 * State Clean == UPTODATE, !LOCK
27 * We have valid data which is the same as on disc
29 * The possible state transitions are:
31 * Empty -> Want - on read or write to get old data for parity calc
32 * Empty -> Dirty - on compute_parity to satisfy write/sync request.
33 * Empty -> Clean - on compute_block when computing a block for failed drive
34 * Want -> Empty - on failed read
35 * Want -> Clean - on successful completion of read request
36 * Dirty -> Clean - on successful completion of write request
37 * Dirty -> Clean - on failed write
38 * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
41 * all happen in b_end_io at interrupt time.
42 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
45 * This is safe because thinking that a Clean buffer is actually dirty
46 * will at worst delay some action, and the stripe will be scheduled
47 * for attention after the transition is complete.
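 *
 * For illustration only (a sketch, not code from this driver; the
 * R5_UPTODATE/R5_LOCKED flag bits are declared further down in this
 * file), the four states map onto the two flag bits like so:
 */
static inline const char *r5_buffer_state_name(unsigned long flags)
{
	bool uptodate = test_bit(R5_UPTODATE, &flags);
	bool locked = test_bit(R5_LOCKED, &flags);

	if (!uptodate)
		return locked ? "Want" : "Empty";	/* read in flight / idle */
	return locked ? "Dirty" : "Clean";		/* write in flight / stable */
}
/*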
49 * There is one possibility that is not covered by these states. That
50 * is if one drive has failed and there is a spare being rebuilt. We
51 * can't distinguish between a clean block that has been generated
52 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
54 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
55 * is set whenever a write is scheduled to the spare, or to the parity
56 * disc if there is no spare. A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
60 * Buffers for the md device that arrive via make_request are attached
61 * to the appropriate stripe in one of two lists linked on b_reqnext.
62 * One list (bh_read) for read requests, one (bh_write) for write.
 * There should never be more than one buffer on the two lists
 * together, but that is not guaranteed, so we allow for more.
66 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
68 * routine is called. This may happen in the end_request routine only
69 * if the buffer has just successfully been read. end_request should
70 * remove the buffers from the list and then set the Uptodate bit on
71 * the buffer. Other threads may do this only if they first check
72 * that the Uptodate bit is set. Once they have checked that they may
73 * take buffers off the read queue.
75 * When a buffer on the write list is committed for write it is copied
76 * into the cache buffer, which is then marked dirty, and moved onto a
77 * third list, the written list (bh_written). Once both the parity
78 * block and the cached buffer are successfully written, any buffer on
79 * a written list can be returned with b_end_io.
81 * The write list and read list both act as fifos. The read list,
82 * write list and written list are protected by the device_lock.
83 * The device_lock is only for list manipulations and will only be
84 * held for a very short time. It can be claimed from interrupts.
87 * Stripes in the stripe cache can be on one of two lists (or on
88 * neither). The "inactive_list" contains stripes which are not
89 * currently being used for any request. They can freely be reused
90 * for another stripe. The "handle_list" contains stripes that need
91 * to be handled in some way. Both of these are fifo queues. Each
92 * stripe is also (potentially) linked to a hash bucket in the hash
93 * table so that it can be found by sector number. Stripes that are
94 * not hashed must be on the inactive_list, and will normally be at
95 * the front. All stripes start life this way.
 * The inactive_list, handle_list and hash bucket lists are all
 * protected by the device_lock.
99 * - stripes have a reference counter. If count==0, they are on a list.
100 * - If a stripe might need handling, STRIPE_HANDLE is set.
101 * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
102 * handle_list else inactive_list
104 * This, combined with the fact that STRIPE_HANDLE is only ever
105 * cleared while a stripe has a non-zero count means that if the
106 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list, and if refcount is 0 and STRIPE_HANDLE is not set, then
108 * the stripe is on inactive_list.
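 *
 * A minimal sketch of that invariant (hypothetical helper, not the real
 * release path; struct r5conf and struct stripe_head are defined later
 * in this file): once the count reaches zero, STRIPE_HANDLE alone
 * decides which list the stripe goes on.
 */
static inline struct list_head *r5_idle_stripe_list(struct r5conf *conf,
						    struct stripe_head *sh)
{
	/* caller holds device_lock and has just seen count reach zero */
	if (test_bit(STRIPE_HANDLE, &sh->state))
		return &conf->handle_list;
	return &conf->inactive_list[sh->hash_lock_index];
}
/*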
110 * The possible transitions are:
111 * activate an unhashed/inactive stripe (get_active_stripe())
112 * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
113 * activate a hashed, possibly active stripe (get_active_stripe())
114 * lockdev check-hash if(!cnt++)unlink-stripe unlockdev
115 * attach a request to an active stripe (add_stripe_bh())
116 * lockdev attach-buffer unlockdev
117 * handle a stripe (handle_stripe())
118 * setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
119 * (lockdev check-buffers unlockdev) ..
121 * record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
122 * release an active stripe (release_stripe())
123 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 * The refcount counts each thread that has activated the stripe,
126 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
130 * The stripe operations are:
131 * -copying data between the stripe cache and user application buffers
132 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 *  read-modify-write)
135 * -checking parity correctness
136 * -running i/o to disk
137 * These operations are carried out by raid5_run_ops which uses the async_tx
138 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation, handle_stripe sets the pending bit for the
140 * operation and increments the count. raid5_run_ops is then run whenever
141 * the count is non-zero.
142 * There are some critical dependencies between the operations that prevent some
143 * from being requested while another is in flight.
144 * 1/ Parity check operations destroy the in cache version of the parity block,
145 * so we prevent parity dependent operations like writes and compute_blocks
146 * from starting while a check is in progress. Some dma engines can perform
147 * the check without damaging the parity block, in these cases the parity
148 * block is re-marked up to date (assuming the check was successful) and is
149 * not re-read from disk.
150 * 2/ When a write operation is requested we immediately lock the affected
151 * blocks, and mark them as not up to date. This causes new read requests
152 * to be held off, as well as parity checks and compute block operations.
153 * 3/ Once a compute block operation has been requested handle_stripe treats
 * that block as if it is up to date.  raid5_run_ops guarantees that any
155 * operation that is dependent on the compute block result is initiated after
156 * the compute block completes.
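 *
 * Sketch of rule 3/ (hypothetical helper, not code from this driver;
 * struct r5dev and the flag bits are declared later in this file):
 * handle_stripe may treat a block with a compute scheduled as if it
 * were already up to date, because raid5_run_ops orders dependent
 * operations after the compute completes.
 */
static inline bool r5dev_effectively_uptodate(struct r5dev *dev)
{
	return test_bit(R5_UPTODATE, &dev->flags) ||
	       test_bit(R5_Wantcompute, &dev->flags);
}
/*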
 * Operations state - intermediate states that are visible outside of
 * STRIPE_ACTIVE.
162 * In general _idle indicates nothing is running, _run indicates a data
163 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon.  Simple operations like biofill and
 * compute, which have only _idle and _run states, are indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN).
 */
/**
 * enum check_states - handles syncing / repairing a stripe
170 * @check_state_idle - check operations are quiesced
171 * @check_state_run - check operation is running
 * @check_state_check_result - set outside lock when check result is valid
173 * @check_state_compute_run - check failed and we are repairing
174 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
178 check_state_run, /* xor parity check */
179 check_state_run_q, /* q-parity check */
180 check_state_run_pq, /* pq dual parity check */
181 check_state_check_result,
182 check_state_compute_run, /* parity repair */
	check_state_compute_result,
};
/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
189 enum reconstruct_states {
190 reconstruct_state_idle = 0,
191 reconstruct_state_prexor_drain_run, /* prexor-write */
192 reconstruct_state_drain_run, /* write */
193 reconstruct_state_run, /* expand */
194 reconstruct_state_prexor_drain_result,
195 reconstruct_state_drain_result,
	reconstruct_state_result,
};
#define DEFAULT_STRIPE_SIZE	4096

struct stripe_head {
201 struct hlist_node hash;
202 struct list_head lru; /* inactive_list or handle_list */
203 struct llist_node release_list;
204 struct r5conf *raid_conf;
	short			generation;	/* increments with every
						 * reshape */
207 sector_t sector; /* sector of this row */
208 short pd_idx; /* parity disk index */
209 short qd_idx; /* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
211 short hash_lock_index;
212 unsigned long state; /* state flags */
213 atomic_t count; /* nr of active thread/requests */
214 int bm_seq; /* sequence number for bitmap flushes */
215 int disks; /* disks in stripe */
216 int overwrite_disks; /* total overwrite disks in stripe,
217 * this is only checked when stripe
218 * has STRIPE_BATCH_READY
220 enum check_states check_state;
221 enum reconstruct_states reconstruct_state;
222 spinlock_t stripe_lock;
224 struct r5worker_group *group;
226 struct stripe_head *batch_head; /* protected by stripe lock */
227 spinlock_t batch_lock; /* only header's lock is useful */
228 struct list_head batch_list; /* protected by head's batch lock*/
231 struct r5l_io_unit *log_io;
232 struct ppl_io_unit *ppl_io;
235 struct list_head log_list;
236 sector_t log_start; /* first meta block on the journal */
237 struct list_head r5c; /* for r5c_cache->stripe_in_journal */
239 struct page *ppl_page; /* partial parity of this stripe */
	/**
	 * struct stripe_operations
242 * @target - STRIPE_OP_COMPUTE_BLK target
243 * @target2 - 2nd compute target in the raid6 case
244 * @zero_sum_result - P and Q verification flags
	 * @request - async service request flags for raid_run_ops
	 */
247 struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;
252 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	/* These pages will be used by bios in dev[i] */
	struct page	**pages;
	int	nr_pages;	/* page array size */
	int	stripes_per_page;
#endif
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
262 struct bio req, rreq;
263 struct bio_vec vec, rvec;
264 struct page *page, *orig_page;
265 unsigned int offset; /* offset of the page */
266 struct bio *toread, *read, *towrite, *written;
		sector_t	sector;		/* sector of this page */
		unsigned long	flags;
270 unsigned short write_hint;
	} dev[1]; /* allocated with extra space depending on RAID geometry */
};
/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 * for handle_stripe.
 */
277 struct stripe_head_state {
278 /* 'syncing' means that we need to read all devices, either
279 * to check/correct parity, or to reconstruct a missing device.
280 * 'replacing' means we are replacing one or more drives and
281 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
284 int syncing, expanding, expanded, replacing;
285 int locked, uptodate, to_read, to_write, failed, written;
286 int to_fill, compute, req_compute, non_overwrite;
287 int injournal, just_cached;
289 int p_failed, q_failed;
290 int dec_preread_active;
291 unsigned long ops_request;
293 struct md_rdev *blocked_rdev;
294 int handle_bad_blocks;
	int waiting_extra_page;
};
/* Flags for struct r5dev.flags */
enum r5dev_flags {
301 R5_UPTODATE, /* page contains current data */
302 R5_LOCKED, /* IO has been submitted on "req" */
303 R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
304 R5_OVERWRITE, /* towrite covers whole page */
305 /* and some that are internal to handle_stripe */
306 R5_Insync, /* rdev && rdev->in_sync at start */
307 R5_Wantread, /* want to schedule a read */
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
311 R5_ReadNoMerge, /* prevent bio from merging in block-layer */
312 R5_ReadError, /* seen a read error here recently */
313 R5_ReWrite, /* have tried to over-write the readerror */
315 R5_Expanded, /* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
322 R5_Wantdrain, /* dev->towrite needs to be drained */
323 R5_WantFUA, /* Write should be FUA */
324 R5_SyncIO, /* The IO is sync */
325 R5_WriteError, /* got a write error - need to record it */
326 R5_MadeGood, /* A bad block has been fixed by writing to it */
327 R5_ReadRepl, /* Will/did read from replacement rather than orig */
328 R5_MadeGoodRepl,/* A bad block on the replacement device has been
329 * fixed by writing to it */
330 R5_NeedReplace, /* This device has a replacement which is not
331 * up-to-date at this stripe. */
332 R5_WantReplace, /* We need to update the replacement, we have read
333 * data in, and now is a good time to write it out.
335 R5_Discard, /* Discard the stripe */
336 R5_SkipCopy, /* Don't copy data from bio to stripe cache */
337 R5_InJournal, /* data being written is in the journal device.
338 * if R5_InJournal is set for parity pd_idx, all the
			 * data and parity being written are in the journal
			 * device
			 */
342 R5_OrigPageUPTDODATE, /* with write back cache, we read old data into
343 * dev->orig_page for prexor. When this flag is
			 * set, orig_page contains latest data in the
			 * raid disk.
			 */
};

/*
 * Stripe state
 */
enum {
	STRIPE_SYNC_REQUESTED,
359 STRIPE_PREREAD_ACTIVE,
364 STRIPE_EXPAND_SOURCE,
366 STRIPE_IO_STARTED, /* do not count towards 'bypass_count' */
367 STRIPE_FULL_WRITE, /* all blocks are set to be overwritten */
370 STRIPE_ON_UNPLUG_LIST,
372 STRIPE_ON_RELEASE_LIST,
	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
				 * to batch yet.
				 */
378 STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
379 * this bit is used in two scenarios:
				 * 1. write-out phase
				 *  set in first entry of r5l_write_stripe
383 * clear in second entry of r5l_write_stripe
384 * used to bypass logic in handle_stripe
				 * 2. caching phase
				 *  set in r5c_try_caching_write()
388 * clear when journal write is done
389 * used to initiate r5c_cache_data()
390 * also used to bypass logic in handle_stripe
392 STRIPE_R5C_CACHING, /* the stripe is in caching phase
393 * see more detail in the raid5-cache.c
395 STRIPE_R5C_PARTIAL_STRIPE, /* in r5c cache (to-be/being handled or
396 * in conf->r5c_partial_stripe_list)
398 STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or
399 * in conf->r5c_full_stripe_list)
	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
};
404 #define STRIPE_EXPAND_SYNC_FLAGS \
405 ((1 << STRIPE_EXPAND_SOURCE) |\
406 (1 << STRIPE_EXPAND_READY) |\
407 (1 << STRIPE_EXPANDING) |\
408 (1 << STRIPE_SYNC_REQUESTED))
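
/*
 * Example (assumed usage, not from this driver): testing a stripe
 * against the combined mask, e.g. when a reshape pass sweeps over it.
 */
static inline bool stripe_has_expand_sync_work(struct stripe_head *sh)
{
	return (sh->state & STRIPE_EXPAND_SYNC_FLAGS) != 0;
}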
/*
 * Operation request flags
 */
enum {
414 STRIPE_OP_COMPUTE_BLK,
417 STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_PARTIAL_PARITY,
};

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
436 SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};

/*
 * To improve write throughput, we need to delay the handling of some
443 * stripes until there has been a chance that several write requests
444 * for the one stripe have all been collected.
445 * In particular, any write request that would require pre-reading
446 * is put on a "delayed" queue until there are no stripes currently
447 * in a pre-read phase. Further, if the "delayed" queue is empty when
448 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. the unplug_io_fn() is called).
451 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
452 * it to the count of prereading stripes.
453 * When write is initiated, or the stripe refcnt == 0 (just in case) we
454 * clear the PREREAD_ACTIVE flag and decrement the count
455 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag
 * and set PREREAD_ACTIVE.
 * In stripe_handle, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED, which sends the stripe to
 * the delayed queue.
 * HANDLE gets cleared if stripe_handle leaves nothing locked.
 */
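
/*
 * Condensed sketch of that policy (hypothetical helper, not the real
 * handle_stripe() logic): a write that needs pre-read proceeds only
 * while prereads are active; otherwise the stripe is parked on the
 * delayed queue.
 */
static inline bool r5_may_start_preread(struct stripe_head *sh)
{
	if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		return true;			/* already counted; go ahead */
	set_bit(STRIPE_DELAYED, &sh->state);	/* park until unplug */
	return false;
}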
463 /* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
464 * There are three safe ways to access disk_info.rdev.
465 * 1/ when holding mddev->reconfig_mutex
466 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
467 * is called as part of performing resync/recovery/reshape.
468 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping
 *    the RCU lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and if
 * it has been incremented, the pointer is put back in .rdev.
 */
struct disk_info {
	struct md_rdev __rcu	*rdev;
477 struct md_rdev __rcu *replacement;
	struct page	*extra_page; /* extra page to use in prexor */
};
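
/*
 * Sketch of access rule 3/ above, the pattern used by readers that hold
 * neither reconfig_mutex nor a resync context (illustrative and
 * simplified; the helper name is hypothetical):
 */
static inline struct md_rdev *r5_pin_rdev(struct disk_info *di)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(di->rdev);
	if (rdev)
		atomic_inc(&rdev->nr_pending);	/* pin before dropping RCU */
	rcu_read_unlock();

	return rdev;	/* caller releases it with rdev_dec_pending() */
}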
485 #define NR_STRIPES 256
487 #if PAGE_SIZE == DEFAULT_STRIPE_SIZE
488 #define STRIPE_SIZE PAGE_SIZE
489 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE >> 9)
#endif
493 #define IO_THRESHOLD 1
494 #define BYPASS_THRESHOLD 1
495 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
496 #define HASH_MASK (NR_HASH - 1)
497 #define MAX_STRIPE_BATCH 8
499 /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
500 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
504 #define NR_STRIPE_HASH_LOCKS 8
505 #define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
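
/*
 * Illustrative only (the real helpers live in raid5.c, and the r5conf
 * fields and RAID5_STRIPE_SHIFT() accessor are defined later in this
 * file): how a stripe's sector selects a hash bucket, and how buckets
 * map onto the NR_STRIPE_HASH_LOCKS locks.
 */
static inline struct hlist_head *r5_sector_to_bucket(struct r5conf *conf,
						     sector_t sect)
{
	return &conf->stripe_hashtbl[(sect >> RAID5_STRIPE_SHIFT(conf)) &
				     HASH_MASK];
}

static inline spinlock_t *r5_sector_to_hash_lock(struct r5conf *conf,
						 sector_t sect)
{
	int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK;

	return &conf->hash_locks[hash];
}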
struct r5worker {
	struct work_struct	work;
509 struct r5worker_group *group;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool			working;
};
514 struct r5worker_group {
515 struct list_head handle_list;
516 struct list_head loprio_list;
	struct r5worker		*workers;
	int			stripes_cnt;
};
523 * r5c journal modes of the array: write-back or write-through.
 * write-through mode behaves identically to the existing log-only
 * implementation.
 */
527 enum r5c_journal_mode {
528 R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
	R5C_JOURNAL_MODE_WRITE_BACK = 1,
};
532 enum r5_cache_state {
533 R5_INACTIVE_BLOCKED, /* release of inactive stripes blocked,
534 * waiting for 25% to be free
	R5_ALLOC_MORE,		/* It might help to allocate another
				 * stripe.
				 */
539 R5_DID_ALLOC, /* A stripe was allocated, don't allocate
540 * more until at least one has been
				 * released.  This avoids flooding
				 * the cache.
				 */
544 R5C_LOG_TIGHT, /* log device space tight, need to
545 * prioritize stripes at last_checkpoint
547 R5C_LOG_CRITICAL, /* log device is running out of space,
				 * only process stripes that are already
				 * occupying the log
				 */
	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
				 * to prexor
				 */
};
556 #define PENDING_IO_MAX 512
557 #define PENDING_IO_ONE_FLUSH 128
558 struct r5pending_data {
559 struct list_head sibling;
560 sector_t sector; /* stripe sector */
	struct bio_list		bios;
};
564 struct raid5_percpu {
565 struct page *spare_page; /* Used when checking P/Q in raid6 */
566 void *scribble; /* space for constructing buffer
				    * lists and performing address
				    * conversions
				    */
	int		scribble_obj_size;
	local_lock_t	lock;
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
576 /* only protect corresponding hash list and inactive_list */
577 spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
580 int level, algorithm, rmw_level;
585 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
586 unsigned long stripe_size;
587 unsigned int stripe_shift;
	unsigned long	stripe_sectors;
#endif
591 /* reshape_progress is the leading edge of a 'reshape'
592 * It has value MaxSector when no reshape is happening
593 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
596 sector_t reshape_progress;
597 /* reshape_safe is the trailing edge of a reshape. We know that
598 * before (or after) this address, all reshape has completed.
600 sector_t reshape_safe;
601 int previous_raid_disks;
602 int prev_chunk_sectors;
604 short generation; /* increments with every reshape */
605 seqcount_spinlock_t gen_lock; /* lock against generation changes */
	unsigned long	reshape_checkpoint;  /* Time we last updated
					      * metadata */
	long long	min_offset_diff; /* minimum difference between
					  * data_offset and
					  * new_data_offset across all
611 * devices. May be negative,
					  * but is closest to zero.
					  */
615 struct list_head handle_list; /* stripes needing handling */
616 struct list_head loprio_list; /* low priority stripes */
617 struct list_head hold_list; /* preread ready stripes */
618 struct list_head delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delayed awaiting bitmap update */
620 struct bio *retry_read_aligned; /* currently retrying aligned bios */
621 unsigned int retry_read_offset; /* sector offset into retry_read_aligned */
622 struct bio *retry_read_aligned_list; /* aligned bios retry list */
623 atomic_t preread_active_stripes; /* stripes with scheduled io */
624 atomic_t active_aligned_reads;
625 atomic_t pending_full_writes; /* full write backlog */
626 int bypass_count; /* bypassed prereads */
627 int bypass_threshold; /* preread nice */
628 int skip_copy; /* Don't copy data from bio to stripe cache */
629 struct list_head *last_hold; /* detect hold_list promotions */
631 atomic_t reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
636 char cache_name[2][32];
637 struct kmem_cache *slab_cache; /* for allocating stripes */
638 struct mutex cache_size_mutex; /* Protect changes to cache size */
640 int seq_flush, seq_write;
	int			fullsync;  /* set to 1 if a full sync is needed
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
647 int recovery_disabled;
648 /* per cpu variables */
649 struct raid5_percpu __percpu *percpu;
651 int scribble_sectors;
652 struct hlist_node node;
657 atomic_t active_stripes;
658 struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
660 atomic_t r5c_cached_full_stripes;
661 struct list_head r5c_full_stripe_list;
662 atomic_t r5c_cached_partial_stripes;
663 struct list_head r5c_partial_stripe_list;
664 atomic_t r5c_flushing_full_stripes;
665 atomic_t r5c_flushing_partial_stripes;
667 atomic_t empty_inactive_list_nr;
668 struct llist_head released_stripes;
669 wait_queue_head_t wait_for_quiescent;
670 wait_queue_head_t wait_for_stripe;
671 wait_queue_head_t wait_for_overlap;
672 unsigned long cache_state;
673 struct shrinker shrinker;
674 int pool_size; /* number of disks in stripeheads in pool */
675 spinlock_t device_lock;
676 struct disk_info *disks;
677 struct bio_set bio_split;
679 /* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
682 struct md_thread *thread;
683 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
684 struct r5worker_group *worker_groups;
686 int worker_cnt_per_group;
690 spinlock_t pending_bios_lock;
691 bool batch_bio_dispatch;
692 struct r5pending_data *pending_data;
693 struct list_head free_list;
694 struct list_head pending_list;
695 int pending_data_cnt;
	struct r5pending_data	*next_pending_data;
};
699 #if PAGE_SIZE == DEFAULT_STRIPE_SIZE
700 #define RAID5_STRIPE_SIZE(conf) STRIPE_SIZE
701 #define RAID5_STRIPE_SHIFT(conf) STRIPE_SHIFT
702 #define RAID5_STRIPE_SECTORS(conf) STRIPE_SECTORS
#else
#define RAID5_STRIPE_SIZE(conf)		((conf)->stripe_size)
#define RAID5_STRIPE_SHIFT(conf)	((conf)->stripe_shift)
#define RAID5_STRIPE_SECTORS(conf)	((conf)->stripe_sectors)
#endif
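
/*
 * Illustrative use of the accessors above (hypothetical helper):
 * rounding a sector down to the start of its per-device stripe chunk.
 * With the default 4KiB stripe size this masks off the low three bits.
 */
static inline sector_t r5_stripe_start_sector(struct r5conf *conf,
					      sector_t sect)
{
	return sect & ~((sector_t)RAID5_STRIPE_SECTORS(conf) - 1);
}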
709 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
710 * order without overlap. There may be several bio's per stripe+device, and
711 * a bio could span several devices.
712 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
715 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio,
				      sector_t sector)
{
	if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
		return bio->bi_next;
	else
		return NULL;
}
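
/*
 * Example walk (hypothetical helper): counting the bios queued against
 * one stripe+device, stopping when a bio extends past the device as
 * described above.
 */
static inline int r5_count_towrite_bios(struct r5conf *conf,
					struct r5dev *dev)
{
	struct bio *bio;
	int n = 0;

	for (bio = dev->towrite; bio;
	     bio = r5_next_bio(conf, bio, dev->sector))
		n++;

	return n;
}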
/*
 * Our supported algorithms
 */
729 #define ALGORITHM_LEFT_ASYMMETRIC 0 /* Rotating Parity N with Data Restart */
730 #define ALGORITHM_RIGHT_ASYMMETRIC 1 /* Rotating Parity 0 with Data Restart */
731 #define ALGORITHM_LEFT_SYMMETRIC 2 /* Rotating Parity N with Data Continuation */
732 #define ALGORITHM_RIGHT_SYMMETRIC 3 /* Rotating Parity 0 with Data Continuation */
734 /* Define non-rotating (raid4) algorithms. These allow
 * conversion of raid4 to raid5.
 */
737 #define ALGORITHM_PARITY_0 4 /* P or P,Q are initial devices */
738 #define ALGORITHM_PARITY_N 5 /* P or P,Q are final devices. */
740 /* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
741 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
747 * These layouts are from the DDFv1.2 spec.
748 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */
752 #define ALGORITHM_ROTATING_ZERO_RESTART 8 /* DDF PRL=6 RLQ=1 */
753 #define ALGORITHM_ROTATING_N_RESTART 9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */
756 /* For every RAID5 algorithm we define a RAID6 algorithm
757 * with exactly the same layout for data and parity, and
758 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6.
 */
761 #define ALGORITHM_LEFT_ASYMMETRIC_6 16
762 #define ALGORITHM_RIGHT_ASYMMETRIC_6 17
763 #define ALGORITHM_LEFT_SYMMETRIC_6 18
764 #define ALGORITHM_RIGHT_SYMMETRIC_6 19
765 #define ALGORITHM_PARITY_0_6 20
766 #define ALGORITHM_PARITY_N_6 ALGORITHM_PARITY_N
static inline int algorithm_valid_raid5(int layout)
{
	return ((layout >= 0) && (layout <= 5)) ||
	       (layout >= 16 && layout <= 20);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5) ||
	       (layout >= 8 && layout <= 10) ||
	       (layout >= 16 && layout <= 20);
}
static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}
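
/*
 * Example (illustrative): what a level-aware layout check reduces to
 * when built from the two validators above (hypothetical helper).
 */
static inline int r5_layout_is_valid(int level, int layout)
{
	if (level == 5)
		return algorithm_valid_raid5(layout);
	if (level == 6)
		return algorithm_valid_raid6(layout);

	return 0;	/* other levels are validated elsewhere */
}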
787 #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
/*
 * Return offset of the corresponding page for r5dev.
 */
static inline int raid5_get_page_offset(struct stripe_head *sh, int disk_idx)
{
	return (disk_idx % sh->stripes_per_page) * RAID5_STRIPE_SIZE(sh->raid_conf);
}
/*
 * Return corresponding page address for r5dev.
 */
static inline struct page *
raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
{
	return sh->pages[disk_idx / sh->stripes_per_page];
}
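
/*
 * Illustrative combination of the two helpers above (hypothetical):
 * resolving the kernel address backing one r5dev's data.
 */
static inline void *raid5_get_dev_addr(struct stripe_head *sh, int disk_idx)
{
	return page_address(raid5_get_dev_page(sh, disk_idx)) +
	       raid5_get_page_offset(sh, disk_idx);
}
#endif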
806 void md_raid5_kick_device(struct r5conf *conf);
807 int raid5_set_cache_size(struct mddev *mddev, int size);
808 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
809 void raid5_release_stripe(struct stripe_head *sh);
810 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
811 int previous, int *dd_idx, struct stripe_head *sh);
813 struct stripe_request_ctx;
814 /* get stripe from previous generation (when reshaping) */
815 #define R5_GAS_PREVIOUS (1 << 0)
816 /* do not block waiting for a free stripe */
817 #define R5_GAS_NOBLOCK (1 << 1)
818 /* do not block waiting for quiesce to be released */
819 #define R5_GAS_NOQUIESCE (1 << 2)
820 struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
		struct stripe_request_ctx *ctx, sector_t sector,
		unsigned int flags);
824 int raid5_calc_degraded(struct r5conf *conf);
int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);

#endif