}
static struct dma_async_tx_descriptor *
-ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
+ unsigned long pending)
{
int disks = sh->disks;
int pd_idx = sh->pd_idx, i;
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (Wantprexor)
*/
- int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+ int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
pr_debug("%s: stripe %llu\n", __FUNCTION__,
(unsigned long long)sh->sector);
}
static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
+ unsigned long pending)
{
/* kernel stack size limits the total number of disks */
int disks = sh->disks;
struct page *xor_srcs[disks];
int count = 0, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
- int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+ int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
unsigned long flags;
dma_async_tx_callback callback;
}
/* check whether this postxor is part of a write */
- callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ?
+ callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
ops_complete_write : ops_complete_postxor;
/* 1/ if we prexor'd then the dest is reused as a source
* 2/ if we did not prexor then we are redoing the parity
* set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
* for the synchronous xor case
*/
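Both helper changes above follow the same pattern: the prexor state and the choice of completion routine are read from the pending value handed in by the caller, not from the live sh->ops.pending field. The sketch below is a minimal, stand-alone illustration of that pattern, not kernel code; the demo_* names, the bit_is_set() helper and the simplified op bits are all invented for this example.

#include <stdio.h>

/* stand-ins for the STRIPE_OP_* bit numbers used by the patch */
enum { DEMO_OP_PREXOR, DEMO_OP_BIODRAIN, DEMO_OP_POSTXOR };

/* stand-in for the kernel's test_bit() on a single word */
static int bit_is_set(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

/* mirrors ops_run_biodrain(): the prexor decision comes from the
 * snapshot argument, never from a live per-stripe field */
static void demo_run_biodrain(unsigned long pending)
{
	int prexor = bit_is_set(DEMO_OP_PREXOR, &pending);

	printf("biodrain: prexor=%d\n", prexor);
}

/* mirrors ops_run_postxor(): the completion routine is chosen from
 * the same snapshot */
static void demo_run_postxor(unsigned long pending)
{
	const char *callback = bit_is_set(DEMO_OP_BIODRAIN, &pending) ?
		"complete_write" : "complete_postxor";

	printf("postxor: completion=%s\n", callback);
}

int main(void)
{
	unsigned long pending = (1UL << DEMO_OP_BIODRAIN) |
				(1UL << DEMO_OP_POSTXOR);

	demo_run_biodrain(pending);
	demo_run_postxor(pending);
	return 0;
}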
if (test_bit(STRIPE_OP_PREXOR, &pending))
tx = ops_run_prexor(sh, tx);
if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
- tx = ops_run_biodrain(sh, tx);
+ tx = ops_run_biodrain(sh, tx, pending);
overlap_clear++;
}
if (test_bit(STRIPE_OP_POSTXOR, &pending))
- ops_run_postxor(sh, tx);
+ ops_run_postxor(sh, tx, pending);
if (test_bit(STRIPE_OP_CHECK, &pending))
ops_run_check(sh);
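The call sites above all test the same pending word that is now threaded into the helpers, presumably inside raid5_run_ops or an equivalent dispatcher that receives a snapshot of sh->ops.pending; the surrounding function is not shown in this excerpt, so treat that name as an assumption. The mechanical effect is that bits set on the live field after the snapshot is taken do not change which operations the current chain runs. A self-contained user-space sketch of that caller-side behaviour follows; all demo_* names are invented.

#include <stdio.h>

enum { DEMO_OP_BIODRAIN, DEMO_OP_POSTXOR, DEMO_OP_CHECK };

/* stands in for the part of struct stripe_head that matters here */
struct demo_stripe {
	unsigned long ops_pending;	/* live field, may gain bits at any time */
};

static int bit_is_set(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

/* same dispatch shape as the final hunk: every decision, and every value
 * handed to a helper, comes from the snapshot rather than the live field */
static void demo_run_ops(struct demo_stripe *sh, unsigned long pending)
{
	(void)sh;
	if (bit_is_set(DEMO_OP_BIODRAIN, &pending))
		printf("run biodrain(pending)\n");
	if (bit_is_set(DEMO_OP_POSTXOR, &pending))
		printf("run postxor(pending)\n");
	if (bit_is_set(DEMO_OP_CHECK, &pending))
		printf("run check\n");
}

int main(void)
{
	struct demo_stripe sh = { 0 };
	unsigned long pending;

	sh.ops_pending |= 1UL << DEMO_OP_BIODRAIN;
	sh.ops_pending |= 1UL << DEMO_OP_POSTXOR;

	/* snapshot taken once by the caller */
	pending = sh.ops_pending;

	/* a bit set afterwards lands only on the live field and does not
	 * affect the chain built from the snapshot */
	sh.ops_pending |= 1UL << DEMO_OP_CHECK;

	demo_run_ops(&sh, pending);	/* prints biodrain and postxor only */
	return 0;
}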