}
}
+/*
+ * Take a reference on io by incrementing its pending count.
+ * Centralizes the open-coded atomic_inc(&io->pending) call sites
+ * replaced elsewhere in this patch.
+ */
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+ atomic_inc(&io->pending);
+}
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
struct bio *base_bio = io->base_bio;
struct bio *clone;
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
/*
* The block layer might modify the bvec array, so always
if (async)
kcryptd_queue_io(io);
else {
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
generic_make_request(clone);
}
}
if (unlikely(r < 0))
return;
} else
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
/* out of memory -> run queues */
if (unlikely(remaining)) {
/*
* Prevent io from disappearing until this function completes.
*/
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
kcryptd_crypt_write_convert_loop(io);
struct crypt_config *cc = io->target->private;
int r = 0;
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
io->sector);