/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */
#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
        mempool_t pool;
        struct bio_set bios;
};
/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
        unsigned long error_bits;
        atomic_t count;
        struct dm_io_client *client;
        io_notify_fn callback;
        void *context;
        void *vma_invalidate_address;
        unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
        struct dm_io_client *client;
        unsigned min_ios = dm_get_reserved_bio_based_ios();
        int ret;

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);
        ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
        if (ret)
                goto bad;
        ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
        if (ret)
                goto bad;
        return client;
bad:
        mempool_exit(&client->pool);
        kfree(client);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);
void dm_io_client_destroy(struct dm_io_client *client)
{
        mempool_exit(&client->pool);
        bioset_exit(&client->bios);
        kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
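/*
 * Illustrative sketch, not part of this file: a target typically creates
 * one client in its constructor and destroys it in its destructor.  The
 * 'tc' context and its field name below are hypothetical.
 *
 *	tc->io_client = dm_io_client_create();
 *	if (IS_ERR(tc->io_client))
 *		return PTR_ERR(tc->io_client);
 *	...
 *	dm_io_client_destroy(tc->io_client);
 */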
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
                                       unsigned region)
{
        if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
                DMCRIT("Unaligned struct io pointer %p", io);
                BUG();
        }

        bio->bi_private = (void *)((unsigned long)io | region);
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
                                            unsigned *region)
{
        unsigned long val = (unsigned long)bio->bi_private;

        *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
        *region = val & (DM_IO_MAX_REGIONS - 1);
}
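/*
 * Worked example (illustrative, not from the original source): with
 * DM_IO_MAX_REGIONS == 64 on a 64-bit build, 'struct io' is 64-byte
 * aligned, so the low six bits of its address are zero.  Storing
 * io == 0x...9c0 with region == 5 puts 0x...9c5 in bi_private;
 * masking with ~(64 - 1) recovers the pointer and masking with
 * (64 - 1) recovers the region.
 */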
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
        unsigned long error_bits = io->error_bits;
        io_notify_fn fn = io->callback;
        void *context = io->context;

        if (io->vma_invalidate_size)
                invalidate_kernel_vmap_range(io->vma_invalidate_address,
                                             io->vma_invalidate_size);

        mempool_free(io, &io->client->pool);
        fn(error_bits, context);
}
static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
        if (error)
                set_bit(region, &io->error_bits);
        if (atomic_dec_and_test(&io->count))
                complete_io(io);
}
static void endio(struct bio *bio)
{
        struct io *io;
        unsigned region;
        blk_status_t error;

        if (bio->bi_status && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);

        /* The bio destructor in bio_put() may use the io object. */
        retrieve_io_and_region_from_bio(bio, &io, &region);
        error = bio->bi_status;
        bio_put(bio);
        dec_count(io, region, error);
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
        void (*get_page)(struct dpages *dp,
                         struct page **p, unsigned long *len, unsigned *offset);
        void (*next_page)(struct dpages *dp);
        union {
                unsigned context_u;
                struct bvec_iter context_bi;
        };
        void *context_ptr;
        void *vma_invalidate_address;
        unsigned long vma_invalidate_size;
};
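/*
 * Illustrative sketch, not part of this file: do_region() below consumes a
 * provider with a loop of roughly this shape ('bytes_left' is a hypothetical
 * name), so each *_dp_init() variant only has to describe where the next
 * page comes from:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	len = min(len, bytes_left);
 *	... add 'len' bytes of 'page' at 'offset' to a bio ...
 *	dp->next_page(dp);
 */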
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        unsigned o = dp->context_u;
        struct page_list *pl = (struct page_list *) dp->context_ptr;

        *p = pl->page;
        *len = PAGE_SIZE - o;
        *offset = o;
}
static void list_next_page(struct dpages *dp)
{
        struct page_list *pl = (struct page_list *) dp->context_ptr;
        dp->context_ptr = pl->next;
        dp->context_u = 0;
}
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
                         unsigned long *len, unsigned *offset)
{
        struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
                                             dp->context_bi);

        *p = bvec.bv_page;
        *len = bvec.bv_len;
        *offset = bvec.bv_offset;

        /* avoid figuring it out again in bio_next_page() */
        dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}
static void bio_next_page(struct dpages *dp)
{
        unsigned int len = (unsigned int)dp->context_bi.bi_sector;

        bvec_iter_advance((struct bio_vec *)dp->context_ptr,
                          &dp->context_bi, len);
}
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
        dp->get_page = bio_get_page;
        dp->next_page = bio_next_page;

        /*
         * We just use the bvec iterator to retrieve pages, so it is OK to
         * access the bvec table directly here.
         */
        dp->context_ptr = bio->bi_io_vec;
        dp->context_bi = bio->bi_iter;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
                        struct page **p, unsigned long *len, unsigned *offset)
{
        *p = vmalloc_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}
static void vm_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}
static void vm_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = offset_in_page(data);
        dp->context_ptr = data;
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
                        unsigned *offset)
{
        *p = virt_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}
static void km_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}
static void km_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = km_get_page;
        dp->next_page = km_next_page;
        dp->context_u = offset_in_page(data);
        dp->context_ptr = data;
}
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
                      struct dm_io_region *where, struct dpages *dp,
                      struct io *io)
{
        struct bio *bio;
        struct page *page;
        unsigned long len;
        unsigned offset;
        unsigned num_bvecs;
        sector_t remaining = where->count;
        struct request_queue *q = bdev_get_queue(where->bdev);
        sector_t num_sectors;
        unsigned int special_cmd_max_sectors;
        /*
         * Reject unsupported discard and write zeroes requests.
         */
        if (op == REQ_OP_DISCARD)
                special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
        else if (op == REQ_OP_WRITE_ZEROES)
                special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
        if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
            special_cmd_max_sectors == 0) {
                atomic_inc(&io->count);
                dec_count(io, region, BLK_STS_NOTSUPP);
                return;
        }
        /*
         * where->count may be zero if op holds a flush and we need to
         * send a zero-sized flush.
         */
        do {
                /* Allocate a suitably sized bio. */
                switch (op) {
                case REQ_OP_DISCARD:
                case REQ_OP_WRITE_ZEROES:
                        num_bvecs = 0;
                        break;
                default:
                        num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
                                                (PAGE_SIZE >> SECTOR_SHIFT)));
                }
                bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
                                       GFP_NOIO, &io->client->bios);
                bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
                bio->bi_end_io = endio;
                store_io_and_region_in_bio(bio, io, region);
                if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
                        num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
                        bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
                        remaining -= num_sectors;
                } else while (remaining) {
                        /*
                         * Try and add as many pages as possible.
                         */
                        dp->get_page(dp, &page, &len, &offset);
                        len = min(len, to_bytes(remaining));
                        if (!bio_add_page(bio, page, len, offset))
                                break;

                        offset = 0;
                        remaining -= to_sector(len);
                        dp->next_page(dp);
                }

                atomic_inc(&io->count);
                submit_bio(bio);
        } while (remaining);
}
static void dispatch_io(int op, int op_flags, unsigned int num_regions,
                        struct dm_io_region *where, struct dpages *dp,
                        struct io *io, int sync)
{
        int i;
        struct dpages old_pages = *dp;

        BUG_ON(num_regions > DM_IO_MAX_REGIONS);

        if (sync)
                op_flags |= REQ_SYNC;

        /*
         * For multiple regions we need to be careful to rewind
         * the dp object for each call to do_region.
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
                if (where[i].count || (op_flags & REQ_PREFLUSH))
                        do_region(op, op_flags, i, where + i, dp, io);
        }

        /*
         * Drop the extra reference that we were holding to avoid
         * the io being completed too early.
         */
        dec_count(io, 0, 0);
}
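/*
 * Reference counting sketch (illustrative, not from the original source):
 * the caller sets io->count to 1 before calling dispatch_io(), each bio
 * submitted by do_region() adds one, and the final dec_count(io, 0, 0)
 * above drops the caller's reference.  complete_io() therefore runs only
 * once every submitted bio has passed through endio(), even if some bios
 * complete while others are still being issued.
 */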
struct sync_io {
        unsigned long error_bits;
        struct completion wait;
};
static void sync_io_complete(unsigned long error, void *context)
{
        struct sync_io *sio = context;

        sio->error_bits = error;
        complete(&sio->wait);
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                   struct dm_io_region *where, int op, int op_flags,
                   struct dpages *dp, unsigned long *error_bits)
{
        struct io *io;
        struct sync_io sio;

        if (num_regions > 1 && !op_is_write(op)) {
                WARN_ON(1);
                return -EIO;
        }

        init_completion(&sio.wait);

        io = mempool_alloc(&client->pool, GFP_NOIO);
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->client = client;
        io->callback = sync_io_complete;
        io->context = &sio;
        io->vma_invalidate_address = dp->vma_invalidate_address;
        io->vma_invalidate_size = dp->vma_invalidate_size;

        dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
        wait_for_completion_io(&sio.wait);

        if (error_bits)
                *error_bits = sio.error_bits;

        return sio.error_bits ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
                    struct dm_io_region *where, int op, int op_flags,
                    struct dpages *dp, io_notify_fn fn, void *context)
{
        struct io *io;

        if (num_regions > 1 && !op_is_write(op)) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
        }

        io = mempool_alloc(&client->pool, GFP_NOIO);
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->client = client;
        io->callback = fn;
        io->context = context;
        io->vma_invalidate_address = dp->vma_invalidate_address;
        io->vma_invalidate_size = dp->vma_invalidate_size;

        dispatch_io(op, op_flags, num_regions, where, dp, io, 0);

        return 0;
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
                   unsigned long size)
{
        /* Set up dpages based on memory type */

        dp->vma_invalidate_address = NULL;
        dp->vma_invalidate_size = 0;

        switch (io_req->mem.type) {
        case DM_IO_PAGE_LIST:
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;
        case DM_IO_BIO:
                bio_dp_init(dp, io_req->mem.ptr.bio);
                break;
        case DM_IO_VMA:
                flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
                if (io_req->bi_op == REQ_OP_READ) {
                        dp->vma_invalidate_address = io_req->mem.ptr.vma;
                        dp->vma_invalidate_size = size;
                }
                vm_dp_init(dp, io_req->mem.ptr.vma);
                break;
        case DM_IO_KMEM:
                km_dp_init(dp, io_req->mem.ptr.addr);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct dm_io_region *where, unsigned long *sync_error_bits)
{
        int r;
        struct dpages dp;

        r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
        if (r)
                return r;

        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
                               io_req->bi_op, io_req->bi_op_flags, &dp,
                               sync_error_bits);

        return async_io(io_req->client, num_regions, where, io_req->bi_op,
                        io_req->bi_op_flags, &dp, io_req->notify.fn,
                        io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
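/*
 * Illustrative sketch, not part of this file: a synchronous, single-region
 * read into kernel memory through the interface above.  'client' would come
 * from dm_io_client_create() and 'data' must hold at least
 * (count << SECTOR_SHIFT) bytes; the function name is hypothetical.
 */
static int __maybe_unused example_sync_read(struct dm_io_client *client,
                                            struct block_device *bdev,
                                            sector_t sector, sector_t count,
                                            void *data)
{
        unsigned long error_bits = 0;
        struct dm_io_region where = {
                .bdev = bdev,
                .sector = sector,
                .count = count,
        };
        struct dm_io_request req = {
                .bi_op = REQ_OP_READ,
                .bi_op_flags = 0,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = data,
                .notify.fn = NULL,      /* NULL notify.fn selects sync_io() */
                .client = client,
        };

        /* Returns 0 on success, -EIO if any region saw an error. */
        return dm_io(&req, 1, &where, &error_bits);
}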
int __init dm_io_init(void)
{
        _dm_io_cache = KMEM_CACHE(io, 0);
        if (!_dm_io_cache)
                return -ENOMEM;
        return 0;
}
void dm_io_exit(void)
{
        kmem_cache_destroy(_dm_io_cache);
        _dm_io_cache = NULL;
}