2 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
4 * This file is released under the GPL.
8 #include <linux/module.h>
9 #include <linux/init.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/dax.h>
13 #include <linux/slab.h>
14 #include <linux/device-mapper.h>
16 #define DM_MSG_PREFIX "linear"
19 * Linear: maps a linear range of a device.
27 * Construct a linear mapping: <dev_path> <offset>
/*
 * linear_ctr - construct a linear mapping: <dev_path> <offset>.
 * Parses the start sector, allocates the per-target linear_c context
 * and takes a reference on the backing device.
 * NOTE(review): interior lines (braces, declarations of lc/ret/dummy,
 * return statements and the error-path cleanup) appear elided from this
 * extraction — verify against upstream drivers/md/dm-linear.c.
 */
29 static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
32 unsigned long long tmp;
/* Exactly two arguments are expected: device path and start offset. */
37 ti->error = "Invalid argument count";
41 lc = kmalloc(sizeof(*lc), GFP_KERNEL);
43 ti->error = "Cannot allocate linear context";
/*
 * "%llu%c" must produce exactly one conversion: a matched trailing
 * character means junk follows the number.  tmp != (sector_t)tmp
 * rejects offsets that do not fit in sector_t.
 */
48 if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
49 ti->error = "Invalid device sector";
/* Open the backing device with the same access mode as the table. */
54 ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
56 ti->error = "Device lookup failed";
/* Advertise flush/discard/secure-erase/write-same/write-zeroes support. */
60 ti->num_flush_bios = 1;
61 ti->num_discard_bios = 1;
62 ti->num_secure_erase_bios = 1;
63 ti->num_write_same_bios = 1;
64 ti->num_write_zeroes_bios = 1;
/*
 * linear_dtr - destroy a linear mapping: drop the reference taken on
 * the backing device in linear_ctr().
 * NOTE(review): the matching kfree(lc) and the closing brace appear
 * elided from this view — confirm the context is freed upstream.
 */
73 static void linear_dtr(struct dm_target *ti)
75 struct linear_c *lc = (struct linear_c *) ti->private;
77 dm_put_device(ti, lc->dev);
/*
 * linear_map_sector - translate a bio sector into a sector on the
 * backing device: the target-relative offset shifted by lc->start.
 */
81 static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
83 struct linear_c *lc = ti->private;
85 return lc->start + dm_target_offset(ti, bi_sector);
/*
 * linear_map_bio - redirect a bio to the backing device.
 * The sector is only remapped when the bio carries data sectors or is a
 * zone management op; empty bios (e.g. bare flushes) keep their sector.
 */
88 static void linear_map_bio(struct dm_target *ti, struct bio *bio)
90 struct linear_c *lc = ti->private;
92 bio_set_dev(bio, lc->dev->bdev);
93 if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
94 bio->bi_iter.bi_sector =
95 linear_map_sector(ti, bio->bi_iter.bi_sector);
/*
 * linear_map - .map hook: remap the bio and tell the DM core to submit
 * it to the backing device (DM_MAPIO_REMAPPED).
 */
98 static int linear_map(struct dm_target *ti, struct bio *bio)
100 linear_map_bio(ti, bio);
102 return DM_MAPIO_REMAPPED;
/*
 * linear_status - emit target status into @result.
 * STATUSTYPE_TABLE reproduces the constructor line "<dev> <start>".
 * The DMEMIT_TARGET_NAME_VERSION / key=value output at the bottom
 * belongs to a further case (presumably STATUSTYPE_IMA — its case
 * label appears elided from this view).
 * NOTE(review): the STATUSTYPE_INFO body and the break statements
 * appear elided — verify against upstream.
 */
105 static void linear_status(struct dm_target *ti, status_type_t type,
106 unsigned status_flags, char *result, unsigned maxlen)
108 struct linear_c *lc = (struct linear_c *) ti->private;
112 case STATUSTYPE_INFO:
116 case STATUSTYPE_TABLE:
117 DMEMIT("%s %llu", lc->dev->name, (unsigned long long)lc->start);
121 DMEMIT_TARGET_NAME_VERSION(ti->type);
122 DMEMIT(",device_name=%s,start=%llu;", lc->dev->name,
123 (unsigned long long)lc->start);
/*
 * linear_prepare_ioctl - hand back the backing bdev for ioctl routing.
 * Ioctls are passed straight through only when the target covers the
 * whole backing device (lengths compared in sectors); the non-zero
 * return for a partial mapping appears elided from this view.
 */
128 static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
130 struct linear_c *lc = (struct linear_c *) ti->private;
131 struct dm_dev *dev = lc->dev;
136 * Only pass ioctls through if the device sizes match exactly.
139 ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
/*
 * linear_report_zones - forward a zone report to the backing zoned
 * device, translating the starting sector through linear_map_sector().
 * Stubbed to NULL when CONFIG_BLK_DEV_ZONED is disabled so the
 * .report_zones initializer below stays valid.
 * NOTE(review): the trailing arguments of dm_report_zones() and the
 * #else/#endif lines appear elided from this extraction.
 */
144 #ifdef CONFIG_BLK_DEV_ZONED
145 static int linear_report_zones(struct dm_target *ti,
146 struct dm_report_zones_args *args, unsigned int nr_zones)
148 struct linear_c *lc = ti->private;
150 return dm_report_zones(lc->dev->bdev, lc->start,
151 linear_map_sector(ti, args->next_sector),
155 #define linear_report_zones NULL
/*
 * linear_iterate_devices - invoke @fn on the single backing device,
 * covering the mapped range [lc->start, lc->start + ti->len).
 */
158 static int linear_iterate_devices(struct dm_target *ti,
159 iterate_devices_callout_fn fn, void *data)
161 struct linear_c *lc = ti->private;
163 return fn(ti, lc->dev, lc->start, ti->len, data);
/*
 * linear_dax_direct_access - translate the DAX page offset through the
 * linear map, then forward to dax_direct_access() on the backing
 * dax_device.  Built only when CONFIG_DAX_DRIVER is enabled.
 * NOTE(review): the declaration of ret and the error check after
 * bdev_dax_pgoff() appear elided from this view.
 */
166 #if IS_ENABLED(CONFIG_DAX_DRIVER)
167 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
168 long nr_pages, void **kaddr, pfn_t *pfn)
171 struct linear_c *lc = ti->private;
172 struct block_device *bdev = lc->dev->bdev;
173 struct dax_device *dax_dev = lc->dev->dax_dev;
174 sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
176 dev_sector = linear_map_sector(ti, sector);
177 ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
180 return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
/*
 * linear_dax_copy_from_iter - copy @bytes from @i into DAX storage at
 * the linearly translated offset.
 * NOTE(review): the body of the bdev_dax_pgoff() failure branch
 * (presumably "return 0;") appears elided from this view.
 */
183 static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
184 void *addr, size_t bytes, struct iov_iter *i)
186 struct linear_c *lc = ti->private;
187 struct block_device *bdev = lc->dev->bdev;
188 struct dax_device *dax_dev = lc->dev->dax_dev;
189 sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
191 dev_sector = linear_map_sector(ti, sector);
192 if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
194 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
/*
 * linear_dax_copy_to_iter - copy @bytes out of DAX storage at the
 * linearly translated offset into @i.  Mirror image of
 * linear_dax_copy_from_iter().
 * NOTE(review): the body of the bdev_dax_pgoff() failure branch
 * (presumably "return 0;") appears elided from this view.
 */
197 static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
198 void *addr, size_t bytes, struct iov_iter *i)
200 struct linear_c *lc = ti->private;
201 struct block_device *bdev = lc->dev->bdev;
202 struct dax_device *dax_dev = lc->dev->dax_dev;
203 sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
205 dev_sector = linear_map_sector(ti, sector);
206 if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
208 return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
/*
 * linear_dax_zero_page_range - zero whole pages of DAX storage at the
 * linearly translated offset via dax_zero_page_range().
 * The NULL #defines at the bottom belong to the !CONFIG_DAX_DRIVER
 * branch, stubbing out all four DAX ops for the target_type table.
 * NOTE(review): the nr_pages parameter line, the declaration of ret,
 * the error check after bdev_dax_pgoff() and the #else/#endif lines
 * appear elided from this extraction.
 */
211 static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
215 struct linear_c *lc = ti->private;
216 struct block_device *bdev = lc->dev->bdev;
217 struct dax_device *dax_dev = lc->dev->dax_dev;
218 sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
220 dev_sector = linear_map_sector(ti, sector);
221 ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
224 return dax_zero_page_range(dax_dev, pgoff, nr_pages);
228 #define linear_dax_direct_access NULL
229 #define linear_dax_copy_from_iter NULL
230 #define linear_dax_copy_to_iter NULL
231 #define linear_dax_zero_page_range NULL
/*
 * Target type descriptor for the "linear" target.  Declares pass-through
 * of integrity data, inline crypto and nowait bios, plus support for
 * host-managed zoned devices; the op table wires up the handlers above.
 * NOTE(review): the .name, .ctr, .dtr and .map initializers appear
 * elided from this view — verify against upstream.
 */
234 static struct target_type linear_target = {
236 .version = {1, 4, 0},
237 .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
238 DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
239 .report_zones = linear_report_zones,
240 .module = THIS_MODULE,
244 .status = linear_status,
245 .prepare_ioctl = linear_prepare_ioctl,
246 .iterate_devices = linear_iterate_devices,
247 .direct_access = linear_dax_direct_access,
248 .dax_copy_from_iter = linear_dax_copy_from_iter,
249 .dax_copy_to_iter = linear_dax_copy_to_iter,
250 .dax_zero_page_range = linear_dax_zero_page_range,
/*
 * dm_linear_init - register the linear target with the DM core,
 * logging the error code on failure.
 * NOTE(review): the "if (r < 0)" guard and "return r;" appear elided.
 */
253 int __init dm_linear_init(void)
255 int r = dm_register_target(&linear_target);
258 DMERR("register failed %d", r);
/* dm_linear_exit - unregister the linear target from the DM core. */
263 void dm_linear_exit(void)
265 dm_unregister_target(&linear_target);