/*
 * Copyright (C) 2018 Google Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/crc32.h>
#include <linux/dm-bufio.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bow"

struct log_entry {
	u64 source;
	u64 dest;
	u32 size;
	u32 checksum;
} __packed;

struct log_sector {
	u32 magic;
	u16 header_version;
	u16 header_size;
	u32 block_size;
	u32 count;
	u32 sequence;
	sector_t sector0;
	struct log_entry entries[];
} __packed;

/*
 * MAGIC is BOW in ASCII
 */
#define MAGIC 0x00574f42
#define HEADER_VERSION 0x0100

/*
 * A sorted set of ranges representing the state of the data on the device.
 * Use an rb_tree for fast lookup of a given sector.
 * Consecutive ranges are always of different type - operations on this
 * set must merge matching consecutive ranges.
 *
 * Top range is always of type TOP.
 */
struct bow_range {
	struct rb_node		node;
	sector_t		sector;
	enum {
		INVALID,	/* Type not set */
		SECTOR0,	/* First sector - holds log record */
		SECTOR0_CURRENT,/* Live contents of sector0 */
		UNCHANGED,	/* Original contents */
		TRIMMED,	/* Range has been trimmed */
		CHANGED,	/* Range has been changed */
		BACKUP,		/* Range is being used as a backup */
		TOP,		/* Final range - sector is size of device */
	} type;
	struct list_head	trimmed_list; /* list of TRIMMED ranges */
};

static const char * const readable_type[] = {
	"Invalid",
	"Sector0",
	"Sector0_current",
	"Unchanged",
	"Free",
	"Changed",
	"Backup",
	"Top",
};

enum state {
	TRIM,
	CHECKPOINT,
	COMMITTED,
};

struct bow_context {
	struct dm_dev *dev;
	u32 block_size;
	u32 block_shift;
	struct workqueue_struct *workqueue;
	struct dm_bufio_client *bufio;
	struct mutex ranges_lock; /* Hold to access this struct and/or ranges */
	struct rb_root ranges;
	struct dm_kobject_holder kobj_holder; /* for sysfs attributes */
	atomic_t state; /* One of the enum state values above */
	u64 trims_total;
	struct log_sector *log_sector;
	struct list_head trimmed_list;
	bool forward_trims;
};
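
/* A range ends where the next range in the tree begins. */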
static sector_t range_top(struct bow_range *br)
{
	return container_of(rb_next(&br->node), struct bow_range, node)
		->sector;
}

static u64 range_size(struct bow_range *br)
{
	return (range_top(br) - br->sector) * SECTOR_SIZE;
}
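
/* First sector after the area covered by bi_iter. */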
static sector_t bvec_top(struct bvec_iter *bi_iter)
{
	return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;
}

/*
 * Find the first range that overlaps with bi_iter.
 * bi_iter is set to the size of the overlapping sub-range.
 */
static struct bow_range *find_first_overlapping_range(struct rb_root *ranges,
						      struct bvec_iter *bi_iter)
{
	struct rb_node *node = ranges->rb_node;
	struct bow_range *br;

	while (node) {
		br = container_of(node, struct bow_range, node);

		if (br->sector <= bi_iter->bi_sector
		    && bi_iter->bi_sector < range_top(br))
			break;

		if (bi_iter->bi_sector < br->sector)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	WARN_ON(!node);
	if (!node)
		return NULL;

	if (range_top(br) - bi_iter->bi_sector
	    < bi_iter->bi_size >> SECTOR_SHIFT)
		bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)
			<< SECTOR_SHIFT;

	return br;
}

static void add_before(struct rb_root *ranges, struct bow_range *new_br,
		       struct bow_range *existing)
{
	struct rb_node *parent = &(existing->node);
	struct rb_node **link = &(parent->rb_left);

	while (*link) {
		parent = *link;
		link = &((*link)->rb_right);
	}

	rb_link_node(&new_br->node, parent, link);
	rb_insert_color(&new_br->node, ranges);
}

/*
 * Given a range br returned by find_first_overlapping_range, split br into a
 * leading range, a range matching the bi_iter and a trailing range.
 * Leading and trailing may end up size 0 and will then be deleted. The
 * new range matching the bi_iter is then returned and should have its type
 * and type specific fields populated.
 * If bi_iter runs off the end of the range, bi_iter is truncated accordingly.
 */
static int split_range(struct bow_context *bc, struct bow_range **br,
		       struct bvec_iter *bi_iter)
{
	struct bow_range *new_br;

	if (bi_iter->bi_sector < (*br)->sector) {
		WARN_ON(true);
		return BLK_STS_IOERR;
	}

	if (bi_iter->bi_sector > (*br)->sector) {
		struct bow_range *leading_br =
			kzalloc(sizeof(*leading_br), GFP_KERNEL);

		if (!leading_br)
			return BLK_STS_RESOURCE;

		*leading_br = **br;
		if (leading_br->type == TRIMMED)
			list_add(&leading_br->trimmed_list, &bc->trimmed_list);

		add_before(&bc->ranges, leading_br, *br);
		(*br)->sector = bi_iter->bi_sector;
	}

	if (bvec_top(bi_iter) >= range_top(*br)) {
		bi_iter->bi_size = (range_top(*br) - (*br)->sector)
			* SECTOR_SIZE;
		return BLK_STS_OK;
	}

	/* new_br will be the beginning, existing br will be the tail */
	new_br = kzalloc(sizeof(*new_br), GFP_KERNEL);
	if (!new_br)
		return BLK_STS_RESOURCE;

	new_br->sector = (*br)->sector;
	(*br)->sector = bvec_top(bi_iter);
	add_before(&bc->ranges, new_br, *br);
	*br = new_br;

	return BLK_STS_OK;
}

/*
 * Sets the type of a range. May merge the range into the surrounding ranges.
 * Since br may be invalidated, always sets br to NULL to prevent
 * usage after this is called.
 */
static void set_type(struct bow_context *bc, struct bow_range **br, int type)
{
	struct bow_range *prev = container_of(rb_prev(&(*br)->node),
					      struct bow_range, node);
	struct bow_range *next = container_of(rb_next(&(*br)->node),
					      struct bow_range, node);

	if ((*br)->type == TRIMMED) {
		bc->trims_total -= range_size(*br);
		list_del(&(*br)->trimmed_list);
	}

	if (type == TRIMMED) {
		bc->trims_total += range_size(*br);
		list_add(&(*br)->trimmed_list, &bc->trimmed_list);
	}

	(*br)->type = type;

	if (next->type == type) {
		if (type == TRIMMED)
			list_del(&next->trimmed_list);
		rb_erase(&next->node, &bc->ranges);
		kfree(next);
	}

	if (prev->type == type) {
		if (type == TRIMMED)
			list_del(&(*br)->trimmed_list);
		rb_erase(&(*br)->node, &bc->ranges);
		kfree(*br);
	}

	*br = NULL;
}
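
/* Pick a trimmed range to be used as backing store for backed-up data. */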
static struct bow_range *find_free_range(struct bow_context *bc)
{
	if (list_empty(&bc->trimmed_list)) {
		DMERR("Unable to find free space to back up to");
		return NULL;
	}

	return list_first_entry(&bc->trimmed_list, struct bow_range,
				trimmed_list);
}
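
/* Convert a (block-aligned) sector to the corresponding dm-bufio block. */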
static sector_t sector_to_page(struct bow_context const *bc, sector_t sector)
{
	WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1))
		!= 0);
	return sector >> (bc->block_shift - SECTOR_SHIFT);
}
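
/*
 * Copy a whole range block by block through dm-bufio. If checksum is
 * non-NULL, also accumulate a crc32 of the copied data, seeded with the
 * source block number.
 */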
static int copy_data(struct bow_context const *bc,
		     struct bow_range *source, struct bow_range *dest,
		     u32 *checksum)
{
	int i;

	if (range_size(source) != range_size(dest)) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (checksum)
		*checksum = sector_to_page(bc, source->sector);

	for (i = 0; i < range_size(source) >> bc->block_shift; ++i) {
		struct dm_buffer *read_buffer, *write_buffer;
		u8 *read, *write;
		sector_t page = sector_to_page(bc, source->sector) + i;

		read = dm_bufio_read(bc->bufio, page, &read_buffer);
		if (IS_ERR(read)) {
			DMERR("Cannot read page %llu",
			      (unsigned long long)page);
			return PTR_ERR(read);
		}

		if (checksum)
			*checksum = crc32(*checksum, read, bc->block_size);

		write = dm_bufio_new(bc->bufio,
				     sector_to_page(bc, dest->sector) + i,
				     &write_buffer);
		if (IS_ERR(write)) {
			DMERR("Cannot write sector");
			dm_bufio_release(read_buffer);
			return PTR_ERR(write);
		}

		memcpy(write, read, bc->block_size);

		dm_bufio_mark_buffer_dirty(write_buffer);
		dm_bufio_release(write_buffer);
		dm_bufio_release(read_buffer);
	}

	dm_bufio_write_dirty_buffers(bc->bufio);
	return BLK_STS_OK;
}

/****** logging functions ******/

static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
			 unsigned int size, u32 checksum);
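
/*
 * Called when the log sector is full: save its contents to free space and
 * restart the log with a single entry recording where they went.
 */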
static int backup_log_sector(struct bow_context *bc)
{
	struct bow_range *first_br, *free_br;
	struct bvec_iter bi_iter;
	u32 checksum = 0;
	int ret;

	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);

	if (first_br->type != SECTOR0) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (range_size(first_br) != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	free_br = find_free_range(bc);
	/* No space left - return this error to userspace */
	if (!free_br)
		return BLK_STS_NOSPC;

	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;
	if (bi_iter.bi_size != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	ret = copy_data(bc, first_br, free_br, &checksum);
	if (ret)
		return ret;

	bc->log_sector->count = 0;
	bc->log_sector->sequence++;
	ret = add_log_entry(bc, first_br->sector, free_br->sector,
			    range_size(first_br), checksum);
	if (ret)
		return ret;

	set_type(bc, &free_br, BACKUP);
	return BLK_STS_OK;
}

static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
			 unsigned int size, u32 checksum)
{
	struct dm_buffer *sector_buffer;
	u8 *sector;

	if (sizeof(struct log_sector)
	    + sizeof(struct log_entry) * (bc->log_sector->count + 1)
		> bc->block_size) {
		int ret = backup_log_sector(bc);

		if (ret)
			return ret;
	}

	sector = dm_bufio_new(bc->bufio, 0, &sector_buffer);
	if (IS_ERR(sector)) {
		DMERR("Cannot write boot sector");
		dm_bufio_release(sector_buffer);
		return BLK_STS_NOSPC;
	}

	bc->log_sector->entries[bc->log_sector->count].source = source;
	bc->log_sector->entries[bc->log_sector->count].dest = dest;
	bc->log_sector->entries[bc->log_sector->count].size = size;
	bc->log_sector->entries[bc->log_sector->count].checksum = checksum;
	bc->log_sector->count++;

	memcpy(sector, bc->log_sector, bc->block_size);
	dm_bufio_mark_buffer_dirty(sector_buffer);
	dm_bufio_release(sector_buffer);
	dm_bufio_write_dirty_buffers(bc->bufio);
	return BLK_STS_OK;
}
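
/*
 * Take over sector 0 on entering checkpoint state: carve out the log
 * sector, redirect live sector 0 traffic to a free block and back up the
 * original contents as the very first log entry.
 */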
static int prepare_log(struct bow_context *bc)
{
	struct bow_range *free_br, *first_br;
	struct bvec_iter bi_iter;
	u32 checksum = 0;
	int ret;

	/* Carve out first sector as log sector */
	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
	if (first_br->type != UNCHANGED) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (range_size(first_br) < bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
	bi_iter.bi_sector = 0;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &first_br, &bi_iter);
	if (ret)
		return ret;
	first_br->type = SECTOR0;
	if (range_size(first_br) != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	/* Find free sector for active sector0 reads/writes */
	free_br = find_free_range(bc);
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;

	ret = copy_data(bc, first_br, free_br, NULL);
	if (ret)
		return ret;

	bc->log_sector->sector0 = free_br->sector;

	set_type(bc, &free_br, SECTOR0_CURRENT);

	/* Find free sector to back up original sector zero */
	free_br = find_free_range(bc);
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;

	ret = copy_data(bc, first_br, free_br, &checksum);
	if (ret)
		return ret;

	/*
	 * Set up our replacement boot sector - it will get written when we
	 * add the first log entry, which we do immediately
	 */
	bc->log_sector->magic = MAGIC;
	bc->log_sector->header_version = HEADER_VERSION;
	bc->log_sector->header_size = sizeof(*bc->log_sector);
	bc->log_sector->block_size = bc->block_size;
	bc->log_sector->count = 0;
	bc->log_sector->sequence = 0;

	ret = add_log_entry(bc, first_br->sector, free_br->sector,
			    range_size(first_br), checksum);
	if (ret)
		return ret;

	set_type(bc, &free_br, BACKUP);
	return BLK_STS_OK;
}
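
/* Find the range holding the live copy of sector 0. */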
static struct bow_range *find_sector0_current(struct bow_context *bc)
{
	struct bvec_iter bi_iter;

	bi_iter.bi_sector = bc->log_sector->sector0;
	bi_iter.bi_size = bc->block_size;
	return find_first_overlapping_range(&bc->ranges, &bi_iter);
}

/****** sysfs interface functions ******/

static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);

	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state));
}
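
/*
 * Userspace advances the state machine one step at a time:
 * TRIM -> CHECKPOINT -> COMMITTED. Any other transition is rejected.
 */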
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);
	enum state state, original_state;
	int ret;

	state = buf[0] - '0';
	if (state < TRIM || state > COMMITTED) {
		DMERR("State value %d out of range", state);
		return -EINVAL;
	}

	mutex_lock(&bc->ranges_lock);
	original_state = atomic_read(&bc->state);
	if (state != original_state + 1) {
		DMERR("Invalid state change from %d to %d",
		      original_state, state);
		ret = -EINVAL;
		goto bad;
	}

	DMINFO("Switching to state %s", state == CHECKPOINT ? "Checkpoint"
	       : state == COMMITTED ? "Committed" : "Unknown");

	if (state == CHECKPOINT) {
		ret = prepare_log(bc);
		if (ret) {
			DMERR("Failed to switch to checkpoint state");
			goto bad;
		}
	} else if (state == COMMITTED) {
		struct bow_range *br = find_sector0_current(bc);
		struct bow_range *sector0_br =
			container_of(rb_first(&bc->ranges), struct bow_range,
				     node);

		ret = copy_data(bc, br, sector0_br, NULL);
		if (ret) {
			DMERR("Failed to switch to committed state");
			goto bad;
		}
	}

	atomic_inc(&bc->state);
	ret = count;

bad:
	mutex_unlock(&bc->ranges_lock);

	return ret;
}

static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);
	u64 trims_total;

	mutex_lock(&bc->ranges_lock);
	trims_total = bc->trims_total;
	mutex_unlock(&bc->ranges_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
}

static struct kobj_attribute attr_state = __ATTR_RW(state);
static struct kobj_attribute attr_free = __ATTR_RO(free);

static struct attribute *bow_attrs[] = {
	&attr_state.attr,
	&attr_free.attr,
	NULL
};

static struct kobj_type bow_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = bow_attrs,
	.release = dm_kobject_release
};

/****** constructor/destructor ******/

static void dm_bow_dtr(struct dm_target *ti)
{
	struct bow_context *bc = (struct bow_context *) ti->private;
	struct kobject *kobj;

	if (bc->workqueue)
		destroy_workqueue(bc->workqueue);
	if (bc->bufio)
		dm_bufio_client_destroy(bc->bufio);

	kobj = &bc->kobj_holder.kobj;
	if (kobj->state_initialized) {
		kobject_put(kobj);
		wait_for_completion(dm_get_completion_from_kobject(kobj));
	}

	while (rb_first(&bc->ranges)) {
		struct bow_range *br = container_of(rb_first(&bc->ranges),
						    struct bow_range, node);

		rb_erase(&br->node, &bc->ranges);
		kfree(br);
	}

	mutex_destroy(&bc->ranges_lock);
	kfree(bc->log_sector);
	kfree(bc);
}

static void dm_bow_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct bow_context *bc = ti->private;
	const unsigned int block_size = bc->block_size;

	limits->logical_block_size =
		max_t(unsigned int, limits->logical_block_size, block_size);
	limits->physical_block_size =
		max_t(unsigned int, limits->physical_block_size, block_size);
	limits->io_min = max_t(unsigned int, limits->io_min, block_size);

	if (limits->max_discard_sectors == 0) {
		limits->discard_granularity = 1 << 12;
		limits->max_hw_discard_sectors = 1 << 15;
		limits->max_discard_sectors = 1 << 15;
		bc->forward_trims = false;
	} else {
		limits->discard_granularity = 1 << 12;
		bc->forward_trims = true;
	}
}

static int dm_bow_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct bow_context *bc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};
	unsigned int opt_params;
	const char *opt_string;
	int err;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (err)
		return err;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (sscanf(opt_string, "block_size:%u%c",
			   &bc->block_size, &dummy) == 1) {
			if (bc->block_size < SECTOR_SIZE ||
			    bc->block_size > 4096 ||
			    !is_power_of_2(bc->block_size)) {
				ti->error = "Invalid block_size";
				return -EINVAL;
			}
		} else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct bow_context *bc;
	struct bow_range *br;
	int ret;

	if (argc < 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	bc = kzalloc(sizeof(*bc), GFP_KERNEL);
	if (!bc) {
		ti->error = "Cannot allocate bow context";
		return -ENOMEM;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->private = bc;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &bc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	bc->block_size =
		bdev_get_queue(bc->dev->bdev)->limits.logical_block_size;

	ret = dm_bow_ctr_optional(ti, argc - 1, &argv[1]);
	if (ret)
		goto bad;

	bc->block_shift = ilog2(bc->block_size);
	bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL);
	if (!bc->log_sector) {
		ti->error = "Cannot allocate log sector";
		ret = -ENOMEM;
		goto bad;
	}

	init_completion(&bc->kobj_holder.completion);
	mutex_init(&bc->ranges_lock);
	bc->ranges = RB_ROOT;
	bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0,
					   NULL, NULL);
	if (IS_ERR(bc->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		ret = PTR_ERR(bc->bufio);
		bc->bufio = NULL;
		goto bad;
	}

	bc->workqueue = alloc_workqueue("dm-bow",
					WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM
					| WQ_UNBOUND, num_online_cpus());
	if (!bc->workqueue) {
		ti->error = "Cannot allocate workqueue";
		ret = -ENOMEM;
		goto bad;
	}

	INIT_LIST_HEAD(&bc->trimmed_list);

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br) {
		ti->error = "Cannot allocate ranges";
		ret = -ENOMEM;
		goto bad;
	}

	br->sector = ti->len;
	br->type = TOP;
	rb_link_node(&br->node, NULL, &bc->ranges.rb_node);
	rb_insert_color(&br->node, &bc->ranges);

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br) {
		ti->error = "Cannot allocate ranges";
		ret = -ENOMEM;
		goto bad;
	}

	br->sector = 0;
	br->type = UNCHANGED;
	rb_link_node(&br->node, bc->ranges.rb_node,
		     &bc->ranges.rb_node->rb_left);
	rb_insert_color(&br->node, &bc->ranges);

	ti->discards_supported = true;

	return 0;

bad:
	dm_bow_dtr(ti);
	return ret;
}

static void dm_bow_resume(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct bow_context *bc = ti->private;
	int ret;

	if (bc->kobj_holder.kobj.state_initialized)
		return;

	ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype,
				   &disk_to_dev(dm_disk(md))->kobj, "%s",
				   "bow");
	if (ret)
		ti->error = "Cannot create sysfs node";
}

/****** Handle writes ******/

static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
				   struct bvec_iter *bi_iter,
				   bool record_checksum)
{
	struct bow_range *backup_br;
	struct bvec_iter backup_bi;
	sector_t log_source, log_dest;
	unsigned int log_size;
	u32 checksum = 0;
	int ret;
	int original_type;
	sector_t sector0;

	/* Find a free range */
	backup_br = find_free_range(bc);
	if (!backup_br)
		return BLK_STS_NOSPC;

	/* Carve out a backup range. This may be smaller than the br given */
	backup_bi.bi_sector = backup_br->sector;
	backup_bi.bi_size = min(range_size(backup_br), (u64) bi_iter->bi_size);
	ret = split_range(bc, &backup_br, &backup_bi);
	if (ret)
		return ret;

	/*
	 * Carve out a changed range. This will not be smaller than the backup
	 * br since the backup br is smaller than the source range and iterator
	 */
	bi_iter->bi_size = backup_bi.bi_size;
	ret = split_range(bc, &br, bi_iter);
	if (ret)
		return ret;
	if (range_size(br) != range_size(backup_br)) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL);
	if (ret)
		return ret;

	/* Add an entry to the log */
	log_source = br->sector;
	log_dest = backup_br->sector;
	log_size = range_size(br);

	/*
	 * Set the types. Note that since set_type also amalgamates ranges
	 * we have to set both sectors to their final type before calling
	 * set_type.
	 */
	original_type = br->type;
	sector0 = backup_br->sector;
	bc->trims_total -= range_size(backup_br);
	if (backup_br->type == TRIMMED)
		list_del(&backup_br->trimmed_list);
	backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
						      : BACKUP;
	br->type = CHANGED;
	set_type(bc, &backup_br, backup_br->type);

	/*
	 * Add the log entry after marking the backup sector, since adding a log
	 * can cause another backup
	 */
	ret = add_log_entry(bc, log_source, log_dest, log_size, checksum);
	if (ret) {
		br->type = original_type;
		return ret;
	}

	/* Now it is safe to mark this backup successful */
	if (original_type == SECTOR0_CURRENT)
		bc->log_sector->sector0 = sector0;

	set_type(bc, &br, br->type);
	return ret;
}

static int prepare_free_range(struct bow_context *bc, struct bow_range *br,
			      struct bvec_iter *bi_iter)
{
	int ret;

	ret = split_range(bc, &br, bi_iter);
	if (ret)
		return ret;
	set_type(bc, &br, CHANGED);
	return BLK_STS_OK;
}

static int prepare_changed_range(struct bow_context *bc, struct bow_range *br,
				 struct bvec_iter *bi_iter)
{
	/* Nothing to do ... */
	return BLK_STS_OK;
}
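
/*
 * Prepare the first range overlapped by bi_iter (backing up data that is
 * about to be overwritten); the caller loops until the whole iterator has
 * been handled.
 */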
static int prepare_one_range(struct bow_context *bc,
			     struct bvec_iter *bi_iter)
{
	struct bow_range *br = find_first_overlapping_range(&bc->ranges,
							    bi_iter);
	switch (br->type) {
	case CHANGED:
		return prepare_changed_range(bc, br, bi_iter);

	case TRIMMED:
		return prepare_free_range(bc, br, bi_iter);

	case UNCHANGED:
	case BACKUP:
		return prepare_unchanged_range(bc, br, bi_iter, true);

	/*
	 * We cannot track the checksum for the active sector0, since it
	 * may change at any point.
	 */
	case SECTOR0_CURRENT:
		return prepare_unchanged_range(bc, br, bi_iter, false);

	case SECTOR0:	/* Handled in the dm_bow_map */
	case TOP:	/* Illegal - top is off the end of the device */
	default:
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

struct write_work {
	struct work_struct work;
	struct bow_context *bc;
	struct bio *bio;
};
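
/* Worker: back up everything the write overlaps, then pass the bio down. */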
static void bow_write(struct work_struct *work)
{
	struct write_work *ww = container_of(work, struct write_work, work);
	struct bow_context *bc = ww->bc;
	struct bio *bio = ww->bio;
	struct bvec_iter bi_iter = bio->bi_iter;
	int ret = BLK_STS_OK;

	kfree(ww);

	mutex_lock(&bc->ranges_lock);
	do {
		ret = prepare_one_range(bc, &bi_iter);
		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
				* SECTOR_SIZE;
	} while (!ret && bi_iter.bi_size);
	mutex_unlock(&bc->ranges_lock);

	if (!ret) {
		bio_set_dev(bio, bc->dev->bdev);
		submit_bio(bio);
	} else {
		DMERR("Write failure with error %d", -ret);
		bio->bi_status = ret;
		bio_endio(bio);
	}
}
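
/*
 * Backing up the data a write will overwrite requires blocking I/O, so it
 * is done from the workqueue rather than in the map callback itself.
 */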
static int queue_write(struct bow_context *bc, struct bio *bio)
{
	struct write_work *ww = kmalloc(sizeof(*ww), GFP_NOIO | __GFP_NORETRY
					| __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (!ww) {
		DMERR("Failed to allocate write_work");
		return -ENOMEM;
	}

	INIT_WORK(&ww->work, bow_write);
	ww->bc = bc;
	ww->bio = bio;
	queue_work(bc->workqueue, &ww->work);
	return DM_MAPIO_SUBMITTED;
}
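
/*
 * Sector 0 holds the log, so reads and writes to it are redirected to the
 * live copy at log_sector->sector0. A bio larger than one block is split
 * at the block boundary first.
 */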
static int handle_sector0(struct bow_context *bc, struct bio *bio)
{
	int ret = DM_MAPIO_REMAPPED;

	if (bio->bi_iter.bi_size > bc->block_size) {
		struct bio *split = bio_split(bio,
					      bc->block_size >> SECTOR_SHIFT,
					      GFP_NOIO, &fs_bio_set);
		if (!split) {
			DMERR("Failed to split bio");
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}

		bio_chain(split, bio);
		split->bi_iter.bi_sector = bc->log_sector->sector0;
		bio_set_dev(split, bc->dev->bdev);
		submit_bio(split);

		if (bio_data_dir(bio) == WRITE)
			ret = queue_write(bc, bio);
	} else {
		bio->bi_iter.bi_sector = bc->log_sector->sector0;
	}

	return ret;
}
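
/*
 * TRIM state: a discard marks the covered ranges as free space that backups
 * may later use. The discard is completed here, never forwarded.
 */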
static int add_trim(struct bow_context *bc, struct bio *bio)
{
	struct bow_range *br;
	struct bvec_iter bi_iter = bio->bi_iter;

	DMDEBUG("add_trim: %llu, %u",
		(unsigned long long)bio->bi_iter.bi_sector,
		bio->bi_iter.bi_size);

	do {
		br = find_first_overlapping_range(&bc->ranges, &bi_iter);

		switch (br->type) {
		case UNCHANGED:
			if (!split_range(bc, &br, &bi_iter))
				set_type(bc, &br, TRIMMED);
			break;

		case TRIMMED:
			/* Nothing to do */
			break;

		default:
			/* No other case is legal in TRIM state */
			WARN_ON(true);
			break;
		}

		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
				* SECTOR_SIZE;
	} while (bi_iter.bi_size);

	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;
}
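
/*
 * TRIM state: a write makes the covered ranges live again, so take them
 * back off the free list before remapping the bio.
 */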
static int remove_trim(struct bow_context *bc, struct bio *bio)
{
	struct bow_range *br;
	struct bvec_iter bi_iter = bio->bi_iter;

	DMDEBUG("remove_trim: %llu, %u",
		(unsigned long long)bio->bi_iter.bi_sector,
		bio->bi_iter.bi_size);

	do {
		br = find_first_overlapping_range(&bc->ranges, &bi_iter);

		switch (br->type) {
		case UNCHANGED:
			/* Nothing to do */
			break;

		case TRIMMED:
			if (!split_range(bc, &br, &bi_iter))
				set_type(bc, &br, UNCHANGED);
			break;

		default:
			/* No other case is legal in TRIM state */
			WARN_ON(true);
			break;
		}

		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
				* SECTOR_SIZE;
	} while (bi_iter.bi_size);

	return DM_MAPIO_REMAPPED;
}
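
/*
 * Pass the bio straight through, except for discards the underlying device
 * cannot handle, which are failed with BLK_STS_NOTSUPP.
 */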
static int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
{
	if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	bio_set_dev(bio, bc->dev->bdev);
	return DM_MAPIO_REMAPPED;
}

/****** dm interface ******/

static int dm_bow_map(struct dm_target *ti, struct bio *bio)
{
	int ret = DM_MAPIO_REMAPPED;
	struct bow_context *bc = ti->private;

	if (likely(bc->state.counter == COMMITTED))
		return remap_unless_illegal_trim(bc, bio);

	if (bio_data_dir(bio) == READ && bio->bi_iter.bi_sector != 0)
		return remap_unless_illegal_trim(bc, bio);

	if (atomic_read(&bc->state) != COMMITTED) {
		enum state state;

		mutex_lock(&bc->ranges_lock);
		state = atomic_read(&bc->state);
		if (state == TRIM) {
			if (bio_op(bio) == REQ_OP_DISCARD)
				ret = add_trim(bc, bio);
			else if (bio_data_dir(bio) == WRITE)
				ret = remove_trim(bc, bio);
		} else if (state == CHECKPOINT) {
			if (bio->bi_iter.bi_sector == 0)
				ret = handle_sector0(bc, bio);
			else if (bio_data_dir(bio) == WRITE)
				ret = queue_write(bc, bio);
		}
		mutex_unlock(&bc->ranges_lock);
	}

	if (ret == DM_MAPIO_REMAPPED)
		return remap_unless_illegal_trim(bc, bio);

	return ret;
}
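
/*
 * Dump the range tree for the table status, running consistency checks on
 * the trimmed list and the range ordering as we go.
 */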
static void dm_bow_tablestatus(struct dm_target *ti, char *result,
			       unsigned int maxlen)
{
	char *end = result + maxlen;
	struct bow_context *bc = ti->private;
	struct rb_node *i;
	int trimmed_list_length = 0;
	int trimmed_range_count = 0;
	struct bow_range *br;

	if (maxlen == 0)
		return;
	result[0] = 0;

	list_for_each_entry(br, &bc->trimmed_list, trimmed_list)
		if (br->type == TRIMMED) {
			++trimmed_list_length;
		} else {
			scnprintf(result, end - result,
				  "ERROR: non-trimmed entry in trimmed_list");
			return;
		}

	if (!rb_first(&bc->ranges)) {
		scnprintf(result, end - result, "ERROR: Empty ranges");
		return;
	}

	if (container_of(rb_first(&bc->ranges), struct bow_range, node)
	    ->sector) {
		scnprintf(result, end - result,
			  "ERROR: First range does not start at sector 0");
		return;
	}

	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
		struct bow_range *br = container_of(i, struct bow_range, node);

		result += scnprintf(result, end - result, "%s: %llu",
				    readable_type[br->type],
				    (unsigned long long)br->sector);
		if (result >= end)
			return;

		result += scnprintf(result, end - result, "\n");
		if (result >= end)
			return;

		if (br->type == TRIMMED)
			++trimmed_range_count;

		if (br->type == TOP) {
			if (br->sector != ti->len) {
				scnprintf(result, end - result,
					  "\nERROR: Top sector is incorrect");
			}

			if (&br->node != rb_last(&bc->ranges)) {
				scnprintf(result, end - result,
					  "\nERROR: Top sector is not last");
			}

			break;
		}

		if (!rb_next(i)) {
			scnprintf(result, end - result,
				  "\nERROR: Last range not of type TOP");
			return;
		}

		if (br->sector > range_top(br)) {
			scnprintf(result, end - result,
				  "\nERROR: sectors out of order");
			return;
		}
	}

	if (trimmed_range_count != trimmed_list_length)
		scnprintf(result, end - result,
			  "\nERROR: not all trimmed ranges in trimmed list");
}

static void dm_bow_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result,
			  unsigned int maxlen)
{
	switch (type) {
	case STATUSTYPE_INFO:
		if (maxlen)
			result[0] = 0;
		break;

	case STATUSTYPE_TABLE:
		dm_bow_tablestatus(ti, result, maxlen);
		break;
	}
}

static int dm_bow_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct bow_context *bc = ti->private;
	struct dm_dev *dev = bc->dev;

	*bdev = dev->bdev;

	/* Only pass ioctls through if the device sizes match exactly. */
	return ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

static int dm_bow_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct bow_context *bc = ti->private;

	return fn(ti, bc->dev, 0, ti->len, data);
}

static struct target_type bow_target = {
	.name = "bow",
	.version = {1, 2, 0},
	.module = THIS_MODULE,
	.ctr = dm_bow_ctr,
	.resume = dm_bow_resume,
	.dtr = dm_bow_dtr,
	.map = dm_bow_map,
	.status = dm_bow_status,
	.prepare_ioctl = dm_bow_prepare_ioctl,
	.iterate_devices = dm_bow_iterate_devices,
	.io_hints = dm_bow_io_hints,
};

static int __init dm_bow_init(void)
{
	int r = dm_register_target(&bow_target);

	if (r < 0)
		DMERR("registering bow failed %d", r);

	return r;
}

static void __exit dm_bow_exit(void)
{
	dm_unregister_target(&bow_target);
}

MODULE_LICENSE("GPL");

module_init(dm_bow_init);
module_exit(dm_bow_exit);