// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */

#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>

#include "dm-clone-metadata.h"

#define DM_MSG_PREFIX "clone"

/*
 * Minimum and maximum allowed region sizes
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */
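
/*
 * Both limits are expressed in 512-byte sectors: MIN_REGION_SIZE is 8 sectors
 * (4KB) and MAX_REGION_SIZE is 2097152 sectors (1GB).
 */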

#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */

/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15
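
/* That is, 32768 buckets, allocated up front in hash_table_init() below. */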

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(clone_hydration_throttle,
	"A percentage of time allocated for hydrating regions");

/* Slab cache for struct dm_clone_region_hydration */
static struct kmem_cache *_hydration_cache;

/* dm-clone metadata modes */
enum clone_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL,		/* all metadata I/O fails */
};

struct hash_table_bucket;

struct clone {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	struct dm_dev *metadata_dev;
	struct dm_dev *dest_dev;
	struct dm_dev *source_dev;

	unsigned long nr_regions;
	sector_t region_size;
	unsigned int region_shift;

	/*
	 * A metadata commit and the actions taken in case it fails should run
	 * as a single atomic step.
	 */
	struct mutex commit_lock;

	struct dm_clone_metadata *cmd;

	/*
	 * bio used to flush the destination device, before committing the
	 * metadata.
	 */
	struct bio flush_bio;

	/* Region hydration hash table */
	struct hash_table_bucket *ht;

	atomic_t ios_in_flight;

	wait_queue_head_t hydration_stopped;

	mempool_t hydration_pool;

	unsigned long last_commit_jiffies;

	/*
	 * We defer incoming WRITE bios for regions that are not hydrated,
	 * until after these regions have been hydrated.
	 *
	 * Also, we defer REQ_FUA and REQ_PREFLUSH bios, until after the
	 * metadata have been committed.
	 */
	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_discard_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_flush_completions;

	/* Maximum number of regions being copied during background hydration. */
	unsigned int hydration_threshold;

	/* Number of regions to batch together during background hydration. */
	unsigned int hydration_batch_size;

	/* Which region to hydrate next */
	unsigned long hydration_offset;

	atomic_t hydrations_in_flight;

	/*
	 * Save a copy of the table line rather than reconstructing it for the
	 * status.
	 */
	unsigned int nr_ctr_args;
	const char **ctr_args;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	struct dm_kcopyd_client *kcopyd_client;

	enum clone_metadata_mode mode;
	unsigned long flags;
};

/*
 * dm-clone flags
 */
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2

/*---------------------------------------------------------------------------*/

/*
 * Metadata failure handling.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{
	return READ_ONCE(clone->mode);
}

static const char *clone_device_name(struct clone *clone)
{
	return dm_table_device_name(clone->ti->table);
}

static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
	const char *descs[] = {
		"read-write",
		"read-only",
		"fail"
	};

	enum clone_metadata_mode old_mode = get_clone_mode(clone);

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_clone_metadata_set_read_only(clone->cmd);
		break;

	case CM_WRITE:
		dm_clone_metadata_set_read_write(clone->cmd);
		break;
	}

	WRITE_ONCE(clone->mode, new_mode);

	if (new_mode != old_mode) {
		dm_table_event(clone->ti->table);
		DMINFO("%s: Switching to %s mode", clone_device_name(clone),
		       descs[(int)new_mode]);
	}
}

static void __abort_transaction(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) >= CM_READ_ONLY)
		return;

	DMERR("%s: Aborting current metadata transaction", dev_name);
	if (dm_clone_metadata_abort(clone->cmd)) {
		DMERR("%s: Failed to abort metadata transaction", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __reload_in_core_bitset(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) == CM_FAIL)
		return;

	/* Reload the on-disk bitset */
	DMINFO("%s: Reloading on-disk bitmap", dev_name);
	if (dm_clone_reload_in_core_bitset(clone->cmd)) {
		DMERR("%s: Failed to reload on-disk bitmap", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{
	DMERR("%s: Metadata operation `%s' failed: error = %d",
	      clone_device_name(clone), op, r);

	__abort_transaction(clone);
	__set_clone_mode(clone, CM_READ_ONLY);

	/*
	 * dm_clone_reload_in_core_bitset() may run concurrently with either
	 * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but
	 * it's safe as we have already set the metadata to read-only mode.
	 */
	__reload_in_core_bitset(clone);
}

/*---------------------------------------------------------------------------*/

/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{
	wake_up_all(&clone->hydration_stopped);
}

static inline void wake_worker(struct clone *clone)
{
	queue_work(clone->wq, &clone->worker);
}

/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->source_dev->bdev);
}

static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->dest_dev->bdev);
}

static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
		dm_clone_changed_this_transaction(clone->cmd);
}

/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
	return (region_nr << clone->region_shift);
}

/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{
	return (bio->bi_iter.bi_sector >> clone->region_shift);
}

/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
			     unsigned long *rs, unsigned long *re)
{
	*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
	*re = bio_end_sector(bio) >> clone->region_shift;
}
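
/*
 * Worked example of the region math above: with a region size of 8 sectors
 * (region_shift == 3), a WRITE at sector 24 of length 8 sectors maps to
 * region 3 (bio_to_region()), and bio_region_range() yields rs == 3,
 * re == 4, i.e., the bio fully covers exactly one region.
 */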

/* Check whether a bio overwrites a region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
}

static void fail_bios(struct bio_list *bios, blk_status_t status)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void submit_bios(struct bio_list *bios)
{
	struct bio *bio;
	struct blk_plug plug;

	blk_start_plug(&plug);

	while ((bio = bio_list_pop(bios)))
		generic_make_request(bio);

	blk_finish_plug(&plug);
}

/*
 * Submit bio to the underlying device.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{
	unsigned long flags;

	if (!bio_triggers_commit(clone, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a single
	 * commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

/*
 * Remap bio to the destination device and submit it.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{
	remap_to_dest(clone, bio);
	issue_bio(clone, bio);
}

/*
 * Issue bios that have been deferred until after their region has finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to call
 * from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list flush_bios = BIO_EMPTY_LIST;
	struct bio_list normal_bios = BIO_EMPTY_LIST;

	if (bio_list_empty(bios))
		return;

	while ((bio = bio_list_pop(bios))) {
		if (bio_triggers_commit(clone, bio))
			bio_list_add(&flush_bios, bio);
		else
			bio_list_add(&normal_bios, bio);
	}

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&clone->deferred_bios, &normal_bios);
	bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{
	unsigned long flags;

	/*
	 * If the bio has the REQ_FUA flag set we must commit the metadata
	 * before signaling its completion.
	 *
	 * complete_overwrite_bio() is only called by hydration_complete(),
	 * after having successfully updated the metadata. This means we don't
	 * need to call dm_clone_changed_this_transaction() to check if the
	 * metadata has changed and thus we can avoid taking the metadata spin
	 * lock.
	 */
	if (!(bio->bi_opf & REQ_FUA)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a single
	 * commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_flush_completions, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
	unsigned long rs, re;

	/*
	 * If the destination device supports discards, remap and trim the
	 * discard bio and pass it down. Otherwise complete the bio
	 * immediately.
	 */
	if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
		remap_to_dest(clone, bio);
		bio_region_range(clone, bio, &rs, &re);
		trim_bio(bio, rs << clone->region_shift,
			 (re - rs) << clone->region_shift);
		generic_make_request(bio);
	} else
		bio_endio(bio);
}

static void process_discard_bio(struct clone *clone, struct bio *bio)
{
	unsigned long rs, re, flags;

	bio_region_range(clone, bio, &rs, &re);
	BUG_ON(re > clone->nr_regions);

	if (unlikely(rs == re)) {
		bio_endio(bio);
		return;
	}

	/*
	 * The covered regions are already hydrated so we just need to pass
	 * down the discard.
	 */
	if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) {
		complete_discard_bio(clone, bio, true);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to update the
	 * metadata for the regions covered by the discard so we just ignore
	 * it.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_endio(bio);
		return;
	}

	/*
	 * Defer discard processing.
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_discard_bios, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 */
struct dm_clone_region_hydration {
	struct clone *clone;
	unsigned long region_nr;

	struct bio *overwrite_bio;
	bio_end_io_t *overwrite_bio_end_io;

	struct bio_list deferred_bios;

	blk_status_t status;

	/* Used by hydration batching */
	struct list_head list;

	/* Used by hydration hash table */
	struct hlist_node h;
};

/*
 * Hydration hash table implementation.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl and bit
 * spin locks don't support IRQ safe versions. Since we have to take the lock
 * in both process and interrupt context, we must fall back to using regular
 * spin locks; one per hash table bucket.
 */

struct hash_table_bucket {
	struct hlist_head head;

	/* Spinlock protecting the bucket */
	spinlock_t lock;
};

#define bucket_lock_irqsave(bucket, flags) \
	spin_lock_irqsave(&(bucket)->lock, flags)

#define bucket_unlock_irqrestore(bucket, flags) \
	spin_unlock_irqrestore(&(bucket)->lock, flags)

static int hash_table_init(struct clone *clone)
{
	unsigned int i, sz;
	struct hash_table_bucket *bucket;

	sz = 1 << HASH_TABLE_BITS;

	clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
	if (!clone->ht)
		return -ENOMEM;

	for (i = 0; i < sz; i++) {
		bucket = clone->ht + i;

		INIT_HLIST_HEAD(&bucket->head);
		spin_lock_init(&bucket->lock);
	}

	return 0;
}

static void hash_table_exit(struct clone *clone)
{
	kvfree(clone->ht);
}

static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
							unsigned long region_nr)
{
	return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
}

/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
						     unsigned long region_nr)
{
	struct dm_clone_region_hydration *hd;

	hlist_for_each_entry(hd, &bucket->head, h) {
		if (hd->region_nr == region_nr)
			return hd;
	}

	return NULL;
}

/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
					     struct dm_clone_region_hydration *hd)
{
	hlist_add_head(&hd->h, &bucket->head);
}

/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for this region.
 *
 * NOTE: Must be called with the hydration hash table lock held.
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
				  struct dm_clone_region_hydration *hd)
{
	struct dm_clone_region_hydration *hd2;

	hd2 = __hash_find(bucket, hd->region_nr);
	if (hd2)
		return hd2;

	__insert_region_hydration(bucket, hd);

	return hd;
}

/*---------------------------------------------------------------------------*/

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{
	struct dm_clone_region_hydration *hd;

	/*
	 * Allocate a hydration from the hydration mempool.
	 * This might block but it can't fail.
	 */
	hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
	hd->clone = clone;

	return hd;
}

static inline void free_hydration(struct dm_clone_region_hydration *hd)
{
	mempool_free(hd, &hd->clone->hydration_pool);
}

/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{
	hd->region_nr = region_nr;
	hd->overwrite_bio = NULL;
	bio_list_init(&hd->deferred_bios);
	hd->status = 0;

	INIT_LIST_HEAD(&hd->list);
	INIT_HLIST_NODE(&hd->h);
}

/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * hydration from the hash table.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{
	int r = 0;
	unsigned long flags;
	struct hash_table_bucket *bucket;
	struct clone *clone = hd->clone;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		r = -EPERM;

	/* Update the metadata */
	if (likely(!r) && hd->status == BLK_STS_OK)
		r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);

	bucket = get_hash_table_bucket(clone, hd->region_nr);

	/* Remove hydration from hash table */
	bucket_lock_irqsave(bucket, flags);
	hlist_del(&hd->h);
	bucket_unlock_irqrestore(bucket, flags);

	return r;
}

/*
 * Complete a region's hydration:
 *
 *	1. Update dm-clone's metadata.
 *	2. Remove hydration from hash table.
 *	3. Complete overwrite bio.
 *	4. Issue deferred bios.
 *	5. If this was the last hydration, wake up anyone waiting for
 *	   hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{
	int r;
	blk_status_t status;
	struct clone *clone = hd->clone;

	r = hydration_update_metadata(hd);

	if (hd->status == BLK_STS_OK && likely(!r)) {
		if (hd->overwrite_bio)
			complete_overwrite_bio(clone, hd->overwrite_bio);

		issue_deferred_bios(clone, &hd->deferred_bios);
	} else {
		status = r ? BLK_STS_IOERR : hd->status;

		if (hd->overwrite_bio)
			bio_list_add(&hd->deferred_bios, hd->overwrite_bio);

		fail_bios(&hd->deferred_bios, status);
	}

	free_hydration(hd);

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{
	blk_status_t status;
	struct dm_clone_region_hydration *tmp, *hd = context;
	struct clone *clone = hd->clone;

	LIST_HEAD(batched_hydrations);

	if (read_err || write_err) {
		DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
		status = BLK_STS_IOERR;
	} else
		status = BLK_STS_OK;

	list_splice_tail(&hd->list, &batched_hydrations);

	hd->status = status;
	hydration_complete(hd);

	/* Complete batched hydrations */
	list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) {
		hd->status = status;
		hydration_complete(hd);
	}

	/* Continue background hydration, if there is no I/O in-flight */
	if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	    !atomic_read(&clone->ios_in_flight))
		wake_worker(clone);
}

static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{
	unsigned long region_start, region_end;
	sector_t tail_size, region_size, total_size;
	struct dm_io_region from, to;
	struct clone *clone = hd->clone;

	region_size = clone->region_size;
	region_start = hd->region_nr;
	region_end = region_start + nr_regions - 1;

	total_size = (nr_regions - 1) << clone->region_shift;

	if (region_end == clone->nr_regions - 1) {
		/*
		 * The last region of the target might be smaller than
		 * region_size.
		 */
		tail_size = clone->ti->len & (region_size - 1);
		if (!tail_size)
			tail_size = region_size;
	} else {
		tail_size = region_size;
	}

	total_size += tail_size;

	from.bdev = clone->source_dev->bdev;
	from.sector = region_to_sector(clone, region_start);
	from.count = total_size;

	to.bdev = clone->dest_dev->bdev;
	to.sector = from.sector;
	to.count = from.count;

	/* Issue copy */
	atomic_add(nr_regions, &clone->hydrations_in_flight);
	dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
		       hydration_kcopyd_callback, hd);
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_clone_region_hydration *hd = bio->bi_private;

	bio->bi_end_io = hd->overwrite_bio_end_io;
	hd->status = bio->bi_status;

	hydration_complete(hd);
}

static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{
	/*
	 * We don't need to save and restore bio->bi_private because device
	 * mapper core generates a new bio for us to use, with clean
	 * bi_private.
	 */
	hd->overwrite_bio = bio;
	hd->overwrite_bio_end_io = bio->bi_end_io;

	bio->bi_end_io = overwrite_endio;
	bio->bi_private = hd;

	atomic_inc(&hd->clone->hydrations_in_flight);
	generic_make_request(bio);
}

/*
 * Hydrate bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios for this region. If, by the time this function is
 * called, the region has finished hydrating, the bio is submitted to the
 * destination device.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
	unsigned long flags;
	unsigned long region_nr;
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd, *hd2;

	region_nr = bio_to_region(clone, bio);
	bucket = get_hash_table_bucket(clone, region_nr);

	bucket_lock_irqsave(bucket, flags);

	hd = __hash_find(bucket, region_nr);
	if (hd) {
		/* Someone else is hydrating the region */
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irqrestore(bucket, flags);
		return;
	}

	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		/* The region has been hydrated */
		bucket_unlock_irqrestore(bucket, flags);
		issue_bio(clone, bio);
		return;
	}

	/*
	 * We must allocate a hydration descriptor and start the hydration of
	 * the corresponding region.
	 */
	bucket_unlock_irqrestore(bucket, flags);

	hd = alloc_hydration(clone);
	hydration_init(hd, region_nr);

	bucket_lock_irqsave(bucket, flags);

	/* Check if the region has been hydrated in the meantime. */
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		bucket_unlock_irqrestore(bucket, flags);
		free_hydration(hd);
		issue_bio(clone, bio);
		return;
	}

	hd2 = __find_or_insert_region_hydration(bucket, hd);
	if (hd2 != hd) {
		/* Someone else started the region's hydration. */
		bio_list_add(&hd2->deferred_bios, bio);
		bucket_unlock_irqrestore(bucket, flags);
		free_hydration(hd);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL then there is no point starting a
	 * hydration, since we will not be able to update the metadata when the
	 * hydration finishes.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		hlist_del(&hd->h);
		bucket_unlock_irqrestore(bucket, flags);
		free_hydration(hd);
		bio_io_error(bio);
		return;
	}

	/*
	 * Start region hydration.
	 *
	 * If a bio overwrites a region, i.e., its size is equal to the
	 * region's size, then we don't need to copy the region from the source
	 * to the destination device.
	 */
	if (is_overwrite_bio(clone, bio)) {
		bucket_unlock_irqrestore(bucket, flags);
		hydration_overwrite(hd, bio);
	} else {
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irqrestore(bucket, flags);
		hydration_copy(hd, 1);
	}
}

/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 */

/*
 * Batch region hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB, which
 * is good for small, random write performance (because of the overwriting of
 * un-hydrated regions) and at the same time issue big copy requests to kcopyd
 * to achieve high hydration bandwidth.
 */
struct batch_info {
	struct dm_clone_region_hydration *head;
	unsigned int nr_batched_regions;
};
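
/*
 * For example, with an 8-sector (4KB) region size and hydration_batch_size
 * set to 64, up to 64 adjacent unhydrated regions are merged into a single
 * 256KB kcopyd copy request.
 */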

static void __batch_hydration(struct batch_info *batch,
			      struct dm_clone_region_hydration *hd)
{
	struct clone *clone = hd->clone;
	unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);

	if (batch->head) {
		/* Try to extend the current batch */
		if (batch->nr_batched_regions < max_batch_size &&
		    (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {
			list_add_tail(&hd->list, &batch->head->list);
			batch->nr_batched_regions++;
			hd = NULL;
		}

		/* Check if we should issue the current batch */
		if (batch->nr_batched_regions >= max_batch_size || hd) {
			hydration_copy(batch->head, batch->nr_batched_regions);
			batch->head = NULL;
			batch->nr_batched_regions = 0;
		}
	}

	if (!hd)
		return;

	/* We treat max batch sizes of zero and one equivalently */
	if (max_batch_size <= 1) {
		hydration_copy(hd, 1);
		return;
	}

	/* Start a new batch */
	BUG_ON(!list_empty(&hd->list));
	batch->head = hd;
	batch->nr_batched_regions = 1;
}

static unsigned long __start_next_hydration(struct clone *clone,
					    unsigned long offset,
					    struct batch_info *batch)
{
	unsigned long flags;
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd;
	unsigned long nr_regions = clone->nr_regions;

	hd = alloc_hydration(clone);

	/* Try to find a region to hydrate. */
	do {
		offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
		if (offset == nr_regions)
			break;

		bucket = get_hash_table_bucket(clone, offset);
		bucket_lock_irqsave(bucket, flags);

		if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
		    !__hash_find(bucket, offset)) {
			hydration_init(hd, offset);
			__insert_region_hydration(bucket, hd);
			bucket_unlock_irqrestore(bucket, flags);

			/* Batch hydration */
			__batch_hydration(batch, hd);

			return (offset + 1);
		}

		bucket_unlock_irqrestore(bucket, flags);

	} while (++offset < nr_regions);

	if (hd)
		free_hydration(hd);

	return offset;
}

/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{
	unsigned int current_volume;
	unsigned long offset, nr_regions = clone->nr_regions;

	struct batch_info batch = {
		.head = NULL,
		.nr_batched_regions = 0,
	};

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		return;

	if (dm_clone_is_hydration_done(clone->cmd))
		return;

	/*
	 * Avoid race with device suspension.
	 */
	atomic_inc(&clone->hydrations_in_flight);

	/*
	 * Make sure atomic_inc() is ordered before test_bit(), otherwise we
	 * might race with clone_postsuspend() and start a region hydration
	 * after the target has been suspended.
	 *
	 * This is paired with the smp_mb__after_atomic() in
	 * clone_postsuspend().
	 */
	smp_mb__after_atomic();

	offset = clone->hydration_offset;
	while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
	       !atomic_read(&clone->ios_in_flight) &&
	       test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	       offset < nr_regions) {
		current_volume = atomic_read(&clone->hydrations_in_flight);
		current_volume += batch.nr_batched_regions;

		if (current_volume > READ_ONCE(clone->hydration_threshold))
			break;

		offset = __start_next_hydration(clone, offset, &batch);
	}

	if (batch.head)
		hydration_copy(batch.head, batch.nr_batched_regions);

	if (offset >= nr_regions)
		offset = 0;

	clone->hydration_offset = offset;

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

/*---------------------------------------------------------------------------*/

static bool need_commit_due_to_time(struct clone *clone)
{
	return !time_in_range(jiffies, clone->last_commit_jiffies,
			      clone->last_commit_jiffies + COMMIT_PERIOD);
}

/*
 * A non-zero return indicates read-only or fail mode.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
	int r = 0;

	if (dest_dev_flushed)
		*dest_dev_flushed = false;

	mutex_lock(&clone->commit_lock);

	if (!dm_clone_changed_this_transaction(clone->cmd))
		goto out;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		r = -EPERM;
		goto out;
	}

	r = dm_clone_metadata_pre_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
		goto out;
	}

	bio_reset(&clone->flush_bio);
	bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
	clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	r = submit_bio_wait(&clone->flush_bio);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "flush destination device", r);
		goto out;
	}

	if (dest_dev_flushed)
		*dest_dev_flushed = true;

	r = dm_clone_metadata_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
		goto out;
	}

	if (dm_clone_is_hydration_done(clone->cmd))
		dm_table_event(clone->ti->table);
out:
	mutex_unlock(&clone->commit_lock);

	return r;
}

static void process_deferred_discards(struct clone *clone)
{
	int r = -EPERM;
	struct bio *bio;
	struct blk_plug plug;
	unsigned long rs, re, flags;
	struct bio_list discards = BIO_EMPTY_LIST;

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&discards, &clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_discard_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	if (bio_list_empty(&discards))
		return;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		goto out;

	/* Update the metadata */
	bio_list_for_each(bio, &discards) {
		bio_region_range(clone, bio, &rs, &re);
		/*
		 * A discard request might cover regions that have been already
		 * hydrated. There is no need to update the metadata for these
		 * regions.
		 */
		r = dm_clone_cond_set_range(clone->cmd, rs, re - rs);

		if (unlikely(r))
			break;
	}
out:
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&discards)))
		complete_discard_bio(clone, bio, r == 0);
	blk_finish_plug(&plug);
}

static void process_deferred_bios(struct clone *clone)
{
	unsigned long flags;
	struct bio_list bios = BIO_EMPTY_LIST;

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&bios, &clone->deferred_bios);
	bio_list_init(&clone->deferred_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	if (bio_list_empty(&bios))
		return;

	submit_bios(&bios);
}

static void process_deferred_flush_bios(struct clone *clone)
{
	struct bio *bio;
	unsigned long flags;
	bool dest_dev_flushed;
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio_list bio_completions = BIO_EMPTY_LIST;

	/*
	 * If there are any deferred flush bios, we must commit the metadata
	 * before issuing them or signaling their completion.
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&bios, &clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_bios);

	bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
	bio_list_init(&clone->deferred_flush_completions);
	spin_unlock_irqrestore(&clone->lock, flags);

	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
	    !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
		return;

	if (commit_metadata(clone, &dest_dev_flushed)) {
		bio_list_merge(&bios, &bio_completions);

		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);

		return;
	}

	clone->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bio_completions)))
		bio_endio(bio);

	while ((bio = bio_list_pop(&bios))) {
		if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
			/* We just flushed the destination device as part of
			 * the metadata commit, so there is no reason to send
			 * another flush.
			 */
			bio_endio(bio);
		} else
			generic_make_request(bio);
	}
}

static void do_worker(struct work_struct *work)
{
	struct clone *clone = container_of(work, typeof(*clone), worker);

	process_deferred_bios(clone);
	process_deferred_discards(clone);

	/*
	 * process_deferred_flush_bios():
	 *
	 *   - Commit metadata
	 *
	 *   - Process deferred REQ_FUA completions
	 *
	 *   - Process deferred REQ_PREFLUSH bios
	 */
	process_deferred_flush_bios(clone);

	/* Background hydration */
	do_hydration(clone);
}

/*
 * Commit periodically so that not too much unwritten data builds up.
 *
 * Also, restart background hydration, if it has been stopped by in-flight I/O.
 */
static void do_waker(struct work_struct *work)
{
	struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);

	wake_worker(clone);
	queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
}

/*---------------------------------------------------------------------------*/

static int clone_map(struct dm_target *ti, struct bio *bio)
{
	struct clone *clone = ti->private;
	unsigned long region_nr;

	atomic_inc(&clone->ios_in_flight);

	if (unlikely(get_clone_mode(clone) == CM_FAIL))
		return DM_MAPIO_KILL;

	/*
	 * REQ_PREFLUSH bios carry no data:
	 *
	 * - Commit metadata, if changed
	 *
	 * - Pass down to destination device
	 */
	if (bio->bi_opf & REQ_PREFLUSH) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/*
	 * dm-clone interprets discards and performs a fast hydration of the
	 * discarded regions, i.e., we skip the copy from the source device and
	 * just mark the regions as hydrated.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		process_discard_bio(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * If the bio's region is hydrated, redirect it to the destination
	 * device.
	 *
	 * If the region is not hydrated and the bio is a READ, redirect it to
	 * the source device.
	 *
	 * Else, defer WRITE bio until after its region has been hydrated and
	 * start the region's hydration immediately.
	 */
	region_nr = bio_to_region(clone, bio);
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	} else if (bio_data_dir(bio) == READ) {
		remap_to_source(clone, bio);
		return DM_MAPIO_REMAPPED;
	}

	remap_to_dest(clone, bio);
	hydrate_bio_region(clone, bio);

	return DM_MAPIO_SUBMITTED;
}

static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct clone *clone = ti->private;

	atomic_dec(&clone->ios_in_flight);

	return DM_ENDIO_DONE;
}

static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
		       ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count;

	count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	DMEMIT("%u ", count);

	if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		DMEMIT("no_hydration ");

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		DMEMIT("no_discard_passdown ");

	*sz_ptr = sz;
}

static void emit_core_args(struct clone *clone, char *result,
			   unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count = 4;

	DMEMIT("%u hydration_threshold %u hydration_batch_size %u ", count,
	       READ_ONCE(clone->hydration_threshold),
	       READ_ONCE(clone->hydration_batch_size));

	*sz_ptr = sz;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#features> <features>* <#core args> <core args>* <clone metadata mode>
 */
static void clone_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result,
			 unsigned int maxlen)
{
	int r;
	unsigned int i;
	ssize_t sz = 0;
	dm_block_t nr_free_metadata_blocks = 0;
	dm_block_t nr_metadata_blocks = 0;
	char buf[BDEVNAME_SIZE];
	struct clone *clone = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_clone_mode(clone) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_metadata(clone, NULL);

		r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);
		if (r) {
			DMERR("%s: dm_clone_get_free_metadata_block_count returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);
		if (r) {
			DMERR("%s: dm_clone_get_metadata_dev_size returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		DMEMIT("%u %llu/%llu %llu %lu/%lu %u ",
		       DM_CLONE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
		       (unsigned long long)nr_metadata_blocks,
		       (unsigned long long)clone->region_size,
		       dm_clone_nr_of_hydrated_regions(clone->cmd),
		       clone->nr_regions,
		       atomic_read(&clone->hydrations_in_flight));

		emit_flags(clone, result, maxlen, &sz);
		emit_core_args(clone, result, maxlen, &sz);

		switch (get_clone_mode(clone)) {
		case CM_WRITE:
			DMEMIT("rw");
			break;

		case CM_READ_ONLY:
			DMEMIT("ro");
			break;

		case CM_FAIL:
			DMEMIT("Fail");
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->source_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < clone->nr_ctr_args; i++)
			DMEMIT(" %s", clone->ctr_args[i]);
	}

	return;

error:
	DMEMIT("Error");
}
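
/*
 * Illustrative STATUSTYPE_INFO output (made-up values, following the format
 * documented above):
 *
 *   4096 24/65536 8 1310720/2621440 1 0 4 hydration_threshold 1
 *   hydration_batch_size 1 rw
 */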

static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct request_queue *dest_q, *source_q;
	struct clone *clone = container_of(cb, struct clone, callbacks);

	source_q = bdev_get_queue(clone->source_dev->bdev);
	dest_q = bdev_get_queue(clone->dest_dev->bdev);

	return (bdi_congested(dest_q->backing_dev_info, bdi_bits) |
		bdi_congested(source_q->backing_dev_info, bdi_bits));
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*---------------------------------------------------------------------------*/

/*
 * Construct a clone device mapping:
 *
 * clone <metadata dev> <destination dev> <source dev> <region size>
 *	[<#feature args> [<feature arg>]* [<#core args> [key value]*]]
 *
 * metadata dev: Fast device holding the persistent metadata
 * destination dev: The destination device, which will become a clone of the
 *		    source device
 * source dev: The read-only source device that gets cloned
 * region size: dm-clone unit size in sectors
 *
 * #feature args: Number of feature arguments passed
 * feature args: E.g. no_hydration, no_discard_passdown
 *
 * #core arguments: An even number of core arguments
 * core arguments: Key/value pairs for tuning the core
 *		   E.g. 'hydration_threshold 256'
 */
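
/*
 * Example (illustrative device names and sizes), cloning an 8GiB source
 * device using 4KB (8-sector) regions and with background hydration disabled:
 *
 *   dmsetup create cloned --table "0 16777216 clone /dev/nvme0n1 /dev/sdb
 *       /dev/sda 8 1 no_hydration"
 */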

static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 2,
		.error = "Invalid number of feature arguments"
	};

	/* No feature arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "no_hydration")) {
			__clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
			__clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
		} else {
			ti->error = "Invalid feature argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	unsigned int value;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 4,
		.error = "Invalid number of core arguments"
	};

	/* Initialize core arguments */
	clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
	clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;

	/* No core arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	if (argc & 1) {
		ti->error = "Number of core arguments must be even";
		return -EINVAL;
	}

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc -= 2;

		if (!strcasecmp(arg_name, "hydration_threshold")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_threshold'";
				return -EINVAL;
			}
			clone->hydration_threshold = value;
		} else if (!strcasecmp(arg_name, "hydration_batch_size")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_batch_size'";
				return -EINVAL;
			}
			clone->hydration_batch_size = value;
		} else {
			ti->error = "Invalid core argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	unsigned int region_size;
	struct dm_arg arg;

	arg.min = MIN_REGION_SIZE;
	arg.max = MAX_REGION_SIZE;
	arg.error = "Invalid region size";

	r = dm_read_arg(&arg, as, &region_size, error);
	if (r)
		return r;

	/* Check region size is a power of 2 */
	if (!is_power_of_2(region_size)) {
		*error = "Region size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the region size against the device logical block size */
	if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
	    region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
		*error = "Region size is not a multiple of device logical block size";
		return -EINVAL;
	}

	clone->region_size = region_size;

	return 0;
}

static int validate_nr_regions(unsigned long n, char **error)
{
	/*
	 * dm_bitset restricts us to 2^32 regions. test_bit & co. restrict us
	 * further to 2^31 regions.
	 */
	if (n > (1UL << 31)) {
		*error = "Too many regions. Consider increasing the region size";
		return -EINVAL;
	}

	return 0;
}

static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(clone->metadata_dev);
	if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t dest_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->dest_dev);
	if (r) {
		*error = "Error opening destination device";
		return r;
	}

	dest_dev_size = get_dev_size(clone->dest_dev);
	if (dest_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->dest_dev);
		*error = "Device size larger than destination device";
		return -EINVAL;
	}

	return 0;
}

static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t source_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
			  &clone->source_dev);
	if (r) {
		*error = "Error opening source device";
		return r;
	}

	source_dev_size = get_dev_size(clone->source_dev);
	if (source_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->source_dev);
		*error = "Device size larger than source device";
		return -EINVAL;
	}

	return 0;
}

static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
{
	unsigned int i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		goto error;

	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);

		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			goto error;
		}
	}

	clone->nr_ctr_args = argc;
	clone->ctr_args = copy;
	return 0;

error:
	*error = "Failed to allocate memory for table line";
	return -ENOMEM;
}

static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct clone *clone;
	struct dm_arg_set as;

	if (argc < 4) {
		ti->error = "Invalid number of arguments";
		return -EINVAL;
	}

	as.argc = argc;
	as.argv = argv;

	clone = kzalloc(sizeof(*clone), GFP_KERNEL);
	if (!clone) {
		ti->error = "Failed to allocate clone structure";
		return -ENOMEM;
	}

	clone->ti = ti;

	/* Initialize dm-clone flags */
	__set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	__set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	__set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	r = parse_metadata_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_clone;

	r = parse_dest_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_meta_dev;

	r = parse_source_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_dest_dev;

	r = parse_region_size(clone, &as, &ti->error);
	if (r)
		goto out_with_source_dev;

	clone->region_shift = __ffs(clone->region_size);
	clone->nr_regions = dm_sector_div_up(ti->len, clone->region_size);

	r = validate_nr_regions(clone->nr_regions, &ti->error);
	if (r)
		goto out_with_source_dev;

	r = dm_set_target_max_io_len(ti, clone->region_size);
	if (r) {
		ti->error = "Failed to set max io len";
		goto out_with_source_dev;
	}

	r = parse_feature_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	r = parse_core_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	/* Load metadata */
	clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
					    clone->region_size);
	if (IS_ERR(clone->cmd)) {
		ti->error = "Failed to load metadata";
		r = PTR_ERR(clone->cmd);
		goto out_with_source_dev;
	}

	__set_clone_mode(clone, CM_WRITE);

	if (get_clone_mode(clone) != CM_WRITE) {
		ti->error = "Unable to get write access to metadata, please check/repair metadata";
		r = -EPERM;
		goto out_with_metadata;
	}

	clone->last_commit_jiffies = jiffies;

	/* Allocate hydration hash table */
	r = hash_table_init(clone);
	if (r) {
		ti->error = "Failed to allocate hydration hash table";
		goto out_with_metadata;
	}

	atomic_set(&clone->ios_in_flight, 0);
	init_waitqueue_head(&clone->hydration_stopped);
	spin_lock_init(&clone->lock);
	bio_list_init(&clone->deferred_bios);
	bio_list_init(&clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_completions);
	clone->hydration_offset = 0;
	atomic_set(&clone->hydrations_in_flight, 0);
	bio_init(&clone->flush_bio, NULL, 0);

	clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!clone->wq) {
		ti->error = "Failed to allocate workqueue";
		r = -ENOMEM;
		goto out_with_ht;
	}

	INIT_WORK(&clone->worker, do_worker);
	INIT_DELAYED_WORK(&clone->waker, do_waker);

	clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(clone->kcopyd_client)) {
		r = PTR_ERR(clone->kcopyd_client);
		goto out_with_wq;
	}

	r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
				   _hydration_cache);
	if (r) {
		ti->error = "Failed to create dm_clone_region_hydration memory pool";
		goto out_with_kcopyd;
	}

	/* Save a copy of the table line */
	r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
	if (r)
		goto out_with_mempool;

	mutex_init(&clone->commit_lock);
	clone->callbacks.congested_fn = clone_is_congested;
	dm_table_add_target_callbacks(ti->table, &clone->callbacks);

	/* Enable flushes */
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	/* Enable discards */
	ti->discards_supported = true;
	ti->num_discard_bios = 1;

	ti->private = clone;

	return 0;

out_with_mempool:
	mempool_exit(&clone->hydration_pool);
out_with_kcopyd:
	dm_kcopyd_client_destroy(clone->kcopyd_client);
out_with_wq:
	destroy_workqueue(clone->wq);
out_with_ht:
	hash_table_exit(clone);
out_with_metadata:
	dm_clone_metadata_close(clone->cmd);
out_with_source_dev:
	dm_put_device(ti, clone->source_dev);
out_with_dest_dev:
	dm_put_device(ti, clone->dest_dev);
out_with_meta_dev:
	dm_put_device(ti, clone->metadata_dev);
out_with_clone:
	kfree(clone);

	return r;
}

static void clone_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct clone *clone = ti->private;

	mutex_destroy(&clone->commit_lock);
	bio_uninit(&clone->flush_bio);

	for (i = 0; i < clone->nr_ctr_args; i++)
		kfree(clone->ctr_args[i]);
	kfree(clone->ctr_args);

	mempool_exit(&clone->hydration_pool);
	dm_kcopyd_client_destroy(clone->kcopyd_client);
	destroy_workqueue(clone->wq);
	hash_table_exit(clone);
	dm_clone_metadata_close(clone->cmd);

	dm_put_device(ti, clone->source_dev);
	dm_put_device(ti, clone->dest_dev);
	dm_put_device(ti, clone->metadata_dev);

	kfree(clone);
}

/*---------------------------------------------------------------------------*/

static void clone_postsuspend(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	/*
	 * To successfully suspend the device:
	 *
	 *	- We cancel the delayed work for periodic commits and wait for
	 *	  it to finish.
	 *
	 *	- We stop the background hydration, i.e. we prevent new region
	 *	  hydrations from starting.
	 *
	 *	- We wait for any in-flight hydrations to finish.
	 *
	 *	- We flush the workqueue.
	 *
	 *	- We commit the metadata.
	 */
	cancel_delayed_work_sync(&clone->waker);

	set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);

	/*
	 * Make sure set_bit() is ordered before atomic_read(), otherwise we
	 * might race with do_hydration() and miss some started region
	 * hydrations.
	 *
	 * This is paired with smp_mb__after_atomic() in do_hydration().
	 */
	smp_mb__after_atomic();

	wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
	flush_workqueue(clone->wq);

	(void) commit_metadata(clone, NULL);
}

static void clone_resume(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	do_waker(&clone->waker.work);
}

static bool bdev_supports_discards(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return (q && blk_queue_discard(q));
}

/*
 * If discard_passdown was enabled verify that the destination device supports
 * discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct clone *clone)
{
	struct block_device *dest_dev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
	const char *reason = NULL;
	char buf[BDEVNAME_SIZE];

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		return;

	if (!bdev_supports_discards(dest_dev))
		reason = "discard unsupported";
	else if (dest_limits->max_discard_sectors < clone->region_size)
		reason = "max discard sectors smaller than a region";

	if (reason) {
		DMWARN("Destination device (%s) %s: Disabling discard passdown.",
		       bdevname(dest_dev, buf), reason);
		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
	}
}

static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
	struct block_device *dest_bdev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
		/* No passdown is done so we set our own virtual limits */
		limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
		limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
		return;
	}

	/*
	 * clone_iterate_devices() is stacking both the source and destination
	 * device limits but discards aren't passed to the source device, so
	 * inherit destination's limits.
	 */
	limits->max_discard_sectors = dest_limits->max_discard_sectors;
	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
	limits->discard_granularity = dest_limits->discard_granularity;
	limits->discard_alignment = dest_limits->discard_alignment;
	limits->discard_misaligned = dest_limits->discard_misaligned;
	limits->max_discard_segments = dest_limits->max_discard_segments;
}

static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct clone *clone = ti->private;
	u64 io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with
	 * dm-clone's region size (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < clone->region_size ||
	    do_div(io_opt_sectors, clone->region_size)) {
		blk_limits_io_min(limits, clone->region_size << SECTOR_SHIFT);
		blk_limits_io_opt(limits, clone->region_size << SECTOR_SHIFT);
	}

	disable_passdown_if_not_supported(clone);
	set_discard_limits(clone, limits);
}

static int clone_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int ret;
	struct clone *clone = ti->private;
	struct dm_dev *dest_dev = clone->dest_dev;
	struct dm_dev *source_dev = clone->source_dev;

	ret = fn(ti, source_dev, 0, ti->len, data);
	if (!ret)
		ret = fn(ti, dest_dev, 0, ti->len, data);

	return ret;
}

/*
 * dm-clone message functions.
 */
static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_threshold, nr_regions);

	/*
	 * If user space sets hydration_threshold to zero then the hydration
	 * will stop. If at a later time the hydration_threshold is increased
	 * we must restart the hydration process by waking up the worker.
	 */
	wake_worker(clone);
}

static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_batch_size, nr_regions);
}

static void enable_hydration(struct clone *clone)
{
	if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		wake_worker(clone);
}

static void disable_hydration(struct clone *clone)
{
	clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
}

static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct clone *clone = ti->private;
	unsigned int value;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "enable_hydration")) {
		enable_hydration(clone);
		return 0;
	}

	if (!strcasecmp(argv[0], "disable_hydration")) {
		disable_hydration(clone);
		return 0;
	}

	if (argc != 2)
		return -EINVAL;

	if (!strcasecmp(argv[0], "hydration_threshold")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_threshold(clone, value);

		return 0;
	}

	if (!strcasecmp(argv[0], "hydration_batch_size")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_batch_size(clone, value);

		return 0;
	}

	DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
	return -EINVAL;
}
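
/*
 * Usage examples (illustrative device name):
 *
 *   dmsetup message cloned 0 hydration_threshold 256
 *   dmsetup message cloned 0 disable_hydration
 */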

static struct target_type clone_target = {
	.name = "clone",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = clone_ctr,
	.dtr = clone_dtr,
	.map = clone_map,
	.end_io = clone_endio,
	.postsuspend = clone_postsuspend,
	.resume = clone_resume,
	.status = clone_status,
	.message = clone_message,
	.io_hints = clone_io_hints,
	.iterate_devices = clone_iterate_devices,
};

/*---------------------------------------------------------------------------*/

/* Module functions */
static int __init dm_clone_init(void)
{
	int r;

	_hydration_cache = KMEM_CACHE(dm_clone_region_hydration, 0);
	if (!_hydration_cache)
		return -ENOMEM;

	r = dm_register_target(&clone_target);
	if (r < 0) {
		DMERR("Failed to register clone target");
		/* Avoid leaking the hydration cache if registration fails */
		kmem_cache_destroy(_hydration_cache);
		return r;
	}

	return 0;
}

static void __exit dm_clone_exit(void)
{
	dm_unregister_target(&clone_target);

	kmem_cache_destroy(_hydration_cache);
	_hydration_cache = NULL;
}

/* Module hooks */
module_init(dm_clone_init);
module_exit(dm_clone_exit);

MODULE_DESCRIPTION(DM_NAME " clone target");
MODULE_AUTHOR("Nikos Tsironis <ntsironis@arrikto.com>");
MODULE_LICENSE("GPL");