drivers/md/dm-clone-target.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */

#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/err.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-clone-metadata.h"

#define DM_MSG_PREFIX "clone"

/*
 * Minimum and maximum allowed region sizes
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */

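/*
 * For reference, region sizes are expressed in 512-byte sectors:
 * MIN_REGION_SIZE = 1 << 3 = 8 sectors * 512 bytes = 4KB, and
 * MAX_REGION_SIZE = 1 << 21 = 2097152 sectors * 512 bytes = 1GB.
 */
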
#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */

/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15

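/*
 * With HASH_TABLE_BITS = 15 the table has 1 << 15 = 32768 buckets;
 * hash_long(region_nr, HASH_TABLE_BITS) maps a region number to a bucket
 * index in [0, 32767].
 */
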
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(clone_hydration_throttle,
        "A percentage of time allocated for hydrating regions");

/* Slab cache for struct dm_clone_region_hydration */
static struct kmem_cache *_hydration_cache;

/* dm-clone metadata modes */
enum clone_metadata_mode {
        CM_WRITE,               /* metadata may be changed */
        CM_READ_ONLY,           /* metadata may not be changed */
        CM_FAIL,                /* all metadata I/O fails */
};

struct hash_table_bucket;

struct clone {
        struct dm_target *ti;
        struct dm_target_callbacks callbacks;

        struct dm_dev *metadata_dev;
        struct dm_dev *dest_dev;
        struct dm_dev *source_dev;

        unsigned long nr_regions;
        sector_t region_size;
        unsigned int region_shift;

        /*
         * A metadata commit and the actions taken in case it fails should run
         * as a single atomic step.
         */
        struct mutex commit_lock;

        struct dm_clone_metadata *cmd;

        /*
         * bio used to flush the destination device, before committing the
         * metadata.
         */
        struct bio flush_bio;

        /* Region hydration hash table */
        struct hash_table_bucket *ht;

        atomic_t ios_in_flight;

        wait_queue_head_t hydration_stopped;

        mempool_t hydration_pool;

        unsigned long last_commit_jiffies;

        /*
         * We defer incoming WRITE bios for regions that are not hydrated
         * until those regions have been hydrated.
         *
         * Also, we defer REQ_FUA and REQ_PREFLUSH bios until the metadata
         * has been committed.
         */
        spinlock_t lock;
        struct bio_list deferred_bios;
        struct bio_list deferred_discard_bios;
        struct bio_list deferred_flush_bios;
        struct bio_list deferred_flush_completions;

        /* Maximum number of regions being copied during background hydration. */
        unsigned int hydration_threshold;

        /* Number of regions to batch together during background hydration. */
        unsigned int hydration_batch_size;

        /* Which region to hydrate next */
        unsigned long hydration_offset;

        atomic_t hydrations_in_flight;

        /*
         * Save a copy of the table line rather than reconstructing it for the
         * status.
         */
        unsigned int nr_ctr_args;
        const char **ctr_args;

        struct workqueue_struct *wq;
        struct work_struct worker;
        struct delayed_work waker;

        struct dm_kcopyd_client *kcopyd_client;

        enum clone_metadata_mode mode;
        unsigned long flags;
};

/*
 * dm-clone flags
 */
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2

/*---------------------------------------------------------------------------*/

/*
 * Metadata failure handling.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{
        return READ_ONCE(clone->mode);
}

static const char *clone_device_name(struct clone *clone)
{
        return dm_table_device_name(clone->ti->table);
}

static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
        const char *descs[] = {
                "read-write",
                "read-only",
                "fail"
        };

        enum clone_metadata_mode old_mode = get_clone_mode(clone);

        /* Never move out of fail mode */
        if (old_mode == CM_FAIL)
                new_mode = CM_FAIL;

        switch (new_mode) {
        case CM_FAIL:
        case CM_READ_ONLY:
                dm_clone_metadata_set_read_only(clone->cmd);
                break;

        case CM_WRITE:
                dm_clone_metadata_set_read_write(clone->cmd);
                break;
        }

        WRITE_ONCE(clone->mode, new_mode);

        if (new_mode != old_mode) {
                dm_table_event(clone->ti->table);
                DMINFO("%s: Switching to %s mode", clone_device_name(clone),
                       descs[(int)new_mode]);
        }
}

static void __abort_transaction(struct clone *clone)
{
        const char *dev_name = clone_device_name(clone);

        if (get_clone_mode(clone) >= CM_READ_ONLY)
                return;

        DMERR("%s: Aborting current metadata transaction", dev_name);
        if (dm_clone_metadata_abort(clone->cmd)) {
                DMERR("%s: Failed to abort metadata transaction", dev_name);
                __set_clone_mode(clone, CM_FAIL);
        }
}

static void __reload_in_core_bitset(struct clone *clone)
{
        const char *dev_name = clone_device_name(clone);

        if (get_clone_mode(clone) == CM_FAIL)
                return;

        /* Reload the on-disk bitset */
        DMINFO("%s: Reloading on-disk bitmap", dev_name);
        if (dm_clone_reload_in_core_bitset(clone->cmd)) {
                DMERR("%s: Failed to reload on-disk bitmap", dev_name);
                __set_clone_mode(clone, CM_FAIL);
        }
}

static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{
        DMERR("%s: Metadata operation `%s' failed: error = %d",
              clone_device_name(clone), op, r);

        __abort_transaction(clone);
        __set_clone_mode(clone, CM_READ_ONLY);

        /*
         * dm_clone_reload_in_core_bitset() may run concurrently with either
         * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but
         * it's safe as we have already set the metadata to read-only mode.
         */
        __reload_in_core_bitset(clone);
}

/*---------------------------------------------------------------------------*/

/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{
        wake_up_all(&clone->hydration_stopped);
}

static inline void wake_worker(struct clone *clone)
{
        queue_work(clone->wq, &clone->worker);
}

/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{
        bio_set_dev(bio, clone->source_dev->bdev);
}

static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{
        bio_set_dev(bio, clone->dest_dev->bdev);
}

static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{
        return op_is_flush(bio->bi_opf) &&
                dm_clone_changed_this_transaction(clone->cmd);
}

/* Get the starting sector of a region */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
        return (region_nr << clone->region_shift);
}

/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{
        return (bio->bi_iter.bi_sector >> clone->region_shift);
}

/* Get the range of regions fully covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
                             unsigned long *rs, unsigned long *re)
{
        *rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
        *re = bio_end_sector(bio) >> clone->region_shift;
}

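/*
 * Worked example (an illustration, not exercised by the code): with
 * region_size = 8 sectors (region_shift = 3), a discard spanning sectors
 * [10, 30) yields rs = DIV_ROUND_UP(10, 8) = 2 and re = 30 >> 3 = 3, i.e.
 * only region 2 (sectors 16-23) lies entirely inside the bio. Rounding rs
 * up and re down ensures partially covered regions are never touched.
 */
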
/* Check whether a bio overwrites a region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{
        return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
}

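/*
 * Checking only the size is enough here on the assumption that bios never
 * straddle region boundaries; dm core guarantees this when the target caps
 * max_io_len at the region size (done in the constructor, outside this
 * excerpt). A WRITE of exactly region_size sectors then covers one whole
 * region.
 */
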
static void fail_bios(struct bio_list *bios, blk_status_t status)
{
        struct bio *bio;

        while ((bio = bio_list_pop(bios))) {
                bio->bi_status = status;
                bio_endio(bio);
        }
}

static void submit_bios(struct bio_list *bios)
{
        struct bio *bio;
        struct blk_plug plug;

        blk_start_plug(&plug);

        while ((bio = bio_list_pop(bios)))
                generic_make_request(bio);

        blk_finish_plug(&plug);
}

/*
 * Submit bio to the underlying device.
 *
 * If the bio triggers a commit, delay it until the metadata has been
 * committed.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{
        unsigned long flags;

        if (!bio_triggers_commit(clone, bio)) {
                generic_make_request(bio);
                return;
        }

        /*
         * If the metadata mode is RO or FAIL we won't be able to commit the
         * metadata, so we complete the bio with an error.
         */
        if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
                bio_io_error(bio);
                return;
        }

        /*
         * Batch together any bios that trigger commits and then issue a single
         * commit for them in process_deferred_flush_bios().
         */
        spin_lock_irqsave(&clone->lock, flags);
        bio_list_add(&clone->deferred_flush_bios, bio);
        spin_unlock_irqrestore(&clone->lock, flags);

        wake_worker(clone);
}

/*
 * Remap bio to the destination device and submit it.
 *
 * If the bio triggers a commit, delay it until the metadata has been
 * committed.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{
        remap_to_dest(clone, bio);
        issue_bio(clone, bio);
}

/*
 * Issue bios that have been deferred until after their region has finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to call
 * from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{
        struct bio *bio;
        unsigned long flags;
        struct bio_list flush_bios = BIO_EMPTY_LIST;
        struct bio_list normal_bios = BIO_EMPTY_LIST;

        if (bio_list_empty(bios))
                return;

        while ((bio = bio_list_pop(bios))) {
                if (bio_triggers_commit(clone, bio))
                        bio_list_add(&flush_bios, bio);
                else
                        bio_list_add(&normal_bios, bio);
        }

        spin_lock_irqsave(&clone->lock, flags);
        bio_list_merge(&clone->deferred_bios, &normal_bios);
        bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
        spin_unlock_irqrestore(&clone->lock, flags);

        wake_worker(clone);
}

static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{
        unsigned long flags;

        /*
         * If the bio has the REQ_FUA flag set we must commit the metadata
         * before signaling its completion.
         *
         * complete_overwrite_bio() is only called by hydration_complete(),
         * after having successfully updated the metadata. This means we don't
         * need to call dm_clone_changed_this_transaction() to check if the
         * metadata has changed and thus we can avoid taking the metadata spin
         * lock.
         */
        if (!(bio->bi_opf & REQ_FUA)) {
                bio_endio(bio);
                return;
        }

        /*
         * If the metadata mode is RO or FAIL we won't be able to commit the
         * metadata, so we complete the bio with an error.
         */
        if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
                bio_io_error(bio);
                return;
        }

        /*
         * Batch together any bios that trigger commits and then issue a single
         * commit for them in process_deferred_flush_bios().
         */
        spin_lock_irqsave(&clone->lock, flags);
        bio_list_add(&clone->deferred_flush_completions, bio);
        spin_unlock_irqrestore(&clone->lock, flags);

        wake_worker(clone);
}

static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
        bio->bi_iter.bi_sector = sector;
        bio->bi_iter.bi_size = to_bytes(len);
}

static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
        unsigned long rs, re;

        /*
         * If the destination device supports discards, remap and trim the
         * discard bio and pass it down. Otherwise complete the bio
         * immediately.
         */
        if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
                remap_to_dest(clone, bio);
                bio_region_range(clone, bio, &rs, &re);
                trim_bio(bio, rs << clone->region_shift,
                         (re - rs) << clone->region_shift);
                generic_make_request(bio);
        } else
                bio_endio(bio);
}

static void process_discard_bio(struct clone *clone, struct bio *bio)
{
        unsigned long rs, re, flags;

        bio_region_range(clone, bio, &rs, &re);
        BUG_ON(re > clone->nr_regions);

        if (unlikely(rs == re)) {
                bio_endio(bio);
                return;
        }

        /*
         * The covered regions are already hydrated so we just need to pass
         * down the discard.
         */
        if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) {
                complete_discard_bio(clone, bio, true);
                return;
        }

        /*
         * If the metadata mode is RO or FAIL we won't be able to update the
         * metadata for the regions covered by the discard so we just ignore
         * it.
         */
        if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
                bio_endio(bio);
                return;
        }

        /*
         * Defer discard processing.
         */
        spin_lock_irqsave(&clone->lock, flags);
        bio_list_add(&clone->deferred_discard_bios, bio);
        spin_unlock_irqrestore(&clone->lock, flags);

        wake_worker(clone);
}

/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 */
struct dm_clone_region_hydration {
        struct clone *clone;
        unsigned long region_nr;

        struct bio *overwrite_bio;
        bio_end_io_t *overwrite_bio_end_io;

        struct bio_list deferred_bios;

        blk_status_t status;

        /* Used by hydration batching */
        struct list_head list;

        /* Used by hydration hash table */
        struct hlist_node h;
};

/*
 * Hydration hash table implementation.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl and bit
 * spin locks don't support IRQ safe versions. Since we have to take the lock
 * in both process and interrupt context, we must fall back to using regular
 * spin locks; one per hash table bucket.
 */
struct hash_table_bucket {
        struct hlist_head head;

        /* Spinlock protecting the bucket */
        spinlock_t lock;
};

#define bucket_lock_irqsave(bucket, flags) \
        spin_lock_irqsave(&(bucket)->lock, flags)

#define bucket_unlock_irqrestore(bucket, flags) \
        spin_unlock_irqrestore(&(bucket)->lock, flags)

static int hash_table_init(struct clone *clone)
{
        unsigned int i, sz;
        struct hash_table_bucket *bucket;

        sz = 1 << HASH_TABLE_BITS;

        clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
        if (!clone->ht)
                return -ENOMEM;

        for (i = 0; i < sz; i++) {
                bucket = clone->ht + i;

                INIT_HLIST_HEAD(&bucket->head);
                spin_lock_init(&bucket->lock);
        }

        return 0;
}

static void hash_table_exit(struct clone *clone)
{
        kvfree(clone->ht);
}

static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
                                                       unsigned long region_nr)
{
        return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
}

/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
                                                     unsigned long region_nr)
{
        struct dm_clone_region_hydration *hd;

        hlist_for_each_entry(hd, &bucket->head, h) {
                if (hd->region_nr == region_nr)
                        return hd;
        }

        return NULL;
}

/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
                                             struct dm_clone_region_hydration *hd)
{
        hlist_add_head(&hd->h, &bucket->head);
}

/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for this region.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
                                  struct dm_clone_region_hydration *hd)
{
        struct dm_clone_region_hydration *hd2;

        hd2 = __hash_find(bucket, hd->region_nr);
        if (hd2)
                return hd2;

        __insert_region_hydration(bucket, hd);

        return hd;
}

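/*
 * Callers (see hydrate_bio_region() below) allocate the descriptor outside
 * the bucket lock, then call this under the lock: if another thread raced
 * and inserted a descriptor for the same region first, the existing one is
 * returned and the caller frees its own pre-allocated copy.
 */
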
/*---------------------------------------------------------------------------*/

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{
        struct dm_clone_region_hydration *hd;

        /*
         * Allocate a hydration from the hydration mempool.
         * This might block but it can't fail.
         */
        hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
        hd->clone = clone;

        return hd;
}

static inline void free_hydration(struct dm_clone_region_hydration *hd)
{
        mempool_free(hd, &hd->clone->hydration_pool);
}

/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{
        hd->region_nr = region_nr;
        hd->overwrite_bio = NULL;
        bio_list_init(&hd->deferred_bios);
        hd->status = 0;

        INIT_LIST_HEAD(&hd->list);
        INIT_HLIST_NODE(&hd->h);
}

/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * hydration from the hash table.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{
        int r = 0;
        unsigned long flags;
        struct hash_table_bucket *bucket;
        struct clone *clone = hd->clone;

        if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
                r = -EPERM;

        /* Update the metadata */
        if (likely(!r) && hd->status == BLK_STS_OK)
                r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);

        bucket = get_hash_table_bucket(clone, hd->region_nr);

        /* Remove hydration from hash table */
        bucket_lock_irqsave(bucket, flags);
        hlist_del(&hd->h);
        bucket_unlock_irqrestore(bucket, flags);

        return r;
}

/*
 * Complete a region's hydration:
 *
 *      1. Update dm-clone's metadata.
 *      2. Remove hydration from hash table.
 *      3. Complete overwrite bio.
 *      4. Issue deferred bios.
 *      5. If this was the last hydration, wake up anyone waiting for
 *         hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{
        int r;
        blk_status_t status;
        struct clone *clone = hd->clone;

        r = hydration_update_metadata(hd);

        if (hd->status == BLK_STS_OK && likely(!r)) {
                if (hd->overwrite_bio)
                        complete_overwrite_bio(clone, hd->overwrite_bio);

                issue_deferred_bios(clone, &hd->deferred_bios);
        } else {
                status = r ? BLK_STS_IOERR : hd->status;

                if (hd->overwrite_bio)
                        bio_list_add(&hd->deferred_bios, hd->overwrite_bio);

                fail_bios(&hd->deferred_bios, status);
        }

        free_hydration(hd);

        if (atomic_dec_and_test(&clone->hydrations_in_flight))
                wakeup_hydration_waiters(clone);
}

static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{
        blk_status_t status;

        struct dm_clone_region_hydration *tmp, *hd = context;
        struct clone *clone = hd->clone;

        LIST_HEAD(batched_hydrations);

        if (read_err || write_err) {
                DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
                status = BLK_STS_IOERR;
        } else {
                status = BLK_STS_OK;
        }
        list_splice_tail(&hd->list, &batched_hydrations);

        hd->status = status;
        hydration_complete(hd);

        /* Complete batched hydrations */
        list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) {
                hd->status = status;
                hydration_complete(hd);
        }

        /* Continue background hydration, if there is no I/O in-flight */
        if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
            !atomic_read(&clone->ios_in_flight))
                wake_worker(clone);
}

static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{
        unsigned long region_start, region_end;
        sector_t tail_size, region_size, total_size;
        struct dm_io_region from, to;
        struct clone *clone = hd->clone;

        region_size = clone->region_size;
        region_start = hd->region_nr;
        region_end = region_start + nr_regions - 1;

        total_size = (nr_regions - 1) << clone->region_shift;

        if (region_end == clone->nr_regions - 1) {
                /*
                 * The last region of the target might be smaller than
                 * region_size.
                 */
                tail_size = clone->ti->len & (region_size - 1);
                if (!tail_size)
                        tail_size = region_size;
        } else {
                tail_size = region_size;
        }

        total_size += tail_size;

        from.bdev = clone->source_dev->bdev;
        from.sector = region_to_sector(clone, region_start);
        from.count = total_size;

        to.bdev = clone->dest_dev->bdev;
        to.sector = from.sector;
        to.count = from.count;

        /* Issue copy */
        atomic_add(nr_regions, &clone->hydrations_in_flight);
        dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
                       hydration_kcopyd_callback, hd);
}

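/*
 * Worked example for the tail-size computation above (an illustration, not
 * exercised by the code): with ti->len = 100 sectors and region_size = 8,
 * nr_regions = 13 and the last region holds only sectors 96-99. Copying
 * regions 11-12 gives total_size = (2 - 1) * 8 + (100 & 7) = 8 + 4 = 12
 * sectors, so the copy never reads past the end of the device.
 */
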
static void overwrite_endio(struct bio *bio)
{
        struct dm_clone_region_hydration *hd = bio->bi_private;

        bio->bi_end_io = hd->overwrite_bio_end_io;
        hd->status = bio->bi_status;

        hydration_complete(hd);
}

static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{
        /*
         * We don't need to save and restore bio->bi_private because device
         * mapper core generates a new bio for us to use, with clean
         * bi_private.
         */
        hd->overwrite_bio = bio;
        hd->overwrite_bio_end_io = bio->bi_end_io;

        bio->bi_end_io = overwrite_endio;
        bio->bi_private = hd;

        atomic_inc(&hd->clone->hydrations_in_flight);
        generic_make_request(bio);
}

/*
 * Hydrate the bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios for this region. If, by the time this function is
 * called, the region has already finished hydrating, the bio is submitted to
 * the destination device instead.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
        unsigned long flags;
        unsigned long region_nr;
        struct hash_table_bucket *bucket;
        struct dm_clone_region_hydration *hd, *hd2;

        region_nr = bio_to_region(clone, bio);
        bucket = get_hash_table_bucket(clone, region_nr);

        bucket_lock_irqsave(bucket, flags);

        hd = __hash_find(bucket, region_nr);
        if (hd) {
                /* Someone else is hydrating the region */
                bio_list_add(&hd->deferred_bios, bio);
                bucket_unlock_irqrestore(bucket, flags);
                return;
        }

        if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
                /* The region has been hydrated */
                bucket_unlock_irqrestore(bucket, flags);
                issue_bio(clone, bio);
                return;
        }

        /*
         * We must allocate a hydration descriptor and start the hydration of
         * the corresponding region.
         */
        bucket_unlock_irqrestore(bucket, flags);

        hd = alloc_hydration(clone);
        hydration_init(hd, region_nr);

        bucket_lock_irqsave(bucket, flags);

        /* Check if the region has been hydrated in the meantime. */
        if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
                bucket_unlock_irqrestore(bucket, flags);
                free_hydration(hd);
                issue_bio(clone, bio);
                return;
        }

        hd2 = __find_or_insert_region_hydration(bucket, hd);
        if (hd2 != hd) {
                /* Someone else started the region's hydration. */
                bio_list_add(&hd2->deferred_bios, bio);
                bucket_unlock_irqrestore(bucket, flags);
                free_hydration(hd);
                return;
        }

        /*
         * If the metadata mode is RO or FAIL then there is no point starting a
         * hydration, since we will not be able to update the metadata when the
         * hydration finishes.
         */
        if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
                hlist_del(&hd->h);
                bucket_unlock_irqrestore(bucket, flags);
                free_hydration(hd);
                bio_io_error(bio);
                return;
        }

        /*
         * Start region hydration.
         *
         * If a bio overwrites a region, i.e., its size is equal to the
         * region's size, then we don't need to copy the region from the source
         * to the destination device.
         */
        if (is_overwrite_bio(clone, bio)) {
                bucket_unlock_irqrestore(bucket, flags);
                hydration_overwrite(hd, bio);
        } else {
                bio_list_add(&hd->deferred_bios, bio);
                bucket_unlock_irqrestore(bucket, flags);
                hydration_copy(hd, 1);
        }
}

/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 */

/*
 * Batch region hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB, which
 * is good for small, random write performance (because of the overwriting of
 * un-hydrated regions) and at the same time issue big copy requests to kcopyd
 * to achieve high hydration bandwidth.
 */
struct batch_info {
        struct dm_clone_region_hydration *head;
        unsigned int nr_batched_regions;
};

static void __batch_hydration(struct batch_info *batch,
                              struct dm_clone_region_hydration *hd)
{
        struct clone *clone = hd->clone;
        unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);

        if (batch->head) {
                /* Try to extend the current batch */
                if (batch->nr_batched_regions < max_batch_size &&
                    (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {
                        list_add_tail(&hd->list, &batch->head->list);
                        batch->nr_batched_regions++;
                        hd = NULL;
                }

                /* Check if we should issue the current batch */
                if (batch->nr_batched_regions >= max_batch_size || hd) {
                        hydration_copy(batch->head, batch->nr_batched_regions);
                        batch->head = NULL;
                        batch->nr_batched_regions = 0;
                }
        }

        if (!hd)
                return;

        /* We treat max batch sizes of zero and one equivalently */
        if (max_batch_size <= 1) {
                hydration_copy(hd, 1);
                return;
        }

        /* Start a new batch */
        BUG_ON(!list_empty(&hd->list));
        batch->head = hd;
        batch->nr_batched_regions = 1;
}

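/*
 * Worked example (an illustration, not exercised by the code): with
 * hydration_batch_size = 4 and un-hydrated regions 10, 11, 12, 13 arriving
 * in order, the batch fills up when region 13 is added and is issued as a
 * single 4-region kcopyd copy. A later non-adjacent region (e.g. 17) would
 * flush any pending batch and start a new one of its own.
 */
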
static unsigned long __start_next_hydration(struct clone *clone,
                                            unsigned long offset,
                                            struct batch_info *batch)
{
        unsigned long flags;
        struct hash_table_bucket *bucket;
        struct dm_clone_region_hydration *hd;
        unsigned long nr_regions = clone->nr_regions;

        hd = alloc_hydration(clone);

        /* Try to find a region to hydrate. */
        do {
                offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
                if (offset == nr_regions)
                        break;

                bucket = get_hash_table_bucket(clone, offset);
                bucket_lock_irqsave(bucket, flags);

                if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
                    !__hash_find(bucket, offset)) {
                        hydration_init(hd, offset);
                        __insert_region_hydration(bucket, hd);
                        bucket_unlock_irqrestore(bucket, flags);

                        /* Batch hydration */
                        __batch_hydration(batch, hd);

                        return (offset + 1);
                }

                bucket_unlock_irqrestore(bucket, flags);

        } while (++offset < nr_regions);

        if (hd)
                free_hydration(hd);

        return offset;
}

/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{
        unsigned int current_volume;
        unsigned long offset, nr_regions = clone->nr_regions;

        struct batch_info batch = {
                .head = NULL,
                .nr_batched_regions = 0,
        };

        if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
                return;

        if (dm_clone_is_hydration_done(clone->cmd))
                return;

        /*
         * Avoid race with device suspension.
         */
        atomic_inc(&clone->hydrations_in_flight);

        /*
         * Make sure atomic_inc() is ordered before test_bit(), otherwise we
         * might race with clone_postsuspend() and start a region hydration
         * after the target has been suspended.
         *
         * This is paired with the smp_mb__after_atomic() in
         * clone_postsuspend().
         */
        smp_mb__after_atomic();

        offset = clone->hydration_offset;
        while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
               !atomic_read(&clone->ios_in_flight) &&
               test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
               offset < nr_regions) {
                current_volume = atomic_read(&clone->hydrations_in_flight);
                current_volume += batch.nr_batched_regions;

                if (current_volume > READ_ONCE(clone->hydration_threshold))
                        break;

                offset = __start_next_hydration(clone, offset, &batch);
        }

        if (batch.head)
                hydration_copy(batch.head, batch.nr_batched_regions);

        if (offset >= nr_regions)
                offset = 0;

        clone->hydration_offset = offset;

        if (atomic_dec_and_test(&clone->hydrations_in_flight))
                wakeup_hydration_waiters(clone);
}

/*---------------------------------------------------------------------------*/

static bool need_commit_due_to_time(struct clone *clone)
{
        return !time_in_range(jiffies, clone->last_commit_jiffies,
                              clone->last_commit_jiffies + COMMIT_PERIOD);
}

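/*
 * COMMIT_PERIOD is HZ jiffies, i.e. one second: this returns true once more
 * than a second has passed since the last commit, bounding how much
 * uncommitted metadata can accumulate between periodic commits.
 */
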
/*
 * A non-zero return indicates read-only or fail mode.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
        int r = 0;

        if (dest_dev_flushed)
                *dest_dev_flushed = false;

        mutex_lock(&clone->commit_lock);

        if (!dm_clone_changed_this_transaction(clone->cmd))
                goto out;

        if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
                r = -EPERM;
                goto out;
        }

        r = dm_clone_metadata_pre_commit(clone->cmd);
        if (unlikely(r)) {
                __metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
                goto out;
        }

        bio_reset(&clone->flush_bio);
        bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
        clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        r = submit_bio_wait(&clone->flush_bio);
        if (unlikely(r)) {
                __metadata_operation_failed(clone, "flush destination device", r);
                goto out;
        }

        if (dest_dev_flushed)
                *dest_dev_flushed = true;

        r = dm_clone_metadata_commit(clone->cmd);
        if (unlikely(r)) {
                __metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
                goto out;
        }

        if (dm_clone_is_hydration_done(clone->cmd))
                dm_table_event(clone->ti->table);
out:
        mutex_unlock(&clone->commit_lock);

        return r;
}

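/*
 * Ordering note: the destination device is flushed *before* the metadata
 * commit, so a region is never marked hydrated on disk while its data could
 * still be sitting in the destination device's volatile cache.
 */
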
static void process_deferred_discards(struct clone *clone)
{
        int r = -EPERM;
        struct bio *bio;
        struct blk_plug plug;
        unsigned long rs, re, flags;
        struct bio_list discards = BIO_EMPTY_LIST;

        spin_lock_irqsave(&clone->lock, flags);
        bio_list_merge(&discards, &clone->deferred_discard_bios);
        bio_list_init(&clone->deferred_discard_bios);
        spin_unlock_irqrestore(&clone->lock, flags);

        if (bio_list_empty(&discards))
                return;

        if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
                goto out;

        /* Update the metadata */
        bio_list_for_each(bio, &discards) {
                bio_region_range(clone, bio, &rs, &re);
                /*
                 * A discard request might cover regions that have been already
                 * hydrated. There is no need to update the metadata for these
                 * regions.
                 */
                r = dm_clone_cond_set_range(clone->cmd, rs, re - rs);

                if (unlikely(r))
                        break;
        }
out:
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(&discards)))
                complete_discard_bio(clone, bio, r == 0);
        blk_finish_plug(&plug);
}

static void process_deferred_bios(struct clone *clone)
{
        unsigned long flags;
        struct bio_list bios = BIO_EMPTY_LIST;

        spin_lock_irqsave(&clone->lock, flags);
        bio_list_merge(&bios, &clone->deferred_bios);
        bio_list_init(&clone->deferred_bios);
        spin_unlock_irqrestore(&clone->lock, flags);

        if (bio_list_empty(&bios))
                return;

        submit_bios(&bios);
}

static void process_deferred_flush_bios(struct clone *clone)
{
        struct bio *bio;
        unsigned long flags;
        bool dest_dev_flushed;
        struct bio_list bios = BIO_EMPTY_LIST;
        struct bio_list bio_completions = BIO_EMPTY_LIST;

        /*
         * If there are any deferred flush bios, we must commit the metadata
         * before issuing them or signaling their completion.
         */
        spin_lock_irqsave(&clone->lock, flags);
        bio_list_merge(&bios, &clone->deferred_flush_bios);
        bio_list_init(&clone->deferred_flush_bios);

        bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
        bio_list_init(&clone->deferred_flush_completions);
        spin_unlock_irqrestore(&clone->lock, flags);

        if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
            !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
                return;

        if (commit_metadata(clone, &dest_dev_flushed)) {
                bio_list_merge(&bios, &bio_completions);

                while ((bio = bio_list_pop(&bios)))
                        bio_io_error(bio);

                return;
        }

        clone->last_commit_jiffies = jiffies;

        while ((bio = bio_list_pop(&bio_completions)))
                bio_endio(bio);

        while ((bio = bio_list_pop(&bios))) {
                if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
                        /*
                         * We just flushed the destination device as part of
                         * the metadata commit, so there is no reason to send
                         * another flush.
                         */
                        bio_endio(bio);
                } else {
                        generic_make_request(bio);
                }
        }
}

static void do_worker(struct work_struct *work)
{
        struct clone *clone = container_of(work, typeof(*clone), worker);

        process_deferred_bios(clone);
        process_deferred_discards(clone);

        /*
         * process_deferred_flush_bios():
         *
         *   - Commit metadata
         *
         *   - Process deferred REQ_FUA completions
         *
         *   - Process deferred REQ_PREFLUSH bios
         */
        process_deferred_flush_bios(clone);

        /* Background hydration */
        do_hydration(clone);
}

/*
 * Commit periodically so that not too much unwritten data builds up.
 *
 * Also, restart background hydration, if it has been stopped by in-flight I/O.
 */
static void do_waker(struct work_struct *work)
{
        struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);

        wake_worker(clone);
        queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
}

/*---------------------------------------------------------------------------*/

/*
 * Target methods
 */
static int clone_map(struct dm_target *ti, struct bio *bio)
{
        struct clone *clone = ti->private;
        unsigned long region_nr;

        atomic_inc(&clone->ios_in_flight);

        if (unlikely(get_clone_mode(clone) == CM_FAIL))
                return DM_MAPIO_KILL;

        /*
         * REQ_PREFLUSH bios carry no data:
         *
         * - Commit metadata, if changed
         *
         * - Pass down to destination device
         */
        if (bio->bi_opf & REQ_PREFLUSH) {
                remap_and_issue(clone, bio);
                return DM_MAPIO_SUBMITTED;
        }

        bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

        /*
         * dm-clone interprets discards and performs a fast hydration of the
         * discarded regions, i.e., we skip the copy from the source device and
         * just mark the regions as hydrated.
         */
        if (bio_op(bio) == REQ_OP_DISCARD) {
                process_discard_bio(clone, bio);
                return DM_MAPIO_SUBMITTED;
        }

        /*
         * If the bio's region is hydrated, redirect it to the destination
         * device.
         *
         * If the region is not hydrated and the bio is a READ, redirect it to
         * the source device.
         *
         * Else, defer WRITE bio until after its region has been hydrated and
         * start the region's hydration immediately.
         */
        region_nr = bio_to_region(clone, bio);
        if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
                remap_and_issue(clone, bio);
                return DM_MAPIO_SUBMITTED;
        } else if (bio_data_dir(bio) == READ) {
                remap_to_source(clone, bio);
                return DM_MAPIO_REMAPPED;
        }

        remap_to_dest(clone, bio);
        hydrate_bio_region(clone, bio);

        return DM_MAPIO_SUBMITTED;
}

static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
        struct clone *clone = ti->private;

        atomic_dec(&clone->ios_in_flight);

        return DM_ENDIO_DONE;
}

static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
                       ssize_t *sz_ptr)
{
        ssize_t sz = *sz_ptr;
        unsigned int count;

        count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
        count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

        DMEMIT("%u ", count);

        if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
                DMEMIT("no_hydration ");

        if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
                DMEMIT("no_discard_passdown ");

        *sz_ptr = sz;
}

static void emit_core_args(struct clone *clone, char *result,
                           unsigned int maxlen, ssize_t *sz_ptr)
{
        ssize_t sz = *sz_ptr;
        unsigned int count = 4;

        DMEMIT("%u hydration_threshold %u hydration_batch_size %u ", count,
               READ_ONCE(clone->hydration_threshold),
               READ_ONCE(clone->hydration_batch_size));

        *sz_ptr = sz;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#features> <features>* <#core args> <core args>* <clone metadata mode>
 */
static void clone_status(struct dm_target *ti, status_type_t type,
                         unsigned int status_flags, char *result,
                         unsigned int maxlen)
{
        int r;
        unsigned int i;
        ssize_t sz = 0;
        dm_block_t nr_free_metadata_blocks = 0;
        dm_block_t nr_metadata_blocks = 0;
        char buf[BDEVNAME_SIZE];
        struct clone *clone = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                if (get_clone_mode(clone) == CM_FAIL) {
                        DMEMIT("Fail");
                        break;
                }

                /* Commit to ensure statistics aren't out-of-date */
                if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
                        (void) commit_metadata(clone, NULL);

                r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);

                if (r) {
                        DMERR("%s: dm_clone_get_free_metadata_block_count returned %d",
                              clone_device_name(clone), r);
                        goto error;
                }

                r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);

                if (r) {
                        DMERR("%s: dm_clone_get_metadata_dev_size returned %d",
                              clone_device_name(clone), r);
                        goto error;
                }

                DMEMIT("%u %llu/%llu %llu %lu/%lu %u ",
                       DM_CLONE_METADATA_BLOCK_SIZE,
                       (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
                       (unsigned long long)nr_metadata_blocks,
                       (unsigned long long)clone->region_size,
                       dm_clone_nr_of_hydrated_regions(clone->cmd),
                       clone->nr_regions,
                       atomic_read(&clone->hydrations_in_flight));

                emit_flags(clone, result, maxlen, &sz);
                emit_core_args(clone, result, maxlen, &sz);

                switch (get_clone_mode(clone)) {
                case CM_WRITE:
                        DMEMIT("rw");
                        break;
                case CM_READ_ONLY:
                        DMEMIT("ro");
                        break;
                case CM_FAIL:
                        DMEMIT("Fail");
                }

                break;

        case STATUSTYPE_TABLE:
                format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
                DMEMIT("%s ", buf);

                format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
                DMEMIT("%s ", buf);

                format_dev_t(buf, clone->source_dev->bdev->bd_dev);
                DMEMIT("%s", buf);

                for (i = 0; i < clone->nr_ctr_args; i++)
                        DMEMIT(" %s", clone->ctr_args[i]);
        }

        return;

error:
        DMEMIT("Error");
}

static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
        struct request_queue *dest_q, *source_q;
        struct clone *clone = container_of(cb, struct clone, callbacks);

        source_q = bdev_get_queue(clone->source_dev->bdev);
        dest_q = bdev_get_queue(clone->dest_dev->bdev);

        return (bdi_congested(dest_q->backing_dev_info, bdi_bits) |
                bdi_congested(source_q->backing_dev_info, bdi_bits));
}

static sector_t get_dev_size(struct dm_dev *dev)
{
        return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

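/*
 * SECTOR_SHIFT is 9, so this converts the inode size in bytes to 512-byte
 * sectors; e.g. a 1 GiB block device reports 2^30 >> 9 = 2097152 sectors.
 */
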
1520 /*---------------------------------------------------------------------------*/
1521
1522 /*
1523  * Construct a clone device mapping:
1524  *
1525  * clone <metadata dev> <destination dev> <source dev> <region size>
1526  *      [<#feature args> [<feature arg>]* [<#core args> [key value]*]]
1527  *
1528  * metadata dev: Fast device holding the persistent metadata
1529  * destination dev: The destination device, which will become a clone of the
1530  *                  source device
1531  * source dev: The read-only source device that gets cloned
1532  * region size: dm-clone unit size in sectors
1533  *
1534  * #feature args: Number of feature arguments passed
1535  * feature args: E.g. no_hydration, no_discard_passdown
1536  *
1537  * #core arguments: An even number of core arguments
1538  * core arguments: Key/value pairs for tuning the core
1539  *                 E.g. 'hydration_threshold 256'
1540  */
1541 static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
1542 {
1543         int r;
1544         unsigned int argc;
1545         const char *arg_name;
1546         struct dm_target *ti = clone->ti;
1547
1548         const struct dm_arg args = {
1549                 .min = 0,
1550                 .max = 2,
1551                 .error = "Invalid number of feature arguments"
1552         };
1553
1554         /* No feature arguments supplied */
1555         if (!as->argc)
1556                 return 0;
1557
1558         r = dm_read_arg_group(&args, as, &argc, &ti->error);
1559         if (r)
1560                 return r;
1561
1562         while (argc) {
1563                 arg_name = dm_shift_arg(as);
1564                 argc--;
1565
1566                 if (!strcasecmp(arg_name, "no_hydration")) {
1567                         __clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
1568                 } else if (!strcasecmp(arg_name, "no_discard_passdown")) {
1569                         __clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
1570                 } else {
1571                         ti->error = "Invalid feature argument";
1572                         return -EINVAL;
1573                 }
1574         }
1575
1576         return 0;
1577 }
1578
1579 static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
1580 {
1581         int r;
1582         unsigned int argc;
1583         unsigned int value;
1584         const char *arg_name;
1585         struct dm_target *ti = clone->ti;
1586
1587         const struct dm_arg args = {
1588                 .min = 0,
1589                 .max = 4,
1590                 .error = "Invalid number of core arguments"
1591         };
1592
1593         /* Initialize core arguments */
1594         clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
1595         clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;
1596
1597         /* No core arguments supplied */
1598         if (!as->argc)
1599                 return 0;
1600
1601         r = dm_read_arg_group(&args, as, &argc, &ti->error);
1602         if (r)
1603                 return r;
1604
1605         if (argc & 1) {
1606                 ti->error = "Number of core arguments must be even";
1607                 return -EINVAL;
1608         }
1609
1610         while (argc) {
1611                 arg_name = dm_shift_arg(as);
1612                 argc -= 2;
1613
1614                 if (!strcasecmp(arg_name, "hydration_threshold")) {
1615                         if (kstrtouint(dm_shift_arg(as), 10, &value)) {
1616                                 ti->error = "Invalid value for argument `hydration_threshold'";
1617                                 return -EINVAL;
1618                         }
1619                         clone->hydration_threshold = value;
1620                 } else if (!strcasecmp(arg_name, "hydration_batch_size")) {
1621                         if (kstrtouint(dm_shift_arg(as), 10, &value)) {
1622                                 ti->error = "Invalid value for argument `hydration_batch_size'";
1623                                 return -EINVAL;
1624                         }
1625                         clone->hydration_batch_size = value;
1626                 } else {
1627                         ti->error = "Invalid core argument";
1628                         return -EINVAL;
1629                 }
1630         }
1631
1632         return 0;
1633 }
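/*
 * For example (values are illustrative), a core-argument section of
 * "4 hydration_threshold 256 hydration_batch_size 64" leaves the loop
 * above with a threshold of 256 regions and a batch size of 64 regions,
 * while an absent or "0" section keeps the defaults initialized above.
 */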
1634
1635 static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
1636 {
1637         int r;
1638         unsigned int region_size;
1639         struct dm_arg arg;
1640
1641         arg.min = MIN_REGION_SIZE;
1642         arg.max = MAX_REGION_SIZE;
1643         arg.error = "Invalid region size";
1644
1645         r = dm_read_arg(&arg, as, &region_size, error);
1646         if (r)
1647                 return r;
1648
1649         /* Check region size is a power of 2 */
1650         if (!is_power_of_2(region_size)) {
1651                 *error = "Region size is not a power of 2";
1652                 return -EINVAL;
1653         }
1654
1655         /* Validate the region size against the device logical block size */
1656         if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
1657             region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
1658                 *error = "Region size is not a multiple of device logical block size";
1659                 return -EINVAL;
1660         }
1661
1662         clone->region_size = region_size;
1663
1664         return 0;
1665 }
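/*
 * Worked example: region sizes are given in 512-byte sectors, so the
 * range enforced above is 8 sectors (4KB) to 2^21 sectors (1GB). On
 * devices with a 4096-byte logical block size bdev_logical_block_size()
 * >> 9 is 8, so every power-of-2 region size >= 8 sectors passes the
 * alignment check.
 */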
1666
1667 static int validate_nr_regions(unsigned long n, char **error)
1668 {
1669         /*
1670          * dm_bitset restricts us to 2^32 regions. test_bit & co. restrict us
1671          * further to 2^31 regions.
1672          */
1673         if (n > (1UL << 31)) {
1674                 *error = "Too many regions. Consider increasing the region size";
1675                 return -EINVAL;
1676         }
1677
1678         return 0;
1679 }
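/*
 * Back-of-the-envelope check: with the minimum 4KB region size, the
 * 2^31-region cap is hit at 2^31 * 8 = 2^34 sectors (8 TiB); doubling
 * the region size doubles the maximum supported device size.
 */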
1680
1681 static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
1682 {
1683         int r;
1684         sector_t metadata_dev_size;
1685         char b[BDEVNAME_SIZE];
1686
1687         r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1688                           &clone->metadata_dev);
1689         if (r) {
1690                 *error = "Error opening metadata device";
1691                 return r;
1692         }
1693
1694         metadata_dev_size = get_dev_size(clone->metadata_dev);
1695         if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
1696                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1697                        bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);
1698
1699         return 0;
1700 }
1701
1702 static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
1703 {
1704         int r;
1705         sector_t dest_dev_size;
1706
1707         r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1708                           &clone->dest_dev);
1709         if (r) {
1710                 *error = "Error opening destination device";
1711                 return r;
1712         }
1713
1714         dest_dev_size = get_dev_size(clone->dest_dev);
1715         if (dest_dev_size < clone->ti->len) {
1716                 dm_put_device(clone->ti, clone->dest_dev);
1717                 *error = "Device size larger than destination device";
1718                 return -EINVAL;
1719         }
1720
1721         return 0;
1722 }
1723
1724 static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
1725 {
1726         int r;
1727         sector_t source_dev_size;
1728
1729         r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
1730                           &clone->source_dev);
1731         if (r) {
1732                 *error = "Error opening source device";
1733                 return r;
1734         }
1735
1736         source_dev_size = get_dev_size(clone->source_dev);
1737         if (source_dev_size < clone->ti->len) {
1738                 dm_put_device(clone->ti, clone->source_dev);
1739                 *error = "Device size larger than source device";
1740                 return -EINVAL;
1741         }
1742
1743         return 0;
1744 }
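/*
 * Note that, unlike the metadata and destination devices, the source is
 * opened with FMODE_READ only: dm-clone treats it as read-only and all
 * writes are directed to the destination device.
 */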
1745
1746 static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
1747 {
1748         unsigned int i;
1749         const char **copy;
1750
1751         copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
1752         if (!copy)
1753                 goto error;
1754
1755         for (i = 0; i < argc; i++) {
1756                 copy[i] = kstrdup(argv[i], GFP_KERNEL);
1757
1758                 if (!copy[i]) {
1759                         while (i--)
1760                                 kfree(copy[i]);
1761                         kfree(copy);
1762                         goto error;
1763                 }
1764         }
1765
1766         clone->nr_ctr_args = argc;
1767         clone->ctr_args = copy;
1768         return 0;
1769
1770 error:
1771         *error = "Failed to allocate memory for table line";
1772         return -ENOMEM;
1773 }
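/*
 * The copy saved here lets the target reproduce its table line later,
 * presumably from clone_status() (registered as .status below), since
 * device-mapper core does not retain the argv passed to the constructor.
 */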
1774
1775 static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1776 {
1777         int r;
1778         struct clone *clone;
1779         struct dm_arg_set as;
1780
1781         if (argc < 4) {
1782                 ti->error = "Invalid number of arguments";
1783                 return -EINVAL;
1784         }
1785
1786         as.argc = argc;
1787         as.argv = argv;
1788
1789         clone = kzalloc(sizeof(*clone), GFP_KERNEL);
1790         if (!clone) {
1791                 ti->error = "Failed to allocate clone structure";
1792                 return -ENOMEM;
1793         }
1794
1795         clone->ti = ti;
1796
1797         /* Initialize dm-clone flags */
1798         __set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
1799         __set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
1800         __set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
1801
1802         r = parse_metadata_dev(clone, &as, &ti->error);
1803         if (r)
1804                 goto out_with_clone;
1805
1806         r = parse_dest_dev(clone, &as, &ti->error);
1807         if (r)
1808                 goto out_with_meta_dev;
1809
1810         r = parse_source_dev(clone, &as, &ti->error);
1811         if (r)
1812                 goto out_with_dest_dev;
1813
1814         r = parse_region_size(clone, &as, &ti->error);
1815         if (r)
1816                 goto out_with_source_dev;
1817
1818         clone->region_shift = __ffs(clone->region_size);
1819         clone->nr_regions = dm_sector_div_up(ti->len, clone->region_size);
1820
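        /*
         * Worked example of the arithmetic above: an 8-sector region gives
         * region_shift = __ffs(8) = 3, and a 1048576000-sector target splits
         * into 1048576000 / 8 = 131072000 regions, with dm_sector_div_up()
         * rounding up when ti->len is not region-aligned.
         */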
1821         r = validate_nr_regions(clone->nr_regions, &ti->error);
1822         if (r)
1823                 goto out_with_source_dev;
1824
1825         r = dm_set_target_max_io_len(ti, clone->region_size);
1826         if (r) {
1827                 ti->error = "Failed to set max io len";
1828                 goto out_with_source_dev;
1829         }
1830
1831         r = parse_feature_args(&as, clone);
1832         if (r)
1833                 goto out_with_source_dev;
1834
1835         r = parse_core_args(&as, clone);
1836         if (r)
1837                 goto out_with_source_dev;
1838
1839         /* Load metadata */
1840         clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
1841                                             clone->region_size);
1842         if (IS_ERR(clone->cmd)) {
1843                 ti->error = "Failed to load metadata";
1844                 r = PTR_ERR(clone->cmd);
1845                 goto out_with_source_dev;
1846         }
1847
1848         __set_clone_mode(clone, CM_WRITE);
1849
1850         if (get_clone_mode(clone) != CM_WRITE) {
1851                 ti->error = "Unable to get write access to metadata, please check/repair metadata";
1852                 r = -EPERM;
1853                 goto out_with_metadata;
1854         }
1855
1856         clone->last_commit_jiffies = jiffies;
1857
1858         /* Allocate hydration hash table */
1859         r = hash_table_init(clone);
1860         if (r) {
1861                 ti->error = "Failed to allocate hydration hash table";
1862                 goto out_with_metadata;
1863         }
1864
1865         atomic_set(&clone->ios_in_flight, 0);
1866         init_waitqueue_head(&clone->hydration_stopped);
1867         spin_lock_init(&clone->lock);
1868         bio_list_init(&clone->deferred_bios);
1869         bio_list_init(&clone->deferred_discard_bios);
1870         bio_list_init(&clone->deferred_flush_bios);
1871         bio_list_init(&clone->deferred_flush_completions);
1872         clone->hydration_offset = 0;
1873         atomic_set(&clone->hydrations_in_flight, 0);
1874         bio_init(&clone->flush_bio, NULL, 0);
1875
1876         clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
1877         if (!clone->wq) {
1878                 ti->error = "Failed to allocate workqueue";
1879                 r = -ENOMEM;
1880                 goto out_with_ht;
1881         }
1882
1883         INIT_WORK(&clone->worker, do_worker);
1884         INIT_DELAYED_WORK(&clone->waker, do_waker);
1885
1886         clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1887         if (IS_ERR(clone->kcopyd_client)) {
1888                 r = PTR_ERR(clone->kcopyd_client);
1889                 goto out_with_wq;
1890         }
1891
1892         r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
1893                                    _hydration_cache);
1894         if (r) {
1895                 ti->error = "Failed to create dm_clone_region_hydration memory pool";
1896                 goto out_with_kcopyd;
1897         }
1898
1899         /* Save a copy of the table line */
1900         r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
1901         if (r)
1902                 goto out_with_mempool;
1903
1904         mutex_init(&clone->commit_lock);
1905         clone->callbacks.congested_fn = clone_is_congested;
1906         dm_table_add_target_callbacks(ti->table, &clone->callbacks);
1907
1908         /* Enable flushes */
1909         ti->num_flush_bios = 1;
1910         ti->flush_supported = true;
1911
1912         /* Enable discards */
1913         ti->discards_supported = true;
1914         ti->num_discard_bios = 1;
1915
1916         ti->private = clone;
1917
1918         return 0;
1919
1920 out_with_mempool:
1921         mempool_exit(&clone->hydration_pool);
1922 out_with_kcopyd:
1923         dm_kcopyd_client_destroy(clone->kcopyd_client);
1924 out_with_wq:
1925         destroy_workqueue(clone->wq);
1926 out_with_ht:
1927         hash_table_exit(clone);
1928 out_with_metadata:
1929         dm_clone_metadata_close(clone->cmd);
1930 out_with_source_dev:
1931         dm_put_device(ti, clone->source_dev);
1932 out_with_dest_dev:
1933         dm_put_device(ti, clone->dest_dev);
1934 out_with_meta_dev:
1935         dm_put_device(ti, clone->metadata_dev);
1936 out_with_clone:
1937         kfree(clone);
1938
1939         return r;
1940 }
1941
1942 static void clone_dtr(struct dm_target *ti)
1943 {
1944         unsigned int i;
1945         struct clone *clone = ti->private;
1946
1947         mutex_destroy(&clone->commit_lock);
1948         bio_uninit(&clone->flush_bio);
1949
1950         for (i = 0; i < clone->nr_ctr_args; i++)
1951                 kfree(clone->ctr_args[i]);
1952         kfree(clone->ctr_args);
1953
1954         mempool_exit(&clone->hydration_pool);
1955         dm_kcopyd_client_destroy(clone->kcopyd_client);
1956         destroy_workqueue(clone->wq);
1957         hash_table_exit(clone);
1958         dm_clone_metadata_close(clone->cmd);
1959         dm_put_device(ti, clone->source_dev);
1960         dm_put_device(ti, clone->dest_dev);
1961         dm_put_device(ti, clone->metadata_dev);
1962
1963         kfree(clone);
1964 }
1965
1966 /*---------------------------------------------------------------------------*/
1967
1968 static void clone_postsuspend(struct dm_target *ti)
1969 {
1970         struct clone *clone = ti->private;
1971
1972         /*
1973          * To successfully suspend the device:
1974          *
1975          *      - We cancel the delayed work for periodic commits and wait for
1976          *        it to finish.
1977          *
1978          *      - We stop the background hydration, i.e. we prevent new region
1979          *        hydrations from starting.
1980          *
1981          *      - We wait for any in-flight hydrations to finish.
1982          *
1983          *      - We flush the workqueue.
1984          *
1985          *      - We commit the metadata.
1986          */
1987         cancel_delayed_work_sync(&clone->waker);
1988
1989         set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
1990
1991         /*
1992          * Make sure set_bit() is ordered before atomic_read(), otherwise we
1993          * might race with do_hydration() and miss some started region
1994          * hydrations.
1995          *
1996          * This is paired with smp_mb__after_atomic() in do_hydration().
1997          */
1998         smp_mb__after_atomic();
1999
2000         wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
2001         flush_workqueue(clone->wq);
2002
2003         (void) commit_metadata(clone, NULL);
2004 }
2005
2006 static void clone_resume(struct dm_target *ti)
2007 {
2008         struct clone *clone = ti->private;
2009
2010         clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
2011         do_waker(&clone->waker.work);
2012 }
2013
2014 static bool bdev_supports_discards(struct block_device *bdev)
2015 {
2016         struct request_queue *q = bdev_get_queue(bdev);
2017
2018         return (q && blk_queue_discard(q));
2019 }
2020
2021 /*
2022  * If discard_passdown was enabled, verify that the destination device supports
2023  * discards. Disable discard_passdown if not.
2024  */
2025 static void disable_passdown_if_not_supported(struct clone *clone)
2026 {
2027         struct block_device *dest_dev = clone->dest_dev->bdev;
2028         struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
2029         const char *reason = NULL;
2030         char buf[BDEVNAME_SIZE];
2031
2032         if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
2033                 return;
2034
2035         if (!bdev_supports_discards(dest_dev))
2036                 reason = "discard unsupported";
2037         else if (dest_limits->max_discard_sectors < clone->region_size)
2038                 reason = "max discard sectors smaller than a region";
2039
2040         if (reason) {
2041                 DMWARN("Destination device (%s) %s: Disabling discard passdown.",
2042                        bdevname(dest_dev, buf), reason);
2043                 clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
2044         }
2045 }
2046
2047 static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
2048 {
2049         struct block_device *dest_bdev = clone->dest_dev->bdev;
2050         struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;
2051
2052         if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
2053                 /* No passdown is done so we set our own virtual limits */
2054                 limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
2055                 limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
2056                 return;
2057         }
2058
2059         /*
2060          * clone_iterate_devices() is stacking both the source and destination
2061          * device limits, but discards aren't passed to the source device, so
2062          * inherit destination's limits.
2063          */
2064         limits->max_discard_sectors = dest_limits->max_discard_sectors;
2065         limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
2066         limits->discard_granularity = dest_limits->discard_granularity;
2067         limits->discard_alignment = dest_limits->discard_alignment;
2068         limits->discard_misaligned = dest_limits->discard_misaligned;
2069         limits->max_discard_segments = dest_limits->max_discard_segments;
2070 }
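/*
 * For the no-passdown branch above, an 8-sector region yields a
 * discard_granularity of 8 << 9 = 4096 bytes and a max_discard_sectors
 * of round_down(UINT_MAX >> 9, 8) = 8388600 sectors (~4 GiB), the
 * largest region-aligned sector count whose byte length still fits in
 * 32 bits.
 */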
2071
2072 static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
2073 {
2074         struct clone *clone = ti->private;
2075         u64 io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
2076
2077         /*
2078          * If the system-determined stacked limits are compatible with
2079          * dm-clone's region size (io_opt is a multiple of it), do not override them.
2080          */
2081         if (io_opt_sectors < clone->region_size ||
2082             do_div(io_opt_sectors, clone->region_size)) {
2083                 blk_limits_io_min(limits, clone->region_size << SECTOR_SHIFT);
2084                 blk_limits_io_opt(limits, clone->region_size << SECTOR_SHIFT);
2085         }
2086
2087         disable_passdown_if_not_supported(clone);
2088         set_discard_limits(clone, limits);
2089 }
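/*
 * For instance, with a 128-sector (64KB) region: a stacked io_opt of
 * 512 sectors is left alone, since 512 >= 128 and 512 % 128 == 0,
 * whereas a stacked io_opt of 320 sectors fails the do_div() test and
 * both io_min and io_opt are overridden to 64KB.
 */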
2090
2091 static int clone_iterate_devices(struct dm_target *ti,
2092                                  iterate_devices_callout_fn fn, void *data)
2093 {
2094         int ret;
2095         struct clone *clone = ti->private;
2096         struct dm_dev *dest_dev = clone->dest_dev;
2097         struct dm_dev *source_dev = clone->source_dev;
2098
2099         ret = fn(ti, source_dev, 0, ti->len, data);
2100         if (!ret)
2101                 ret = fn(ti, dest_dev, 0, ti->len, data);
2102         return ret;
2103 }
2104
2105 /*
2106  * dm-clone message functions.
2107  */
2108 static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
2109 {
2110         WRITE_ONCE(clone->hydration_threshold, nr_regions);
2111
2112         /*
2113          * If user space sets hydration_threshold to zero, background hydration
2114          * stops. If the threshold is later increased, we must restart the
2115          * hydration process by waking up the worker.
2116          */
2117         wake_worker(clone);
2118 }
2119
2120 static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
2121 {
2122         WRITE_ONCE(clone->hydration_batch_size, nr_regions);
2123 }
2124
2125 static void enable_hydration(struct clone *clone)
2126 {
2127         if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
2128                 wake_worker(clone);
2129 }
2130
2131 static void disable_hydration(struct clone *clone)
2132 {
2133         clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
2134 }
2135
2136 static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
2137                          char *result, unsigned int maxlen)
2138 {
2139         struct clone *clone = ti->private;
2140         unsigned int value;
2141
2142         if (!argc)
2143                 return -EINVAL;
2144
2145         if (!strcasecmp(argv[0], "enable_hydration")) {
2146                 enable_hydration(clone);
2147                 return 0;
2148         }
2149
2150         if (!strcasecmp(argv[0], "disable_hydration")) {
2151                 disable_hydration(clone);
2152                 return 0;
2153         }
2154
2155         if (argc != 2)
2156                 return -EINVAL;
2157
2158         if (!strcasecmp(argv[0], "hydration_threshold")) {
2159                 if (kstrtouint(argv[1], 10, &value))
2160                         return -EINVAL;
2161
2162                 set_hydration_threshold(clone, value);
2163
2164                 return 0;
2165         }
2166
2167         if (!strcasecmp(argv[0], "hydration_batch_size")) {
2168                 if (kstrtouint(argv[1], 10, &value))
2169                         return -EINVAL;
2170
2171                 set_hydration_batch_size(clone, value);
2172
2173                 return 0;
2174         }
2175
2176         DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
2177         return -EINVAL;
2178 }
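/*
 * These messages use the standard device-mapper message mechanism;
 * e.g., to retune a running device (device name is illustrative):
 *
 *   dmsetup message clone-dev 0 hydration_threshold 512
 *   dmsetup message clone-dev 0 disable_hydration
 *
 * The "0" is the sector argument dmsetup requires for target messages.
 */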
2179
2180 static struct target_type clone_target = {
2181         .name = "clone",
2182         .version = {1, 0, 0},
2183         .module = THIS_MODULE,
2184         .ctr = clone_ctr,
2185         .dtr =  clone_dtr,
2186         .map = clone_map,
2187         .end_io = clone_endio,
2188         .postsuspend = clone_postsuspend,
2189         .resume = clone_resume,
2190         .status = clone_status,
2191         .message = clone_message,
2192         .io_hints = clone_io_hints,
2193         .iterate_devices = clone_iterate_devices,
2194 };
2195
2196 /*---------------------------------------------------------------------------*/
2197
2198 /* Module functions */
2199 static int __init dm_clone_init(void)
2200 {
2201         int r;
2202
2203         _hydration_cache = KMEM_CACHE(dm_clone_region_hydration, 0);
2204         if (!_hydration_cache)
2205                 return -ENOMEM;
2206
2207         r = dm_register_target(&clone_target);
2208         if (r < 0) {
2209                 DMERR("Failed to register clone target");
2210                 /* Don't leak the hydration slab cache on failure */
2211                 kmem_cache_destroy(_hydration_cache);
2212                 return r;
2213         }
2214
2215         return 0;
2216 }
2217
2218 static void __exit dm_clone_exit(void)
2219 {
2220         dm_unregister_target(&clone_target);
2221
2222         kmem_cache_destroy(_hydration_cache);
2223         _hydration_cache = NULL;
2224 }
2225
2226 /* Module hooks */
2227 module_init(dm_clone_init);
2228 module_exit(dm_clone_exit);
2229
2230 MODULE_DESCRIPTION(DM_NAME " clone target");
2231 MODULE_AUTHOR("Nikos Tsironis <ntsironis@arrikto.com>");
2232 MODULE_LICENSE("GPL");