dm: fix truncated status strings
drivers/md/dm-snap.c
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))
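
/*
 * Worked example (editorial note, values purely illustrative): with
 * DM_TRACKED_CHUNK_HASH_SIZE = 16 the hash keeps only the low four bits
 * of the chunk number, so chunks 0x05, 0x15 and 0x25 all land in bucket
 * 5.  Collisions are harmless: __chunk_is_tracked() below walks the
 * whole bucket and compares c->chunk exactly.
 */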

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *      => don't load the target; abort.
         * - Header does not have "valid" flag set
         *      => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *      => don't load the target; abort.
         *         (We can't use the intermediate origin state.)
         * - I/O error while merging
         *      => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1
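
/*
 * Illustrative lifecycle sketch (editorial, derived from start_merge(),
 * stop_merge() and merge_shutdown() later in this file):
 *
 *   start_merge():  test_and_set_bit(RUNNING_MERGE) kicks off the merge
 *                   if it was not already running.
 *   stop_merge():   set_bit(SHUTDOWN_MERGE), then wait_on_bit() for
 *                   RUNNING_MERGE to clear.
 *   merge loop:     snapshot_merge_next_chunks() notices SHUTDOWN_MERGE
 *                   and calls merge_shutdown(), which clears
 *                   RUNNING_MERGE and wakes the waiter.
 */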

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;

        /*
         * For writing a complete chunk, bypassing the copy.
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
        void *full_bio_private;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        c->chunk = chunk;

        spin_lock_irq(&s->tracked_chunk_lock);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}
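
/*
 * Editorial summary of the tracking protocol (a sketch; see
 * snapshot_map() and snapshot_end_io() below): a bio that bypasses the
 * exception store - e.g. a snapshot read remapped to the origin - is
 * registered before it is issued and unregistered on completion, so the
 * copy path can wait for it to drain:
 *
 *   track_chunk(s, bio, chunk);     - before the bio is remapped away
 *   ... bio in flight ...
 *   stop_tracking_chunk(s, bio);    - from snapshot_end_io()
 *
 * __check_for_conflicting_io() then polls __chunk_is_tracked() until
 * the bucket no longer contains the chunk.
 */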

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}
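
/*
 * Worked example (editorial, illustrative device numbers): bd_dev packs
 * major:minor, so the bucket is simply the low eight bits of the device
 * number.  Device 8:16 gives bucket 16; device 8:272 (minor 0x110) also
 * gives 0x110 & 0xFF = 0x10, i.e. bucket 16 - an ordinary collision
 * resolved by the list walk in __lookup_origin() below.
 */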

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}
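
/*
 * Worked example (editorial, illustrative sizes): with existing
 * snapshots of chunk size 64k, 16k and 8k, inserting a 32k snapshot
 * stops at the first entry smaller than 32k (the 16k one) and
 * list_add_tail() places the newcomer just in front of it:
 *
 *   64k -> 32k -> 16k -> 8k
 *
 * If no smaller entry exists, the loop runs off the end and the
 * snapshot is appended at the tail.
 */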

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}
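
/*
 * Worked example (editorial; hash_shift = 3 chosen purely for
 * illustration): chunks 0x28..0x2f differ only in their low three bits,
 * so they all hash to (0x28 >> 3) & hash_mask = 5 & hash_mask - one
 * bucket.  This keeps a run of consecutive chunks, which
 * dm_insert_exception() below folds into a single entry, within a
 * single slot, at the cost of making each bucket cover a wider range.
 */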

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
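
/*
 * Worked example of the coalescing above (editorial, illustrative
 * numbers).  Suppose a slot holds one entry with old_chunk = 10,
 * new_chunk = 20 and dm_consecutive_chunk_count() = 2, i.e. old chunks
 * 10-12 map to new chunks 20-22:
 *
 *   insert 13 -> 23: matches the "insert after" test
 *                    (13 == 10 + 2 + 1 and 23 == 20 + 2 + 1), so the
 *                    counter grows and the entry now covers 10-13.
 *   insert  9 -> 19: matches the "insert before" test; the entry
 *                    becomes old_chunk = 9, new_chunk = 19, count 3.
 *   insert  9 -> 25: adjacent on the origin but not on the COW device,
 *                    so neither test fires and a separate entry is
 *                    linked into the slot.
 */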

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}
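
/*
 * Sizing example (editorial; illustrative volume sizes, and assuming a
 * 64-bit build where sizeof(struct list_head) == 16): a 1TiB origin
 * with 8KiB chunks spans 2^27 chunks, but calc_max_buckets() caps the
 * table at 2MB / 16 = 131072 buckets, so the completed table gets
 * 131072 slots and the pending table 131072 >> 3 = 16384.
 */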

static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}
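
/*
 * Worked example (editorial, illustrative numbers): take an entry
 * covering old chunks 9-12 (count 3).  Merging runs backwards, so
 * chunk 12 goes first: it equals old_chunk + count, so only the counter
 * drops, leaving 9-11.  Removing chunk 9 instead would bump
 * old_chunk/new_chunk and also drop the counter.  Once a lone chunk
 * remains (count 0) the whole entry is freed, and a request for a
 * middle chunk such as 10 while the range is 9-12 would trip the
 * "middle of a chunk range" error above.
 */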

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}
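
/*
 * Worked example for the linear-region adjustment above (editorial,
 * illustrative numbers): if prepare_merge() reports linear_chunks = 4
 * with old_chunk = 103 and new_chunk = 203 (the *last* chunk of the
 * run), both are rewound to the start of the run, old_chunk =
 * 103 + 1 - 4 = 100 and new_chunk = 200, and the four chunks are copied
 * back from the COW device to the origin in a single kcopyd request.
 */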

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_requests = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_requests = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create();
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                goto bad_pending_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_requests = num_flush_requests;
        ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                goto bad_read_metadata;
        }

        r = dm_set_target_max_io_len(ti, s->store->chunk_size);
        if (r)
                goto bad_read_metadata;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        struct bio *full_bio = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        full_bio = pe->full_bio;
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
        }
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error) {
                if (full_bio)
                        bio_io_error(full_bio);
                error_bios(snapshot_bios);
        } else {
                if (full_bio)
                        bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
        }

        retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio, int error)
{
        void *callback_data = bio->bi_private;

        dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
                           struct bio *bio)
{
        struct dm_snapshot *s = pe->snap;
        void *callback_data;

        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
        pe->full_bio_private = bio->bi_private;

        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);

        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;

        generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;
        pe->full_bio = NULL;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}
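
/*
 * Usage sketch (editorial) of the locking convention above, as followed
 * by snapshot_map() below: the pe is allocated with the lock dropped,
 * because alloc_pending_exception() may block in mempool_alloc(), and
 * everything is revalidated once the lock is retaken:
 *
 *   up_write(&s->lock);
 *   pe = alloc_pending_exception(s);        - may block
 *   down_write(&s->lock);
 *   e = dm_lookup_exception(&s->complete, chunk);
 *   if (e)
 *           free_pending_exception(pe);     - raced: already complete
 *   else
 *           pe = __find_pending_exception(s, pe, chunk);
 */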

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}
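
/*
 * Worked example (editorial, illustrative numbers): let an entry map
 * old chunks 100-103 to new chunks 200-203 with a 16-sector chunk size
 * (chunk_mask = 0xf).  A bio at sector 1635 falls in chunk
 * 1635 / 16 = 102 at offset 3, so it is redirected to the COW device at
 * sector (200 + (102 - 100)) * 16 + 3 = 3235.
 */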
1568
1569 static int snapshot_map(struct dm_target *ti, struct bio *bio)
1570 {
1571         struct dm_exception *e;
1572         struct dm_snapshot *s = ti->private;
1573         int r = DM_MAPIO_REMAPPED;
1574         chunk_t chunk;
1575         struct dm_snap_pending_exception *pe = NULL;
1576
1577         init_tracked_chunk(bio);
1578
1579         if (bio->bi_rw & REQ_FLUSH) {
1580                 bio->bi_bdev = s->cow->bdev;
1581                 return DM_MAPIO_REMAPPED;
1582         }
1583
1584         chunk = sector_to_chunk(s->store, bio->bi_sector);
1585
1586         /* Full snapshots are not usable */
1587         /* To get here the table must be live so s->active is always set. */
1588         if (!s->valid)
1589                 return -EIO;
1590
1591         /* FIXME: should only take write lock if we need
1592          * to copy an exception */
1593         down_write(&s->lock);
1594
1595         if (!s->valid) {
1596                 r = -EIO;
1597                 goto out_unlock;
1598         }
1599
1600         /* If the block is already remapped - use that, else remap it */
1601         e = dm_lookup_exception(&s->complete, chunk);
1602         if (e) {
1603                 remap_exception(s, e, bio, chunk);
1604                 goto out_unlock;
1605         }
1606
1607         /*
1608          * Write to snapshot - higher level takes care of RW/RO
1609          * flags so we should only get this if we are
1610          * writeable.
1611          */
1612         if (bio_rw(bio) == WRITE) {
1613                 pe = __lookup_pending_exception(s, chunk);
1614                 if (!pe) {
1615                         up_write(&s->lock);
1616                         pe = alloc_pending_exception(s);
1617                         down_write(&s->lock);
1618
1619                         if (!s->valid) {
1620                                 free_pending_exception(pe);
1621                                 r = -EIO;
1622                                 goto out_unlock;
1623                         }
1624
1625                         e = dm_lookup_exception(&s->complete, chunk);
1626                         if (e) {
1627                                 free_pending_exception(pe);
1628                                 remap_exception(s, e, bio, chunk);
1629                                 goto out_unlock;
1630                         }
1631
1632                         pe = __find_pending_exception(s, pe, chunk);
1633                         if (!pe) {
1634                                 __invalidate_snapshot(s, -ENOMEM);
1635                                 r = -EIO;
1636                                 goto out_unlock;
1637                         }
1638                 }
1639
1640                 remap_exception(s, &pe->e, bio, chunk);
1641
1642                 r = DM_MAPIO_SUBMITTED;
1643
1644                 if (!pe->started &&
1645                     bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
1646                         pe->started = 1;
1647                         up_write(&s->lock);
1648                         start_full_bio(pe, bio);
1649                         goto out;
1650                 }
1651
1652                 bio_list_add(&pe->snapshot_bios, bio);
1653
1654                 if (!pe->started) {
1655                         /* this is protected by snap->lock */
1656                         pe->started = 1;
1657                         up_write(&s->lock);
1658                         start_copy(pe);
1659                         goto out;
1660                 }
1661         } else {
1662                 bio->bi_bdev = s->origin->bdev;
1663                 track_chunk(s, bio, chunk);
1664         }
1665
1666 out_unlock:
1667         up_write(&s->lock);
1668 out:
1669         return r;
1670 }
1671
1672 /*
1673  * A snapshot-merge target behaves like a combination of a snapshot
1674  * target and a snapshot-origin target.  It only generates new
1675  * exceptions in other snapshots and not in the one that is being
1676  * merged.
1677  *
1678  * For each chunk, if there is an existing exception, it is used to
1679  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1680  * which in turn might generate exceptions in other snapshots.
1681  * If merging is currently taking place on the chunk in question, the
1682  * I/O is deferred by adding it to s->bios_queued_during_merge.
1683  */
1684 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
1685 {
1686         struct dm_exception *e;
1687         struct dm_snapshot *s = ti->private;
1688         int r = DM_MAPIO_REMAPPED;
1689         chunk_t chunk;
1690
1691         init_tracked_chunk(bio);
1692
1693         if (bio->bi_rw & REQ_FLUSH) {
1694                 if (!dm_bio_get_target_request_nr(bio))
1695                         bio->bi_bdev = s->origin->bdev;
1696                 else
1697                         bio->bi_bdev = s->cow->bdev;
1698                 return DM_MAPIO_REMAPPED;
1699         }
1700
1701         chunk = sector_to_chunk(s->store, bio->bi_sector);
1702
1703         down_write(&s->lock);
1704
1705         /* Invalid (e.g. full) merging snapshots are redirected to the origin */
1706         if (!s->valid)
1707                 goto redirect_to_origin;
1708
1709         /* If the block is already remapped - use that */
1710         e = dm_lookup_exception(&s->complete, chunk);
1711         if (e) {
1712                 /* Queue writes overlapping with chunks being merged */
1713                 if (bio_rw(bio) == WRITE &&
1714                     chunk >= s->first_merging_chunk &&
1715                     chunk < (s->first_merging_chunk +
1716                              s->num_merging_chunks)) {
1717                         bio->bi_bdev = s->origin->bdev;
1718                         bio_list_add(&s->bios_queued_during_merge, bio);
1719                         r = DM_MAPIO_SUBMITTED;
1720                         goto out_unlock;
1721                 }
1722
1723                 remap_exception(s, e, bio, chunk);
1724
1725                 if (bio_rw(bio) == WRITE)
1726                         track_chunk(s, bio, chunk);
1727                 goto out_unlock;
1728         }
1729
1730 redirect_to_origin:
1731         bio->bi_bdev = s->origin->bdev;
1732
1733         if (bio_rw(bio) == WRITE) {
1734                 up_write(&s->lock);
1735                 return do_origin(s->origin, bio);
1736         }
1737
1738 out_unlock:
1739         up_write(&s->lock);
1740
1741         return r;
1742 }
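/*
 * Illustrative userspace sequence (not part of this file; device names,
 * sizes and chunk size below are hypothetical).  A merge is typically
 * started by replacing a "snapshot" table line with a "snapshot-merge"
 * one for the same origin and COW device:
 *
 *	dmsetup suspend snap
 *	dmsetup load snap --table \
 *		"0 2097152 snapshot-merge /dev/vg/base /dev/vg/cow P 16"
 *	dmsetup resume snap
 *
 * From then on snapshot_merge_map() decides, chunk by chunk, whether I/O
 * goes to the COW device, to the origin, or waits on
 * bios_queued_during_merge.
 */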
1743
1744 static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
1745 {
1746         struct dm_snapshot *s = ti->private;
1747
1748         if (is_bio_tracked(bio))
1749                 stop_tracking_chunk(s, bio);
1750
1751         return 0;
1752 }
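/*
 * Chunk tracking bridges map and end_io: snapshot_map() tracks reads it
 * sends to the origin and snapshot_merge_map() tracks writes it remaps
 * to the COW device; the entries are removed again here, allowing
 * exception completion and the merge path to wait for in-flight I/O to a
 * chunk before its mapping changes.
 */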
1753
1754 static void snapshot_merge_presuspend(struct dm_target *ti)
1755 {
1756         struct dm_snapshot *s = ti->private;
1757
1758         stop_merge(s);
1759 }
1760
1761 static int snapshot_preresume(struct dm_target *ti)
1762 {
1763         int r = 0;
1764         struct dm_snapshot *s = ti->private;
1765         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1766
1767         down_read(&_origins_lock);
1768         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1769         if (snap_src && snap_dest) {
1770                 down_read(&snap_src->lock);
1771                 if (s == snap_src) {
1772                         DMERR("Unable to resume snapshot source until handover completes.");
1773                         r = -EINVAL;
1774                 } else if (!dm_suspended(snap_src->ti)) {
1775                         DMERR("Unable to perform snapshot handover until source is suspended.");
1776                         r = -EINVAL;
1779                 }
1780                 up_read(&snap_src->lock);
1781         }
1782         up_read(&_origins_lock);
1783
1784         return r;
1785 }
1786
1787 static void snapshot_resume(struct dm_target *ti)
1788 {
1789         struct dm_snapshot *s = ti->private;
1790         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1791
1792         down_read(&_origins_lock);
1793         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1794         if (snap_src && snap_dest) {
1795                 down_write(&snap_src->lock);
1796                 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1797                 __handover_exceptions(snap_src, snap_dest);
1798                 up_write(&snap_dest->lock);
1799                 up_write(&snap_src->lock);
1800         }
1801         up_read(&_origins_lock);
1802
1803         /* Now that we have the correct chunk size, reregister. */
1804         reregister_snapshot(s);
1805
1806         down_write(&s->lock);
1807         s->active = 1;
1808         up_write(&s->lock);
1809 }
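/*
 * Handover in practice (illustrative): when userspace loads a new
 * snapshot or snapshot-merge target sharing the COW device of an
 * existing snapshot, snapshot_preresume() insists that the old target be
 * suspended first, and snapshot_resume() then moves the in-core
 * exception tables across wholesale via __handover_exceptions() instead
 * of re-reading them from disk.
 */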
1810
1811 static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
1812 {
1813         uint32_t min_chunksize;
1814
1815         down_read(&_origins_lock);
1816         min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1817         up_read(&_origins_lock);
1818
1819         return min_chunksize;
1820 }
1821
1822 static void snapshot_merge_resume(struct dm_target *ti)
1823 {
1824         struct dm_snapshot *s = ti->private;
1825
1826         /*
1827          * Handover exceptions from existing snapshot.
1828          */
1829         snapshot_resume(ti);
1830
1831         /*
1832          * snapshot-merge acts as an origin, so set ti->max_io_len
1833          */
1834         ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
1835
1836         start_merge(s);
1837 }
1838
1839 static void snapshot_status(struct dm_target *ti, status_type_t type,
1840                             unsigned status_flags, char *result, unsigned maxlen)
1841 {
1842         unsigned sz = 0;
1843         struct dm_snapshot *snap = ti->private;
1844
1845         switch (type) {
1846         case STATUSTYPE_INFO:
1847
1848                 down_write(&snap->lock);
1849
1850                 if (!snap->valid)
1851                         DMEMIT("Invalid");
1852                 else if (snap->merge_failed)
1853                         DMEMIT("Merge failed");
1854                 else {
1855                         if (snap->store->type->usage) {
1856                                 sector_t total_sectors, sectors_allocated,
1857                                          metadata_sectors;
1858                                 snap->store->type->usage(snap->store,
1859                                                          &total_sectors,
1860                                                          &sectors_allocated,
1861                                                          &metadata_sectors);
1862                                 DMEMIT("%llu/%llu %llu",
1863                                        (unsigned long long)sectors_allocated,
1864                                        (unsigned long long)total_sectors,
1865                                        (unsigned long long)metadata_sectors);
1866                         } else
1867                                 DMEMIT("Unknown");
1869                 }
1870
1871                 up_write(&snap->lock);
1872
1873                 break;
1874
1875         case STATUSTYPE_TABLE:
1876                 /*
1877                  * Emit the table line in constructor-argument form:
1878                  * the origin and COW device names followed by the
1879                  * exception store's own parameters.
1880                  */
1881                 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
1882                 snap->store->type->status(snap->store, type, result + sz,
1883                                           maxlen - sz);
1884                 break;
1885         }
1886 }
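/*
 * Example output (all numbers illustrative): for a healthy snapshot,
 * "dmsetup status" reports the STATUSTYPE_INFO fields as
 * <sectors_allocated>/<total_sectors> <metadata_sectors>, e.g.
 *
 *	0 2097152 snapshot 85248/2097152 336
 *
 * and "dmsetup table" prints the STATUSTYPE_TABLE form, the origin and
 * COW device names followed by the exception store arguments, e.g.
 *
 *	0 2097152 snapshot 254:11 254:12 P 16
 */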
1887
1888 static int snapshot_iterate_devices(struct dm_target *ti,
1889                                     iterate_devices_callout_fn fn, void *data)
1890 {
1891         struct dm_snapshot *snap = ti->private;
1892         int r;
1893
1894         r = fn(ti, snap->origin, 0, ti->len, data);
1895
1896         if (!r)
1897                 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
1898
1899         return r;
1900 }
1901
1903 /*-----------------------------------------------------------------
1904  * Origin methods
1905  *---------------------------------------------------------------*/
1906
1907 /*
1908  * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1909  * supplied bio is ignored.  The caller may submit it immediately.
1910  * (No remapping actually occurs as the origin is always a direct linear
1911  * map.)
1912  *
1913  * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1914  * and any supplied bio is added to a list to be submitted once all
1915  * the necessary exceptions exist.
1916  */
1917 static int __origin_write(struct list_head *snapshots, sector_t sector,
1918                           struct bio *bio)
1919 {
1920         int r = DM_MAPIO_REMAPPED;
1921         struct dm_snapshot *snap;
1922         struct dm_exception *e;
1923         struct dm_snap_pending_exception *pe;
1924         struct dm_snap_pending_exception *pe_to_start_now = NULL;
1925         struct dm_snap_pending_exception *pe_to_start_last = NULL;
1926         chunk_t chunk;
1927
1928         /* Do all the snapshots on this origin */
1929         list_for_each_entry (snap, snapshots, list) {
1930                 /*
1931                  * Don't make new exceptions in a merging snapshot
1932                  * because it has effectively been deleted
1933                  */
1934                 if (dm_target_is_snapshot_merge(snap->ti))
1935                         continue;
1936
1937                 down_write(&snap->lock);
1938
1939                 /* Only deal with valid and active snapshots */
1940                 if (!snap->valid || !snap->active)
1941                         goto next_snapshot;
1942
1943                 /* Nothing to do if writing beyond end of snapshot */
1944                 if (sector >= dm_table_get_size(snap->ti->table))
1945                         goto next_snapshot;
1946
1947                 /*
1948                  * Remember, different snapshots can have
1949                  * different chunk sizes.
1950                  */
1951                 chunk = sector_to_chunk(snap->store, sector);
1952
1953                 /*
1954                  * Check exception table to see if block
1955                  * is already remapped in this snapshot
1956                  * and trigger an exception if not.
1957                  */
1958                 e = dm_lookup_exception(&snap->complete, chunk);
1959                 if (e)
1960                         goto next_snapshot;
1961
1962                 pe = __lookup_pending_exception(snap, chunk);
1963                 if (!pe) {
1964                         up_write(&snap->lock);
1965                         pe = alloc_pending_exception(snap);
1966                         down_write(&snap->lock);
1967
1968                         if (!snap->valid) {
1969                                 free_pending_exception(pe);
1970                                 goto next_snapshot;
1971                         }
1972
1973                         e = dm_lookup_exception(&snap->complete, chunk);
1974                         if (e) {
1975                                 free_pending_exception(pe);
1976                                 goto next_snapshot;
1977                         }
1978
1979                         pe = __find_pending_exception(snap, pe, chunk);
1980                         if (!pe) {
1981                                 __invalidate_snapshot(snap, -ENOMEM);
1982                                 goto next_snapshot;
1983                         }
1984                 }
1985
1986                 r = DM_MAPIO_SUBMITTED;
1987
1988                 /*
1989                  * If an origin bio was supplied, queue it to wait for the
1990                  * completion of this exception, and start this one last,
1991                  * at the end of the function.
1992                  */
1993                 if (bio) {
1994                         bio_list_add(&pe->origin_bios, bio);
1995                         bio = NULL;
1996
1997                         if (!pe->started) {
1998                                 pe->started = 1;
1999                                 pe_to_start_last = pe;
2000                         }
2001                 }
2002
2003                 if (!pe->started) {
2004                         pe->started = 1;
2005                         pe_to_start_now = pe;
2006                 }
2007
2008 next_snapshot:
2009                 up_write(&snap->lock);
2010
2011                 if (pe_to_start_now) {
2012                         start_copy(pe_to_start_now);
2013                         pe_to_start_now = NULL;
2014                 }
2015         }
2016
2017         /*
2018          * Submit the exception against which the bio is queued last,
2019          * to give the other exceptions a head start.
2020          */
2021         if (pe_to_start_last)
2022                 start_copy(pe_to_start_last);
2023
2024         return r;
2025 }
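/*
 * Worked example (hypothetical geometry): two snapshots share an origin,
 * one with 16-sector chunks, the other with 32-sector chunks.  A write
 * to sector 40 lands in chunk 40/16 = 2 of the first and chunk
 * 40/32 = 1 of the second.  If neither chunk is remapped yet, two copies
 * are started; the bio is queued against the first pending exception
 * created and that copy is started last, giving the other a head start,
 * so the caller sees DM_MAPIO_SUBMITTED.  If both chunks already have
 * exceptions, nothing is queued and DM_MAPIO_REMAPPED is returned.
 */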
2026
2027 /*
2028  * Called on a write from the origin driver.
2029  */
2030 static int do_origin(struct dm_dev *origin, struct bio *bio)
2031 {
2032         struct origin *o;
2033         int r = DM_MAPIO_REMAPPED;
2034
2035         down_read(&_origins_lock);
2036         o = __lookup_origin(origin->bdev);
2037         if (o)
2038                 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
2039         up_read(&_origins_lock);
2040
2041         return r;
2042 }
2043
2044 /*
2045  * Trigger exceptions in all non-merging snapshots.
2046  *
2047  * The chunk size of the merging snapshot may be larger than the chunk
2048  * size of some other snapshot, so we may need to reallocate multiple
2049  * chunks in those other snapshots.
2050  *
2051  * We scan all the overlapping exceptions in the other snapshots.
2052  * Returns 1 if anything was reallocated and must be waited for,
2053  * otherwise returns 0.
2054  *
2055  * size must be a multiple of merging_snap's chunk_size.
2056  */
2057 static int origin_write_extent(struct dm_snapshot *merging_snap,
2058                                sector_t sector, unsigned size)
2059 {
2060         int must_wait = 0;
2061         sector_t n;
2062         struct origin *o;
2063
2064         /*
2065          * The origin's __minimum_chunk_size() was stored in max_io_len
2066          * by snapshot_merge_resume().
2067          */
2068         down_read(&_origins_lock);
2069         o = __lookup_origin(merging_snap->origin->bdev);
2070         for (n = 0; n < size; n += merging_snap->ti->max_io_len)
2071                 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2072                     DM_MAPIO_SUBMITTED)
2073                         must_wait = 1;
2074         up_read(&_origins_lock);
2075
2076         return must_wait;
2077 }
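/*
 * Worked example (hypothetical sizes): if the merging snapshot uses
 * 64-sector chunks and another snapshot of the same origin uses
 * 16-sector chunks, ti->max_io_len is 16, so merging one 64-sector
 * extent issues four __origin_write() calls, each of which may have to
 * allocate an exception in the 16-sector snapshot; must_wait is set if
 * any of them returns DM_MAPIO_SUBMITTED.
 */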
2078
2079 /*
2080  * Origin: maps a linear range of a device, with hooks for snapshotting.
2081  */
2082
2083 /*
2084  * Construct an origin mapping: <dev_path>
2085  * The context for an origin is merely a 'struct dm_dev *'
2086  * pointing to the real device.
2087  */
2088 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2089 {
2090         int r;
2091         struct dm_dev *dev;
2092
2093         if (argc != 1) {
2094                 ti->error = "origin: incorrect number of arguments";
2095                 return -EINVAL;
2096         }
2097
2098         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
2099         if (r) {
2100                 ti->error = "Cannot get target device";
2101                 return r;
2102         }
2103
2104         ti->private = dev;
2105         ti->num_flush_requests = 1;
2106
2107         return 0;
2108 }
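/*
 * Example table line (illustrative; the device path and target name are
 * hypothetical):
 *
 *	echo "0 $(blockdev --getsz /dev/vg/base) snapshot-origin /dev/vg/base" | \
 *		dmsetup create base-origin
 *
 * All I/O then passes straight through to /dev/vg/base, with writes
 * additionally triggering copy-out to any active snapshots via
 * do_origin().
 */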
2109
2110 static void origin_dtr(struct dm_target *ti)
2111 {
2112         struct dm_dev *dev = ti->private;
2113         dm_put_device(ti, dev);
2114 }
2115
2116 static int origin_map(struct dm_target *ti, struct bio *bio)
2117 {
2118         struct dm_dev *dev = ti->private;
2119         bio->bi_bdev = dev->bdev;
2120
2121         if (bio->bi_rw & REQ_FLUSH)
2122                 return DM_MAPIO_REMAPPED;
2123
2124         /* Only tell snapshots if this is a write */
2125         return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
2126 }
2127
2128 /*
2129  * Set the target "max_io_len" field to the minimum of all the snapshots'
2130  * chunk sizes.
2131  */
2132 static void origin_resume(struct dm_target *ti)
2133 {
2134         struct dm_dev *dev = ti->private;
2135
2136         ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
2137 }
2138
2139 static void origin_status(struct dm_target *ti, status_type_t type,
2140                           unsigned status_flags, char *result, unsigned maxlen)
2141 {
2142         struct dm_dev *dev = ti->private;
2143
2144         switch (type) {
2145         case STATUSTYPE_INFO:
2146                 result[0] = '\0';
2147                 break;
2148
2149         case STATUSTYPE_TABLE:
2150                 snprintf(result, maxlen, "%s", dev->name);
2151                 break;
2152         }
2153 }
2154
2155 static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2156                         struct bio_vec *biovec, int max_size)
2157 {
2158         struct dm_dev *dev = ti->private;
2159         struct request_queue *q = bdev_get_queue(dev->bdev);
2160
2161         if (!q->merge_bvec_fn)
2162                 return max_size;
2163
2164         bvm->bi_bdev = dev->bdev;
2165
2166         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2167 }
2168
2169 static int origin_iterate_devices(struct dm_target *ti,
2170                                   iterate_devices_callout_fn fn, void *data)
2171 {
2172         struct dm_dev *dev = ti->private;
2173
2174         return fn(ti, dev, 0, ti->len, data);
2175 }
2176
2177 static struct target_type origin_target = {
2178         .name    = "snapshot-origin",
2179         .version = {1, 8, 1},
2180         .module  = THIS_MODULE,
2181         .ctr     = origin_ctr,
2182         .dtr     = origin_dtr,
2183         .map     = origin_map,
2184         .resume  = origin_resume,
2185         .status  = origin_status,
2186         .merge   = origin_merge,
2187         .iterate_devices = origin_iterate_devices,
2188 };
2189
2190 static struct target_type snapshot_target = {
2191         .name    = "snapshot",
2192         .version = {1, 11, 1},
2193         .module  = THIS_MODULE,
2194         .ctr     = snapshot_ctr,
2195         .dtr     = snapshot_dtr,
2196         .map     = snapshot_map,
2197         .end_io  = snapshot_end_io,
2198         .preresume  = snapshot_preresume,
2199         .resume  = snapshot_resume,
2200         .status  = snapshot_status,
2201         .iterate_devices = snapshot_iterate_devices,
2202 };
2203
2204 static struct target_type merge_target = {
2205         .name    = dm_snapshot_merge_target_name,
2206         .version = {1, 2, 0},
2207         .module  = THIS_MODULE,
2208         .ctr     = snapshot_ctr,
2209         .dtr     = snapshot_dtr,
2210         .map     = snapshot_merge_map,
2211         .end_io  = snapshot_end_io,
2212         .presuspend = snapshot_merge_presuspend,
2213         .preresume  = snapshot_preresume,
2214         .resume  = snapshot_merge_resume,
2215         .status  = snapshot_status,
2216         .iterate_devices = snapshot_iterate_devices,
2217 };
2218
2219 static int __init dm_snapshot_init(void)
2220 {
2221         int r;
2222
2223         r = dm_exception_store_init();
2224         if (r) {
2225                 DMERR("Failed to initialize exception stores");
2226                 return r;
2227         }
2228
2229         r = init_origin_hash();
2230         if (r) {
2231                 DMERR("init_origin_hash failed.");
2232                 goto bad_origin_hash;
2233         }
2234
2235         exception_cache = KMEM_CACHE(dm_exception, 0);
2236         if (!exception_cache) {
2237                 DMERR("Couldn't create exception cache.");
2238                 r = -ENOMEM;
2239                 goto bad_exception_cache;
2240         }
2241
2242         pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
2243         if (!pending_cache) {
2244                 DMERR("Couldn't create pending cache.");
2245                 r = -ENOMEM;
2246                 goto bad_pending_cache;
2247         }
2248         /* Register targets last, once everything they need exists. */
2249         r = dm_register_target(&snapshot_target);
2250         if (r < 0) {
2251                 DMERR("snapshot target register failed %d", r);
2252                 goto bad_register_snapshot_target;
2253         }
2254
2255         r = dm_register_target(&origin_target);
2256         if (r < 0) {
2257                 DMERR("Origin target register failed %d", r);
2258                 goto bad_register_origin_target;
2259         }
2260
2261         r = dm_register_target(&merge_target);
2262         if (r < 0) {
2263                 DMERR("Merge target register failed %d", r);
2264                 goto bad_register_merge_target;
2265         }
2266
2267         return 0;
2268
2269 bad_register_merge_target:
2270         dm_unregister_target(&origin_target);
2271 bad_register_origin_target:
2272         dm_unregister_target(&snapshot_target);
2273 bad_register_snapshot_target:
2274         kmem_cache_destroy(pending_cache);
2275 bad_pending_cache:
2276         kmem_cache_destroy(exception_cache);
2277 bad_exception_cache:
2278         exit_origin_hash();
2279 bad_origin_hash:
2280         dm_exception_store_exit();
2281
2282         return r;
2283 }
2284
2285 static void __exit dm_snapshot_exit(void)
2286 {
2287         dm_unregister_target(&snapshot_target);
2288         dm_unregister_target(&origin_target);
2289         dm_unregister_target(&merge_target);
2290
2291         exit_origin_hash();
2292         kmem_cache_destroy(pending_cache);
2293         kmem_cache_destroy(exception_cache);
2294
2295         dm_exception_store_exit();
2296 }
2297
2298 /* Module hooks */
2299 module_init(dm_snapshot_init);
2300 module_exit(dm_snapshot_exit);
2301
2302 MODULE_DESCRIPTION(DM_NAME " snapshot target");
2303 MODULE_AUTHOR("Joe Thornber");
2304 MODULE_LICENSE("GPL");