[platform/kernel/linux-rpi.git] / drivers / md / dm-cache-target.c
1 /*
2  * Copyright (C) 2012 Red Hat. All rights reserved.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm.h"
8 #include "dm-bio-prison-v2.h"
9 #include "dm-bio-record.h"
10 #include "dm-cache-metadata.h"
11 #include "dm-io-tracker.h"
12
13 #include <linux/dm-io.h>
14 #include <linux/dm-kcopyd.h>
15 #include <linux/jiffies.h>
16 #include <linux/init.h>
17 #include <linux/mempool.h>
18 #include <linux/module.h>
19 #include <linux/rwsem.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22
23 #define DM_MSG_PREFIX "cache"
24
25 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
26         "A percentage of time allocated for copying to and/or from cache");
27
28 /*----------------------------------------------------------------*/
29
30 /*
31  * Glossary:
32  *
33  * oblock: index of an origin block
34  * cblock: index of a cache block
35  * promotion: movement of a block from origin to cache
36  * demotion: movement of a block from cache to origin
37  * migration: movement of a block between the origin and cache device,
38  *            either direction
39  */
40
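/*
 * Worked example (hypothetical geometry): with 64-sector (32KB) blocks,
 * oblock 7 covers origin sectors 448..511; promoting it copies those
 * sectors into whichever cblock the policy hands out.
 */
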
41 /*----------------------------------------------------------------*/
42
43 /*
44  * Represents a chunk of future work.  'input' allows continuations to pass
45  * values between themselves, typically error values.
46  */
47 struct continuation {
48         struct work_struct ws;
49         blk_status_t input;
50 };
51
52 static inline void init_continuation(struct continuation *k,
53                                      void (*fn)(struct work_struct *))
54 {
55         INIT_WORK(&k->ws, fn);
56         k->input = 0;
57 }
58
59 static inline void queue_continuation(struct workqueue_struct *wq,
60                                       struct continuation *k)
61 {
62         queue_work(wq, &k->ws);
63 }
64
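/*
 * Typical usage sketch (the names below appear later in this file): the
 * caller fills in the continuation with the function to run next, kicks
 * off some asynchronous work, and the completion path queues it, eg:
 *
 *     init_continuation(&mg->k, mg_upgrade_lock);
 *     ...                                        // start async work
 *     queue_continuation(cache->wq, &mg->k);     // from the completion
 *
 * 'input' carries the error status from one stage to the next.
 */
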
65 /*----------------------------------------------------------------*/
66
67 /*
68  * The batcher collects together pieces of work that need a particular
69  * operation to occur before they can proceed (typically a commit).
70  */
71 struct batcher {
72         /*
73          * The operation that everyone is waiting for.
74          */
75         blk_status_t (*commit_op)(void *context);
76         void *commit_context;
77
78         /*
79          * This is how bios should be issued once the commit op is complete
80          * (accounted_request).
81          */
82         void (*issue_op)(struct bio *bio, void *context);
83         void *issue_context;
84
85         /*
86          * Queued work gets put on here after commit.
87          */
88         struct workqueue_struct *wq;
89
90         spinlock_t lock;
91         struct list_head work_items;
92         struct bio_list bios;
93         struct work_struct commit_work;
94
95         bool commit_scheduled;
96 };
97
98 static void __commit(struct work_struct *_ws)
99 {
100         struct batcher *b = container_of(_ws, struct batcher, commit_work);
101         blk_status_t r;
102         struct list_head work_items;
103         struct work_struct *ws, *tmp;
104         struct continuation *k;
105         struct bio *bio;
106         struct bio_list bios;
107
108         INIT_LIST_HEAD(&work_items);
109         bio_list_init(&bios);
110
111         /*
112          * We have to grab these before the commit_op to avoid a race
113          * condition.
114          */
115         spin_lock_irq(&b->lock);
116         list_splice_init(&b->work_items, &work_items);
117         bio_list_merge(&bios, &b->bios);
118         bio_list_init(&b->bios);
119         b->commit_scheduled = false;
120         spin_unlock_irq(&b->lock);
121
122         r = b->commit_op(b->commit_context);
123
124         list_for_each_entry_safe(ws, tmp, &work_items, entry) {
125                 k = container_of(ws, struct continuation, ws);
126                 k->input = r;
127                 INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
128                 queue_work(b->wq, ws);
129         }
130
131         while ((bio = bio_list_pop(&bios))) {
132                 if (r) {
133                         bio->bi_status = r;
134                         bio_endio(bio);
135                 } else
136                         b->issue_op(bio, b->issue_context);
137         }
138 }
139
140 static void batcher_init(struct batcher *b,
141                          blk_status_t (*commit_op)(void *),
142                          void *commit_context,
143                          void (*issue_op)(struct bio *bio, void *),
144                          void *issue_context,
145                          struct workqueue_struct *wq)
146 {
147         b->commit_op = commit_op;
148         b->commit_context = commit_context;
149         b->issue_op = issue_op;
150         b->issue_context = issue_context;
151         b->wq = wq;
152
153         spin_lock_init(&b->lock);
154         INIT_LIST_HEAD(&b->work_items);
155         bio_list_init(&b->bios);
156         INIT_WORK(&b->commit_work, __commit);
157         b->commit_scheduled = false;
158 }
159
160 static void async_commit(struct batcher *b)
161 {
162         queue_work(b->wq, &b->commit_work);
163 }
164
165 static void continue_after_commit(struct batcher *b, struct continuation *k)
166 {
167         bool commit_scheduled;
168
169         spin_lock_irq(&b->lock);
170         commit_scheduled = b->commit_scheduled;
171         list_add_tail(&k->ws.entry, &b->work_items);
172         spin_unlock_irq(&b->lock);
173
174         if (commit_scheduled)
175                 async_commit(b);
176 }
177
178 /*
179  * Bios are errored if commit failed.
180  */
181 static void issue_after_commit(struct batcher *b, struct bio *bio)
182 {
183         bool commit_scheduled;
184
185         spin_lock_irq(&b->lock);
186         commit_scheduled = b->commit_scheduled;
187         bio_list_add(&b->bios, bio);
188         spin_unlock_irq(&b->lock);
189
190         if (commit_scheduled)
191                 async_commit(b);
192 }
193
194 /*
195  * Call this if some urgent work is waiting for the commit to complete.
196  */
197 static void schedule_commit(struct batcher *b)
198 {
199         bool immediate;
200
201         spin_lock_irq(&b->lock);
202         immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
203         b->commit_scheduled = true;
204         spin_unlock_irq(&b->lock);
205
206         if (immediate)
207                 async_commit(b);
208 }
209
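/*
 * Usage sketch: callers park bios or continuations behind the next
 * commit and then poke the batcher, eg:
 *
 *     issue_after_commit(&cache->committer, bio);   // issued post commit
 *     schedule_commit(&cache->committer);           // commit asap
 *
 * or, for deferred work:
 *
 *     continue_after_commit(&cache->committer, &mg->k);
 *     schedule_commit(&cache->committer);
 */
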
210 /*
211  * There are a couple of places where we let a bio run, but want to do some
212  * work before calling its endio function.  We do this by temporarily
213  * changing the endio fn.
214  */
215 struct dm_hook_info {
216         bio_end_io_t *bi_end_io;
217 };
218
219 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
220                         bio_end_io_t *bi_end_io, void *bi_private)
221 {
222         h->bi_end_io = bio->bi_end_io;
223
224         bio->bi_end_io = bi_end_io;
225         bio->bi_private = bi_private;
226 }
227
228 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
229 {
230         bio->bi_end_io = h->bi_end_io;
231 }
232
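/*
 * Example pairing (as used by overwrite()/overwrite_endio() below):
 *
 *     dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
 *     ...                                  // submit the bio
 *     dm_unhook_bio(&pb->hook_info, bio);  // from the hooked endio
 */
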
233 /*----------------------------------------------------------------*/
234
235 #define MIGRATION_POOL_SIZE 128
236 #define COMMIT_PERIOD HZ
237 #define MIGRATION_COUNT_WINDOW 10
238
239 /*
240  * The block size of the device holding cache data must be
241  * between 32KB and 1GB.
242  */
243 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
244 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
245
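/*
 * With 512 byte sectors (SECTOR_SHIFT == 9) these work out to
 * 64 sectors (32KB) and 2097152 sectors (1GB) respectively.
 */
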
246 enum cache_metadata_mode {
247         CM_WRITE,               /* metadata may be changed */
248         CM_READ_ONLY,           /* metadata may not be changed */
249         CM_FAIL
250 };
251
252 enum cache_io_mode {
253         /*
254          * Data is written to cached blocks only.  These blocks are marked
255          * dirty.  If you lose the cache device you will lose data.
256          * Potential performance increase for both reads and writes.
257          */
258         CM_IO_WRITEBACK,
259
260         /*
261          * Data is written to both cache and origin.  Blocks are never
262          * dirty.  Potential performance benefit for reads only.
263          */
264         CM_IO_WRITETHROUGH,
265
266         /*
267          * A degraded mode useful for various cache coherency situations
268          * (eg, rolling back snapshots).  Reads and writes always go to the
269          * origin.  If a write goes to a cached oblock, then the cache
270          * block is invalidated.
271          */
272         CM_IO_PASSTHROUGH
273 };
274
275 struct cache_features {
276         enum cache_metadata_mode mode;
277         enum cache_io_mode io_mode;
278         unsigned metadata_version;
279         bool discard_passdown:1;
280 };
281
282 struct cache_stats {
283         atomic_t read_hit;
284         atomic_t read_miss;
285         atomic_t write_hit;
286         atomic_t write_miss;
287         atomic_t demotion;
288         atomic_t promotion;
289         atomic_t writeback;
290         atomic_t copies_avoided;
291         atomic_t cache_cell_clash;
292         atomic_t commit_count;
293         atomic_t discard_count;
294 };
295
296 struct cache {
297         struct dm_target *ti;
298         spinlock_t lock;
299
300         /*
301          * Fields for converting from sectors to blocks.
302          */
303         int sectors_per_block_shift;
304         sector_t sectors_per_block;
305
306         struct dm_cache_metadata *cmd;
307
308         /*
309          * Metadata is written to this device.
310          */
311         struct dm_dev *metadata_dev;
312
313         /*
314          * The slower of the two data devices.  Typically a spindle.
315          */
316         struct dm_dev *origin_dev;
317
318         /*
319          * The faster of the two data devices.  Typically an SSD.
320          */
321         struct dm_dev *cache_dev;
322
323         /*
324          * Size of the origin device in _complete_ blocks and native sectors.
325          */
326         dm_oblock_t origin_blocks;
327         sector_t origin_sectors;
328
329         /*
330          * Size of the cache device in blocks.
331          */
332         dm_cblock_t cache_size;
333
334         /*
335          * Invalidation fields.
336          */
337         spinlock_t invalidation_lock;
338         struct list_head invalidation_requests;
339
340         sector_t migration_threshold;
341         wait_queue_head_t migration_wait;
342         atomic_t nr_allocated_migrations;
343
344         /*
345          * The number of in flight migrations that are performing
346          * background io. eg, promotion, writeback.
347          */
348         atomic_t nr_io_migrations;
349
350         struct bio_list deferred_bios;
351
352         struct rw_semaphore quiesce_lock;
353
354         /*
355          * origin_blocks entries, discarded if set.
356          */
357         dm_dblock_t discard_nr_blocks;
358         unsigned long *discard_bitset;
359         uint32_t discard_block_size; /* a power of 2 times sectors per block */
360
361         /*
362          * Rather than reconstructing the table line for the status we just
363          * save it and regurgitate.
364          */
365         unsigned nr_ctr_args;
366         const char **ctr_args;
367
368         struct dm_kcopyd_client *copier;
369         struct work_struct deferred_bio_worker;
370         struct work_struct migration_worker;
371         struct workqueue_struct *wq;
372         struct delayed_work waker;
373         struct dm_bio_prison_v2 *prison;
374
375         /*
376          * cache_size entries, dirty if set
377          */
378         unsigned long *dirty_bitset;
379         atomic_t nr_dirty;
380
381         unsigned policy_nr_args;
382         struct dm_cache_policy *policy;
383
384         /*
385          * Cache features such as write-through.
386          */
387         struct cache_features features;
388
389         struct cache_stats stats;
390
391         bool need_tick_bio:1;
392         bool sized:1;
393         bool invalidate:1;
394         bool commit_requested:1;
395         bool loaded_mappings:1;
396         bool loaded_discards:1;
397
398         struct rw_semaphore background_work_lock;
399
400         struct batcher committer;
401         struct work_struct commit_ws;
402
403         struct dm_io_tracker tracker;
404
405         mempool_t migration_pool;
406
407         struct bio_set bs;
408 };
409
410 struct per_bio_data {
411         bool tick:1;
412         unsigned req_nr:2;
413         struct dm_bio_prison_cell_v2 *cell;
414         struct dm_hook_info hook_info;
415         sector_t len;
416 };
417
418 struct dm_cache_migration {
419         struct continuation k;
420         struct cache *cache;
421
422         struct policy_work *op;
423         struct bio *overwrite_bio;
424         struct dm_bio_prison_cell_v2 *cell;
425
426         dm_cblock_t invalidate_cblock;
427         dm_oblock_t invalidate_oblock;
428 };
429
430 /*----------------------------------------------------------------*/
431
432 static bool writethrough_mode(struct cache *cache)
433 {
434         return cache->features.io_mode == CM_IO_WRITETHROUGH;
435 }
436
437 static bool writeback_mode(struct cache *cache)
438 {
439         return cache->features.io_mode == CM_IO_WRITEBACK;
440 }
441
442 static inline bool passthrough_mode(struct cache *cache)
443 {
444         return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
445 }
446
447 /*----------------------------------------------------------------*/
448
449 static void wake_deferred_bio_worker(struct cache *cache)
450 {
451         queue_work(cache->wq, &cache->deferred_bio_worker);
452 }
453
454 static void wake_migration_worker(struct cache *cache)
455 {
456         if (passthrough_mode(cache))
457                 return;
458
459         queue_work(cache->wq, &cache->migration_worker);
460 }
461
462 /*----------------------------------------------------------------*/
463
464 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
465 {
466         return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
467 }
468
469 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
470 {
471         dm_bio_prison_free_cell_v2(cache->prison, cell);
472 }
473
474 static struct dm_cache_migration *alloc_migration(struct cache *cache)
475 {
476         struct dm_cache_migration *mg;
477
478         mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
479
480         memset(mg, 0, sizeof(*mg));
481
482         mg->cache = cache;
483         atomic_inc(&cache->nr_allocated_migrations);
484
485         return mg;
486 }
487
488 static void free_migration(struct dm_cache_migration *mg)
489 {
490         struct cache *cache = mg->cache;
491
492         if (atomic_dec_and_test(&cache->nr_allocated_migrations))
493                 wake_up(&cache->migration_wait);
494
495         mempool_free(mg, &cache->migration_pool);
496 }
497
498 /*----------------------------------------------------------------*/
499
500 static inline dm_oblock_t oblock_succ(dm_oblock_t b)
501 {
502         return to_oblock(from_oblock(b) + 1ull);
503 }
504
505 static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
506 {
507         key->virtual = 0;
508         key->dev = 0;
509         key->block_begin = from_oblock(begin);
510         key->block_end = from_oblock(end);
511 }
512
513 /*
514  * We have two lock levels.  Level 0, which is used to prevent WRITEs, and
515  * level 1 which prevents *both* READs and WRITEs.
516  */
517 #define WRITE_LOCK_LEVEL 0
518 #define READ_WRITE_LOCK_LEVEL 1
519
520 static unsigned lock_level(struct bio *bio)
521 {
522         return bio_data_dir(bio) == WRITE ?
523                 WRITE_LOCK_LEVEL :
524                 READ_WRITE_LOCK_LEVEL;
525 }
526
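/*
 * So a WRITE bio takes its shared lock at level 0 and is held up by an
 * exclusive WRITE_LOCK_LEVEL lock (eg, during the copy phase of a
 * migration), whereas a READ bio locks at level 1 and only waits once
 * the migration upgrades to READ_WRITE_LOCK_LEVEL.
 */
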
527 /*----------------------------------------------------------------
528  * Per bio data
529  *--------------------------------------------------------------*/
530
531 static struct per_bio_data *get_per_bio_data(struct bio *bio)
532 {
533         struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
534         BUG_ON(!pb);
535         return pb;
536 }
537
538 static struct per_bio_data *init_per_bio_data(struct bio *bio)
539 {
540         struct per_bio_data *pb = get_per_bio_data(bio);
541
542         pb->tick = false;
543         pb->req_nr = dm_bio_get_target_bio_nr(bio);
544         pb->cell = NULL;
545         pb->len = 0;
546
547         return pb;
548 }
549
550 /*----------------------------------------------------------------*/
551
552 static void defer_bio(struct cache *cache, struct bio *bio)
553 {
554         spin_lock_irq(&cache->lock);
555         bio_list_add(&cache->deferred_bios, bio);
556         spin_unlock_irq(&cache->lock);
557
558         wake_deferred_bio_worker(cache);
559 }
560
561 static void defer_bios(struct cache *cache, struct bio_list *bios)
562 {
563         spin_lock_irq(&cache->lock);
564         bio_list_merge(&cache->deferred_bios, bios);
565         bio_list_init(bios);
566         spin_unlock_irq(&cache->lock);
567
568         wake_deferred_bio_worker(cache);
569 }
570
571 /*----------------------------------------------------------------*/
572
573 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
574 {
575         bool r;
576         struct per_bio_data *pb;
577         struct dm_cell_key_v2 key;
578         dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
579         struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
580
581         cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
582
583         build_key(oblock, end, &key);
584         r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
585         if (!r) {
586                 /*
587                  * Failed to get the lock.
588                  */
589                 free_prison_cell(cache, cell_prealloc);
590                 return r;
591         }
592
593         if (cell != cell_prealloc)
594                 free_prison_cell(cache, cell_prealloc);
595
596         pb = get_per_bio_data(bio);
597         pb->cell = cell;
598
599         return r;
600 }
601
602 /*----------------------------------------------------------------*/
603
604 static bool is_dirty(struct cache *cache, dm_cblock_t b)
605 {
606         return test_bit(from_cblock(b), cache->dirty_bitset);
607 }
608
609 static void set_dirty(struct cache *cache, dm_cblock_t cblock)
610 {
611         if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
612                 atomic_inc(&cache->nr_dirty);
613                 policy_set_dirty(cache->policy, cblock);
614         }
615 }
616
617 /*
618  * These two are called after migrations to force the policy and the
619  * dirty bitset back into sync.
620  */
621 static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
622 {
623         if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))
624                 atomic_inc(&cache->nr_dirty);
625         policy_set_dirty(cache->policy, cblock);
626 }
627
628 static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
629 {
630         if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
631                 if (atomic_dec_return(&cache->nr_dirty) == 0)
632                         dm_table_event(cache->ti->table);
633         }
634
635         policy_clear_dirty(cache->policy, cblock);
636 }
637
638 /*----------------------------------------------------------------*/
639
640 static bool block_size_is_power_of_two(struct cache *cache)
641 {
642         return cache->sectors_per_block_shift >= 0;
643 }
644
645 static dm_block_t block_div(dm_block_t b, uint32_t n)
646 {
647         do_div(b, n);
648
649         return b;
650 }
651
652 static dm_block_t oblocks_per_dblock(struct cache *cache)
653 {
654         dm_block_t oblocks = cache->discard_block_size;
655
656         if (block_size_is_power_of_two(cache))
657                 oblocks >>= cache->sectors_per_block_shift;
658         else
659                 oblocks = block_div(oblocks, cache->sectors_per_block);
660
661         return oblocks;
662 }
663
664 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
665 {
666         return to_dblock(block_div(from_oblock(oblock),
667                                    oblocks_per_dblock(cache)));
668 }
669
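/*
 * Worked example (hypothetical sizes): discard_block_size = 2048 sectors
 * and sectors_per_block = 512 gives oblocks_per_dblock() = 4, so oblocks
 * 8..11 all map to dblock 2.
 */
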
670 static void set_discard(struct cache *cache, dm_dblock_t b)
671 {
672         BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
673         atomic_inc(&cache->stats.discard_count);
674
675         spin_lock_irq(&cache->lock);
676         set_bit(from_dblock(b), cache->discard_bitset);
677         spin_unlock_irq(&cache->lock);
678 }
679
680 static void clear_discard(struct cache *cache, dm_dblock_t b)
681 {
682         spin_lock_irq(&cache->lock);
683         clear_bit(from_dblock(b), cache->discard_bitset);
684         spin_unlock_irq(&cache->lock);
685 }
686
687 static bool is_discarded(struct cache *cache, dm_dblock_t b)
688 {
689         int r;
690         spin_lock_irq(&cache->lock);
691         r = test_bit(from_dblock(b), cache->discard_bitset);
692         spin_unlock_irq(&cache->lock);
693
694         return r;
695 }
696
697 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
698 {
699         int r;
700         spin_lock_irq(&cache->lock);
701         r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
702                      cache->discard_bitset);
703         spin_unlock_irq(&cache->lock);
704
705         return r;
706 }
707
708 /*----------------------------------------------------------------
709  * Remapping
710  *--------------------------------------------------------------*/
711 static void remap_to_origin(struct cache *cache, struct bio *bio)
712 {
713         bio_set_dev(bio, cache->origin_dev->bdev);
714 }
715
716 static void remap_to_cache(struct cache *cache, struct bio *bio,
717                            dm_cblock_t cblock)
718 {
719         sector_t bi_sector = bio->bi_iter.bi_sector;
720         sector_t block = from_cblock(cblock);
721
722         bio_set_dev(bio, cache->cache_dev->bdev);
723         if (!block_size_is_power_of_two(cache))
724                 bio->bi_iter.bi_sector =
725                         (block * cache->sectors_per_block) +
726                         sector_div(bi_sector, cache->sectors_per_block);
727         else
728                 bio->bi_iter.bi_sector =
729                         (block << cache->sectors_per_block_shift) |
730                         (bi_sector & (cache->sectors_per_block - 1));
731 }
732
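/*
 * Worked example (hypothetical geometry): sectors_per_block = 128
 * (shift 7), cblock 5, bi_sector 1000.  The offset within the block is
 * 1000 & 127 = 104, so the bio is remapped to cache sector
 * (5 << 7) | 104 = 744.
 */
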
733 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
734 {
735         struct per_bio_data *pb;
736
737         spin_lock_irq(&cache->lock);
738         if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
739             bio_op(bio) != REQ_OP_DISCARD) {
740                 pb = get_per_bio_data(bio);
741                 pb->tick = true;
742                 cache->need_tick_bio = false;
743         }
744         spin_unlock_irq(&cache->lock);
745 }
746
747 static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
748                                             dm_oblock_t oblock, bool bio_has_pbd)
749 {
750         if (bio_has_pbd)
751                 check_if_tick_bio_needed(cache, bio);
752         remap_to_origin(cache, bio);
753         if (bio_data_dir(bio) == WRITE)
754                 clear_discard(cache, oblock_to_dblock(cache, oblock));
755 }
756
757 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
758                                           dm_oblock_t oblock)
759 {
760         // FIXME: check_if_tick_bio_needed() is called way too much through this interface
761         __remap_to_origin_clear_discard(cache, bio, oblock, true);
762 }
763
764 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
765                                  dm_oblock_t oblock, dm_cblock_t cblock)
766 {
767         check_if_tick_bio_needed(cache, bio);
768         remap_to_cache(cache, bio, cblock);
769         if (bio_data_dir(bio) == WRITE) {
770                 set_dirty(cache, cblock);
771                 clear_discard(cache, oblock_to_dblock(cache, oblock));
772         }
773 }
774
775 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
776 {
777         sector_t block_nr = bio->bi_iter.bi_sector;
778
779         if (!block_size_is_power_of_two(cache))
780                 (void) sector_div(block_nr, cache->sectors_per_block);
781         else
782                 block_nr >>= cache->sectors_per_block_shift;
783
784         return to_oblock(block_nr);
785 }
786
787 static bool accountable_bio(struct cache *cache, struct bio *bio)
788 {
789         return bio_op(bio) != REQ_OP_DISCARD;
790 }
791
792 static void accounted_begin(struct cache *cache, struct bio *bio)
793 {
794         struct per_bio_data *pb;
795
796         if (accountable_bio(cache, bio)) {
797                 pb = get_per_bio_data(bio);
798                 pb->len = bio_sectors(bio);
799                 dm_iot_io_begin(&cache->tracker, pb->len);
800         }
801 }
802
803 static void accounted_complete(struct cache *cache, struct bio *bio)
804 {
805         struct per_bio_data *pb = get_per_bio_data(bio);
806
807         dm_iot_io_end(&cache->tracker, pb->len);
808 }
809
810 static void accounted_request(struct cache *cache, struct bio *bio)
811 {
812         accounted_begin(cache, bio);
813         submit_bio_noacct(bio);
814 }
815
816 static void issue_op(struct bio *bio, void *context)
817 {
818         struct cache *cache = context;
819         accounted_request(cache, bio);
820 }
821
822 /*
823  * When running in writethrough mode we need to send writes to clean blocks
824  * to both the cache and origin devices.  Clone the bio and send them in parallel.
825  */
826 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
827                                       dm_oblock_t oblock, dm_cblock_t cblock)
828 {
829         struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
830
831         BUG_ON(!origin_bio);
832
833         bio_chain(origin_bio, bio);
834         /*
835          * Passing false to __remap_to_origin_clear_discard() skips
836          * all code that might use per_bio_data (since clone doesn't have it)
837          */
838         __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
839         submit_bio(origin_bio);
840
841         remap_to_cache(cache, bio, cblock);
842 }
843
844 /*----------------------------------------------------------------
845  * Failure modes
846  *--------------------------------------------------------------*/
847 static enum cache_metadata_mode get_cache_mode(struct cache *cache)
848 {
849         return cache->features.mode;
850 }
851
852 static const char *cache_device_name(struct cache *cache)
853 {
854         return dm_table_device_name(cache->ti->table);
855 }
856
857 static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
858 {
859         const char *descs[] = {
860                 "write",
861                 "read-only",
862                 "fail"
863         };
864
865         dm_table_event(cache->ti->table);
866         DMINFO("%s: switching cache to %s mode",
867                cache_device_name(cache), descs[(int)mode]);
868 }
869
870 static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
871 {
872         bool needs_check;
873         enum cache_metadata_mode old_mode = get_cache_mode(cache);
874
875         if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
876                 DMERR("%s: unable to read needs_check flag, setting failure mode.",
877                       cache_device_name(cache));
878                 new_mode = CM_FAIL;
879         }
880
881         if (new_mode == CM_WRITE && needs_check) {
882                 DMERR("%s: unable to switch cache to write mode until repaired.",
883                       cache_device_name(cache));
884                 if (old_mode != new_mode)
885                         new_mode = old_mode;
886                 else
887                         new_mode = CM_READ_ONLY;
888         }
889
890         /* Never move out of fail mode */
891         if (old_mode == CM_FAIL)
892                 new_mode = CM_FAIL;
893
894         switch (new_mode) {
895         case CM_FAIL:
896         case CM_READ_ONLY:
897                 dm_cache_metadata_set_read_only(cache->cmd);
898                 break;
899
900         case CM_WRITE:
901                 dm_cache_metadata_set_read_write(cache->cmd);
902                 break;
903         }
904
905         cache->features.mode = new_mode;
906
907         if (new_mode != old_mode)
908                 notify_mode_switch(cache, new_mode);
909 }
910
911 static void abort_transaction(struct cache *cache)
912 {
913         const char *dev_name = cache_device_name(cache);
914
915         if (get_cache_mode(cache) >= CM_READ_ONLY)
916                 return;
917
918         DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
919         if (dm_cache_metadata_abort(cache->cmd)) {
920                 DMERR("%s: failed to abort metadata transaction", dev_name);
921                 set_cache_mode(cache, CM_FAIL);
922         }
923
924         if (dm_cache_metadata_set_needs_check(cache->cmd)) {
925                 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
926                 set_cache_mode(cache, CM_FAIL);
927         }
928 }
929
930 static void metadata_operation_failed(struct cache *cache, const char *op, int r)
931 {
932         DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
933                     cache_device_name(cache), op, r);
934         abort_transaction(cache);
935         set_cache_mode(cache, CM_READ_ONLY);
936 }
937
938 /*----------------------------------------------------------------*/
939
940 static void load_stats(struct cache *cache)
941 {
942         struct dm_cache_statistics stats;
943
944         dm_cache_metadata_get_stats(cache->cmd, &stats);
945         atomic_set(&cache->stats.read_hit, stats.read_hits);
946         atomic_set(&cache->stats.read_miss, stats.read_misses);
947         atomic_set(&cache->stats.write_hit, stats.write_hits);
948         atomic_set(&cache->stats.write_miss, stats.write_misses);
949 }
950
951 static void save_stats(struct cache *cache)
952 {
953         struct dm_cache_statistics stats;
954
955         if (get_cache_mode(cache) >= CM_READ_ONLY)
956                 return;
957
958         stats.read_hits = atomic_read(&cache->stats.read_hit);
959         stats.read_misses = atomic_read(&cache->stats.read_miss);
960         stats.write_hits = atomic_read(&cache->stats.write_hit);
961         stats.write_misses = atomic_read(&cache->stats.write_miss);
962
963         dm_cache_metadata_set_stats(cache->cmd, &stats);
964 }
965
966 static void update_stats(struct cache_stats *stats, enum policy_operation op)
967 {
968         switch (op) {
969         case POLICY_PROMOTE:
970                 atomic_inc(&stats->promotion);
971                 break;
972
973         case POLICY_DEMOTE:
974                 atomic_inc(&stats->demotion);
975                 break;
976
977         case POLICY_WRITEBACK:
978                 atomic_inc(&stats->writeback);
979                 break;
980         }
981 }
982
983 /*----------------------------------------------------------------
984  * Migration processing
985  *
986  * Migration covers moving data from the origin device to the cache, or
987  * vice versa.
988  *--------------------------------------------------------------*/
989
990 static void inc_io_migrations(struct cache *cache)
991 {
992         atomic_inc(&cache->nr_io_migrations);
993 }
994
995 static void dec_io_migrations(struct cache *cache)
996 {
997         atomic_dec(&cache->nr_io_migrations);
998 }
999
1000 static bool discard_or_flush(struct bio *bio)
1001 {
1002         return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
1003 }
1004
1005 static void calc_discard_block_range(struct cache *cache, struct bio *bio,
1006                                      dm_dblock_t *b, dm_dblock_t *e)
1007 {
1008         sector_t sb = bio->bi_iter.bi_sector;
1009         sector_t se = bio_end_sector(bio);
1010
1011         *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
1012
1013         if (se - sb < cache->discard_block_size)
1014                 *e = *b;
1015         else
1016                 *e = to_dblock(block_div(se, cache->discard_block_size));
1017 }
1018
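/*
 * Worked example (hypothetical sizes): with discard_block_size = 128
 * sectors, a discard covering sectors [200, 600) rounds inwards to
 * b = 2 and e = 4, ie. only discard blocks 2 and 3 are marked; the
 * partially covered blocks at either end are left alone.
 */
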
1019 /*----------------------------------------------------------------*/
1020
1021 static void prevent_background_work(struct cache *cache)
1022 {
1023         lockdep_off();
1024         down_write(&cache->background_work_lock);
1025         lockdep_on();
1026 }
1027
1028 static void allow_background_work(struct cache *cache)
1029 {
1030         lockdep_off();
1031         up_write(&cache->background_work_lock);
1032         lockdep_on();
1033 }
1034
1035 static bool background_work_begin(struct cache *cache)
1036 {
1037         bool r;
1038
1039         lockdep_off();
1040         r = down_read_trylock(&cache->background_work_lock);
1041         lockdep_on();
1042
1043         return r;
1044 }
1045
1046 static void background_work_end(struct cache *cache)
1047 {
1048         lockdep_off();
1049         up_read(&cache->background_work_lock);
1050         lockdep_on();
1051 }
1052
1053 /*----------------------------------------------------------------*/
1054
1055 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1056 {
1057         return (bio_data_dir(bio) == WRITE) &&
1058                 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1059 }
1060
1061 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
1062 {
1063         return writeback_mode(cache) &&
1064                 (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
1065 }
1066
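/*
 * Optimisable bios can be used as 'overwrite' bios: either the whole
 * block is being rewritten, or the block is discarded so its old
 * contents don't matter.  Either way the migration can skip the kcopyd
 * copy and let the bio itself populate the target block (see
 * mg_copy()/overwrite() below).
 */
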
1067 static void quiesce(struct dm_cache_migration *mg,
1068                     void (*continuation)(struct work_struct *))
1069 {
1070         init_continuation(&mg->k, continuation);
1071         dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
1072 }
1073
1074 static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
1075 {
1076         struct continuation *k = container_of(ws, struct continuation, ws);
1077         return container_of(k, struct dm_cache_migration, k);
1078 }
1079
1080 static void copy_complete(int read_err, unsigned long write_err, void *context)
1081 {
1082         struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
1083
1084         if (read_err || write_err)
1085                 mg->k.input = BLK_STS_IOERR;
1086
1087         queue_continuation(mg->cache->wq, &mg->k);
1088 }
1089
1090 static void copy(struct dm_cache_migration *mg, bool promote)
1091 {
1092         struct dm_io_region o_region, c_region;
1093         struct cache *cache = mg->cache;
1094
1095         o_region.bdev = cache->origin_dev->bdev;
1096         o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
1097         o_region.count = cache->sectors_per_block;
1098
1099         c_region.bdev = cache->cache_dev->bdev;
1100         c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
1101         c_region.count = cache->sectors_per_block;
1102
1103         if (promote)
1104                 dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
1105         else
1106                 dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
1107 }
1108
1109 static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
1110 {
1111         struct per_bio_data *pb = get_per_bio_data(bio);
1112
1113         if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
1114                 free_prison_cell(cache, pb->cell);
1115         pb->cell = NULL;
1116 }
1117
1118 static void overwrite_endio(struct bio *bio)
1119 {
1120         struct dm_cache_migration *mg = bio->bi_private;
1121         struct cache *cache = mg->cache;
1122         struct per_bio_data *pb = get_per_bio_data(bio);
1123
1124         dm_unhook_bio(&pb->hook_info, bio);
1125
1126         if (bio->bi_status)
1127                 mg->k.input = bio->bi_status;
1128
1129         queue_continuation(cache->wq, &mg->k);
1130 }
1131
1132 static void overwrite(struct dm_cache_migration *mg,
1133                       void (*continuation)(struct work_struct *))
1134 {
1135         struct bio *bio = mg->overwrite_bio;
1136         struct per_bio_data *pb = get_per_bio_data(bio);
1137
1138         dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1139
1140         /*
1141          * The overwrite bio is part of the copy operation; as such it does
1142          * not set/clear the discard or dirty flags.
1143          */
1144         if (mg->op->op == POLICY_PROMOTE)
1145                 remap_to_cache(mg->cache, bio, mg->op->cblock);
1146         else
1147                 remap_to_origin(mg->cache, bio);
1148
1149         init_continuation(&mg->k, continuation);
1150         accounted_request(mg->cache, bio);
1151 }
1152
1153 /*
1154  * Migration steps:
1155  *
1156  * 1) exclusive lock preventing WRITEs
1157  * 2) quiesce
1158  * 3) copy or issue overwrite bio
1159  * 4) upgrade to exclusive lock preventing READs and WRITEs
1160  * 5) quiesce
1161  * 6) update metadata and commit
1162  * 7) unlock
1163  */
1164 static void mg_complete(struct dm_cache_migration *mg, bool success)
1165 {
1166         struct bio_list bios;
1167         struct cache *cache = mg->cache;
1168         struct policy_work *op = mg->op;
1169         dm_cblock_t cblock = op->cblock;
1170
1171         if (success)
1172                 update_stats(&cache->stats, op->op);
1173
1174         switch (op->op) {
1175         case POLICY_PROMOTE:
1176                 clear_discard(cache, oblock_to_dblock(cache, op->oblock));
1177                 policy_complete_background_work(cache->policy, op, success);
1178
1179                 if (mg->overwrite_bio) {
1180                         if (success)
1181                                 force_set_dirty(cache, cblock);
1182                         else if (mg->k.input)
1183                                 mg->overwrite_bio->bi_status = mg->k.input;
1184                         else
1185                                 mg->overwrite_bio->bi_status = BLK_STS_IOERR;
1186                         bio_endio(mg->overwrite_bio);
1187                 } else {
1188                         if (success)
1189                                 force_clear_dirty(cache, cblock);
1190                         dec_io_migrations(cache);
1191                 }
1192                 break;
1193
1194         case POLICY_DEMOTE:
1195                 /*
1196                  * We clear dirty here to update the nr_dirty counter.
1197                  */
1198                 if (success)
1199                         force_clear_dirty(cache, cblock);
1200                 policy_complete_background_work(cache->policy, op, success);
1201                 dec_io_migrations(cache);
1202                 break;
1203
1204         case POLICY_WRITEBACK:
1205                 if (success)
1206                         force_clear_dirty(cache, cblock);
1207                 policy_complete_background_work(cache->policy, op, success);
1208                 dec_io_migrations(cache);
1209                 break;
1210         }
1211
1212         bio_list_init(&bios);
1213         if (mg->cell) {
1214                 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1215                         free_prison_cell(cache, mg->cell);
1216         }
1217
1218         free_migration(mg);
1219         defer_bios(cache, &bios);
1220         wake_migration_worker(cache);
1221
1222         background_work_end(cache);
1223 }
1224
1225 static void mg_success(struct work_struct *ws)
1226 {
1227         struct dm_cache_migration *mg = ws_to_mg(ws);
1228         mg_complete(mg, mg->k.input == 0);
1229 }
1230
1231 static void mg_update_metadata(struct work_struct *ws)
1232 {
1233         int r;
1234         struct dm_cache_migration *mg = ws_to_mg(ws);
1235         struct cache *cache = mg->cache;
1236         struct policy_work *op = mg->op;
1237
1238         switch (op->op) {
1239         case POLICY_PROMOTE:
1240                 r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
1241                 if (r) {
1242                         DMERR_LIMIT("%s: migration failed; couldn't insert mapping",
1243                                     cache_device_name(cache));
1244                         metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
1245
1246                         mg_complete(mg, false);
1247                         return;
1248                 }
1249                 mg_complete(mg, true);
1250                 break;
1251
1252         case POLICY_DEMOTE:
1253                 r = dm_cache_remove_mapping(cache->cmd, op->cblock);
1254                 if (r) {
1255                         DMERR_LIMIT("%s: migration failed; couldn't update on disk metadata",
1256                                     cache_device_name(cache));
1257                         metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1258
1259                         mg_complete(mg, false);
1260                         return;
1261                 }
1262
1263                 /*
1264                  * It would be nice if we only had to commit when a REQ_FLUSH
1265                  * comes through.  But there's one scenario that we have to
1266                  * look out for:
1267                  *
1268                  * - vblock x in a cache block
1269                  * - demotion occurs
1270                  * - cache block gets reallocated and overwritten
1271                  * - crash
1272                  *
1273                  * When we recover, because there was no commit the cache will
1274                  * rollback to having the data for vblock x in the cache block.
1275                  * But the cache block has since been overwritten, so it'll end
1276                  * up pointing to data that was never in 'x' during the history
1277                  * of the device.
1278                  *
1279                  * To avoid this issue we require a commit as part of the
1280                  * demotion operation.
1281                  */
1282                 init_continuation(&mg->k, mg_success);
1283                 continue_after_commit(&cache->committer, &mg->k);
1284                 schedule_commit(&cache->committer);
1285                 break;
1286
1287         case POLICY_WRITEBACK:
1288                 mg_complete(mg, true);
1289                 break;
1290         }
1291 }
1292
1293 static void mg_update_metadata_after_copy(struct work_struct *ws)
1294 {
1295         struct dm_cache_migration *mg = ws_to_mg(ws);
1296
1297         /*
1298          * Did the copy succeed?
1299          */
1300         if (mg->k.input)
1301                 mg_complete(mg, false);
1302         else
1303                 mg_update_metadata(ws);
1304 }
1305
1306 static void mg_upgrade_lock(struct work_struct *ws)
1307 {
1308         int r;
1309         struct dm_cache_migration *mg = ws_to_mg(ws);
1310
1311         /*
1312          * Did the copy succeed?
1313          */
1314         if (mg->k.input)
1315                 mg_complete(mg, false);
1316
1317         else {
1318                 /*
1319                  * Now we want the lock to prevent both reads and writes.
1320                  */
1321                 r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
1322                                             READ_WRITE_LOCK_LEVEL);
1323                 if (r < 0)
1324                         mg_complete(mg, false);
1325
1326                 else if (r)
1327                         quiesce(mg, mg_update_metadata);
1328
1329                 else
1330                         mg_update_metadata(ws);
1331         }
1332 }
1333
1334 static void mg_full_copy(struct work_struct *ws)
1335 {
1336         struct dm_cache_migration *mg = ws_to_mg(ws);
1337         struct cache *cache = mg->cache;
1338         struct policy_work *op = mg->op;
1339         bool is_policy_promote = (op->op == POLICY_PROMOTE);
1340
1341         if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
1342             is_discarded_oblock(cache, op->oblock)) {
1343                 mg_upgrade_lock(ws);
1344                 return;
1345         }
1346
1347         init_continuation(&mg->k, mg_upgrade_lock);
1348         copy(mg, is_policy_promote);
1349 }
1350
1351 static void mg_copy(struct work_struct *ws)
1352 {
1353         struct dm_cache_migration *mg = ws_to_mg(ws);
1354
1355         if (mg->overwrite_bio) {
1356                 /*
1357                  * No exclusive lock was held when we last checked if the bio
1358                  * was optimisable.  So we have to check again in case things
1359                  * have changed (eg, the block may no longer be discarded).
1360                  */
1361                 if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
1362                         /*
1363                          * Fall back to a real full copy after doing some tidying up.
1364                          */
1365                         bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
1366                         BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
1367                         mg->overwrite_bio = NULL;
1368                         inc_io_migrations(mg->cache);
1369                         mg_full_copy(ws);
1370                         return;
1371                 }
1372
1373                 /*
1374                  * It's safe to do this here, even though it's new data
1375                  * because all IO has been locked out of the block.
1376                  *
1377                  * mg_lock_writes() already took READ_WRITE_LOCK_LEVEL
1378                  * so _not_ using mg_upgrade_lock() as continuation.
1379                  */
1380                 overwrite(mg, mg_update_metadata_after_copy);
1381
1382         } else
1383                 mg_full_copy(ws);
1384 }
1385
1386 static int mg_lock_writes(struct dm_cache_migration *mg)
1387 {
1388         int r;
1389         struct dm_cell_key_v2 key;
1390         struct cache *cache = mg->cache;
1391         struct dm_bio_prison_cell_v2 *prealloc;
1392
1393         prealloc = alloc_prison_cell(cache);
1394
1395         /*
1396          * Prevent writes to the block, but allow reads to continue.
1397          * Unless we're using an overwrite bio, in which case we lock
1398          * everything.
1399          */
1400         build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
1401         r = dm_cell_lock_v2(cache->prison, &key,
1402                             mg->overwrite_bio ?  READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
1403                             prealloc, &mg->cell);
1404         if (r < 0) {
1405                 free_prison_cell(cache, prealloc);
1406                 mg_complete(mg, false);
1407                 return r;
1408         }
1409
1410         if (mg->cell != prealloc)
1411                 free_prison_cell(cache, prealloc);
1412
1413         if (r == 0)
1414                 mg_copy(&mg->k.ws);
1415         else
1416                 quiesce(mg, mg_copy);
1417
1418         return 0;
1419 }
1420
1421 static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
1422 {
1423         struct dm_cache_migration *mg;
1424
1425         if (!background_work_begin(cache)) {
1426                 policy_complete_background_work(cache->policy, op, false);
1427                 return -EPERM;
1428         }
1429
1430         mg = alloc_migration(cache);
1431
1432         mg->op = op;
1433         mg->overwrite_bio = bio;
1434
1435         if (!bio)
1436                 inc_io_migrations(cache);
1437
1438         return mg_lock_writes(mg);
1439 }
1440
1441 /*----------------------------------------------------------------
1442  * invalidation processing
1443  *--------------------------------------------------------------*/
1444
1445 static void invalidate_complete(struct dm_cache_migration *mg, bool success)
1446 {
1447         struct bio_list bios;
1448         struct cache *cache = mg->cache;
1449
1450         bio_list_init(&bios);
1451         if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1452                 free_prison_cell(cache, mg->cell);
1453
1454         if (!success && mg->overwrite_bio)
1455                 bio_io_error(mg->overwrite_bio);
1456
1457         free_migration(mg);
1458         defer_bios(cache, &bios);
1459
1460         background_work_end(cache);
1461 }
1462
1463 static void invalidate_completed(struct work_struct *ws)
1464 {
1465         struct dm_cache_migration *mg = ws_to_mg(ws);
1466         invalidate_complete(mg, !mg->k.input);
1467 }
1468
1469 static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
1470 {
1471         int r = policy_invalidate_mapping(cache->policy, cblock);
1472         if (!r) {
1473                 r = dm_cache_remove_mapping(cache->cmd, cblock);
1474                 if (r) {
1475                         DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
1476                                     cache_device_name(cache));
1477                         metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1478                 }
1479
1480         } else if (r == -ENODATA) {
1481                 /*
1482                  * Harmless, already unmapped.
1483                  */
1484                 r = 0;
1485
1486         } else
1487                 DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache));
1488
1489         return r;
1490 }
1491
1492 static void invalidate_remove(struct work_struct *ws)
1493 {
1494         int r;
1495         struct dm_cache_migration *mg = ws_to_mg(ws);
1496         struct cache *cache = mg->cache;
1497
1498         r = invalidate_cblock(cache, mg->invalidate_cblock);
1499         if (r) {
1500                 invalidate_complete(mg, false);
1501                 return;
1502         }
1503
1504         init_continuation(&mg->k, invalidate_completed);
1505         continue_after_commit(&cache->committer, &mg->k);
1506         remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
1507         mg->overwrite_bio = NULL;
1508         schedule_commit(&cache->committer);
1509 }
1510
1511 static int invalidate_lock(struct dm_cache_migration *mg)
1512 {
1513         int r;
1514         struct dm_cell_key_v2 key;
1515         struct cache *cache = mg->cache;
1516         struct dm_bio_prison_cell_v2 *prealloc;
1517
1518         prealloc = alloc_prison_cell(cache);
1519
1520         build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
1521         r = dm_cell_lock_v2(cache->prison, &key,
1522                             READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
1523         if (r < 0) {
1524                 free_prison_cell(cache, prealloc);
1525                 invalidate_complete(mg, false);
1526                 return r;
1527         }
1528
1529         if (mg->cell != prealloc)
1530                 free_prison_cell(cache, prealloc);
1531
1532         if (r)
1533                 quiesce(mg, invalidate_remove);
1534
1535         else {
1536                 /*
1537                  * We can't call invalidate_remove() directly here because we
1538                  * might still be in request context.
1539                  */
1540                 init_continuation(&mg->k, invalidate_remove);
1541                 queue_work(cache->wq, &mg->k.ws);
1542         }
1543
1544         return 0;
1545 }
1546
1547 static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
1548                             dm_oblock_t oblock, struct bio *bio)
1549 {
1550         struct dm_cache_migration *mg;
1551
1552         if (!background_work_begin(cache))
1553                 return -EPERM;
1554
1555         mg = alloc_migration(cache);
1556
1557         mg->overwrite_bio = bio;
1558         mg->invalidate_cblock = cblock;
1559         mg->invalidate_oblock = oblock;
1560
1561         return invalidate_lock(mg);
1562 }
1563
1564 /*----------------------------------------------------------------
1565  * bio processing
1566  *--------------------------------------------------------------*/
1567
1568 enum busy {
1569         IDLE,
1570         BUSY
1571 };
1572
1573 static enum busy spare_migration_bandwidth(struct cache *cache)
1574 {
1575         bool idle = dm_iot_idle_for(&cache->tracker, HZ);
1576         sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
1577                 cache->sectors_per_block;
1578
1579         if (idle && current_volume <= cache->migration_threshold)
1580                 return IDLE;
1581         else
1582                 return BUSY;
1583 }
1584
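/*
 * Example (hypothetical numbers): migration_threshold = 2048 sectors,
 * 128-sector blocks and 10 migrations already in flight gives
 * current_volume = 11 * 128 = 1408 <= 2048, so a new migration is
 * allowed provided the device has been idle for at least a second.
 */
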
1585 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1586 {
1587         atomic_inc(bio_data_dir(bio) == READ ?
1588                    &cache->stats.read_hit : &cache->stats.write_hit);
1589 }
1590
1591 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1592 {
1593         atomic_inc(bio_data_dir(bio) == READ ?
1594                    &cache->stats.read_miss : &cache->stats.write_miss);
1595 }
1596
1597 /*----------------------------------------------------------------*/
1598
1599 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
1600                    bool *commit_needed)
1601 {
1602         int r, data_dir;
1603         bool rb, background_queued;
1604         dm_cblock_t cblock;
1605
1606         *commit_needed = false;
1607
1608         rb = bio_detain_shared(cache, block, bio);
1609         if (!rb) {
1610                 /*
1611                  * An exclusive lock is held for this block, so we have to
1612                  * wait.  We set the commit_needed flag so the current
1613                  * transaction will be committed asap, allowing this lock
1614                  * to be dropped.
1615                  */
1616                 *commit_needed = true;
1617                 return DM_MAPIO_SUBMITTED;
1618         }
1619
1620         data_dir = bio_data_dir(bio);
1621
1622         if (optimisable_bio(cache, bio, block)) {
1623                 struct policy_work *op = NULL;
1624
1625                 r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
1626                 if (unlikely(r && r != -ENOENT)) {
1627                         DMERR_LIMIT("%s: policy_lookup_with_work() failed with r = %d",
1628                                     cache_device_name(cache), r);
1629                         bio_io_error(bio);
1630                         return DM_MAPIO_SUBMITTED;
1631                 }
1632
1633                 if (r == -ENOENT && op) {
1634                         bio_drop_shared_lock(cache, bio);
1635                         BUG_ON(op->op != POLICY_PROMOTE);
1636                         mg_start(cache, op, bio);
1637                         return DM_MAPIO_SUBMITTED;
1638                 }
1639         } else {
1640                 r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
1641                 if (unlikely(r && r != -ENOENT)) {
1642                         DMERR_LIMIT("%s: policy_lookup() failed with r = %d",
1643                                     cache_device_name(cache), r);
1644                         bio_io_error(bio);
1645                         return DM_MAPIO_SUBMITTED;
1646                 }
1647
1648                 if (background_queued)
1649                         wake_migration_worker(cache);
1650         }
1651
1652         if (r == -ENOENT) {
1653                 struct per_bio_data *pb = get_per_bio_data(bio);
1654
1655                 /*
1656                  * Miss.
1657                  */
1658                 inc_miss_counter(cache, bio);
1659                 if (pb->req_nr == 0) {
1660                         accounted_begin(cache, bio);
1661                         remap_to_origin_clear_discard(cache, bio, block);
1662                 } else {
1663                         /*
1664                          * This is a duplicate writethrough io that is no
1665                          * longer needed because the block has been demoted.
1666                          */
1667                         bio_endio(bio);
1668                         return DM_MAPIO_SUBMITTED;
1669                 }
1670         } else {
1671                 /*
1672                  * Hit.
1673                  */
1674                 inc_hit_counter(cache, bio);
1675
1676                 /*
1677                  * Passthrough always maps to the origin, invalidating any
1678                  * cache blocks that are written to.
1679                  */
1680                 if (passthrough_mode(cache)) {
1681                         if (bio_data_dir(bio) == WRITE) {
1682                                 bio_drop_shared_lock(cache, bio);
1683                                 atomic_inc(&cache->stats.demotion);
1684                                 invalidate_start(cache, cblock, block, bio);
1685                         } else
1686                                 remap_to_origin_clear_discard(cache, bio, block);
1687                 } else {
1688                         if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
1689                             !is_dirty(cache, cblock)) {
1690                                 remap_to_origin_and_cache(cache, bio, block, cblock);
1691                                 accounted_begin(cache, bio);
1692                         } else
1693                                 remap_to_cache_dirty(cache, bio, block, cblock);
1694                 }
1695         }
1696
1697         /*
1698          * dm core turns FUA requests into a separate payload and FLUSH req.
1699          */
1700         if (bio->bi_opf & REQ_FUA) {
1701                 /*
1702                  * issue_after_commit will call accounted_begin a second time.  So
1703                  * we call accounted_complete() to avoid double accounting.
1704                  */
1705                 accounted_complete(cache, bio);
1706                 issue_after_commit(&cache->committer, bio);
1707                 *commit_needed = true;
1708                 return DM_MAPIO_SUBMITTED;
1709         }
1710
1711         return DM_MAPIO_REMAPPED;
1712 }
1713
1714 static bool process_bio(struct cache *cache, struct bio *bio)
1715 {
1716         bool commit_needed;
1717
1718         if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
1719                 submit_bio_noacct(bio);
1720
1721         return commit_needed;
1722 }
1723
1724 /*
1725  * A non-zero return indicates read_only or fail_io mode.
1726  */
1727 static int commit(struct cache *cache, bool clean_shutdown)
1728 {
1729         int r;
1730
1731         if (get_cache_mode(cache) >= CM_READ_ONLY)
1732                 return -EINVAL;
1733
1734         atomic_inc(&cache->stats.commit_count);
1735         r = dm_cache_commit(cache->cmd, clean_shutdown);
1736         if (r)
1737                 metadata_operation_failed(cache, "dm_cache_commit", r);
1738
1739         return r;
1740 }
1741
1742 /*
1743  * Used by the batcher.
1744  */
1745 static blk_status_t commit_op(void *context)
1746 {
1747         struct cache *cache = context;
1748
1749         if (dm_cache_changed_this_transaction(cache->cmd))
1750                 return errno_to_blk_status(commit(cache, false));
1751
1752         return 0;
1753 }
1754
1755 /*----------------------------------------------------------------*/
1756
1757 static bool process_flush_bio(struct cache *cache, struct bio *bio)
1758 {
1759         struct per_bio_data *pb = get_per_bio_data(bio);
1760
1761         if (!pb->req_nr)
1762                 remap_to_origin(cache, bio);
1763         else
1764                 remap_to_cache(cache, bio, 0);
1765
1766         issue_after_commit(&cache->committer, bio);
1767         return true;
1768 }
1769
1770 static bool process_discard_bio(struct cache *cache, struct bio *bio)
1771 {
1772         dm_dblock_t b, e;
1773
1774         // FIXME: do we need to lock the region?  Or can we just assume the
1775         // user won't be so foolish as to issue discard concurrently with
1776         // other IO?
1777         calc_discard_block_range(cache, bio, &b, &e);
1778         while (b != e) {
1779                 set_discard(cache, b);
1780                 b = to_dblock(from_dblock(b) + 1);
1781         }
1782
1783         if (cache->features.discard_passdown) {
1784                 remap_to_origin(cache, bio);
1785                 submit_bio_noacct(bio);
1786         } else
1787                 bio_endio(bio);
1788
1789         return false;
1790 }
1791
1792 static void process_deferred_bios(struct work_struct *ws)
1793 {
1794         struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
1795
1796         bool commit_needed = false;
1797         struct bio_list bios;
1798         struct bio *bio;
1799
1800         bio_list_init(&bios);
1801
1802         spin_lock_irq(&cache->lock);
1803         bio_list_merge(&bios, &cache->deferred_bios);
1804         bio_list_init(&cache->deferred_bios);
1805         spin_unlock_irq(&cache->lock);
1806
1807         while ((bio = bio_list_pop(&bios))) {
1808                 if (bio->bi_opf & REQ_PREFLUSH)
1809                         commit_needed = process_flush_bio(cache, bio) || commit_needed;
1810
1811                 else if (bio_op(bio) == REQ_OP_DISCARD)
1812                         commit_needed = process_discard_bio(cache, bio) || commit_needed;
1813
1814                 else
1815                         commit_needed = process_bio(cache, bio) || commit_needed;
1816         }
1817
1818         if (commit_needed)
1819                 schedule_commit(&cache->committer);
1820 }
1821
1822 /*----------------------------------------------------------------
1823  * Main worker loop
1824  *--------------------------------------------------------------*/
1825
1826 static void requeue_deferred_bios(struct cache *cache)
1827 {
1828         struct bio *bio;
1829         struct bio_list bios;
1830
1831         bio_list_init(&bios);
1832         bio_list_merge(&bios, &cache->deferred_bios);
1833         bio_list_init(&cache->deferred_bios);
1834
1835         while ((bio = bio_list_pop(&bios))) {
1836                 bio->bi_status = BLK_STS_DM_REQUEUE;
1837                 bio_endio(bio);
1838         }
1839 }
1840
1841 /*
1842  * We want to commit periodically so that not too much
1843  * unwritten metadata builds up.
1844  */
1845 static void do_waker(struct work_struct *ws)
1846 {
1847         struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1848
1849         policy_tick(cache->policy, true);
1850         wake_migration_worker(cache);
1851         schedule_commit(&cache->committer);
1852         queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1853 }
1854
1855 static void check_migrations(struct work_struct *ws)
1856 {
1857         int r;
1858         struct policy_work *op;
1859         struct cache *cache = container_of(ws, struct cache, migration_worker);
1860         enum busy b;
1861
1862         for (;;) {
1863                 b = spare_migration_bandwidth(cache);
1864
1865                 r = policy_get_background_work(cache->policy, b == IDLE, &op);
1866                 if (r == -ENODATA)
1867                         break;
1868
1869                 if (r) {
1870                         DMERR_LIMIT("%s: policy_get_background_work failed",
1871                                     cache_device_name(cache));
1872                         break;
1873                 }
1874
1875                 r = mg_start(cache, op, NULL);
1876                 if (r)
1877                         break;
1878         }
1879 }
1880
1881 /*----------------------------------------------------------------
1882  * Target methods
1883  *--------------------------------------------------------------*/
1884
1885 /*
1886  * This function gets called on the error paths of the constructor, so we
1887  * have to cope with a partially initialised struct.
1888  */
1889 static void destroy(struct cache *cache)
1890 {
1891         unsigned i;
1892
1893         mempool_exit(&cache->migration_pool);
1894
1895         if (cache->prison)
1896                 dm_bio_prison_destroy_v2(cache->prison);
1897
1898         cancel_delayed_work_sync(&cache->waker);
1899         if (cache->wq)
1900                 destroy_workqueue(cache->wq);
1901
1902         if (cache->dirty_bitset)
1903                 free_bitset(cache->dirty_bitset);
1904
1905         if (cache->discard_bitset)
1906                 free_bitset(cache->discard_bitset);
1907
1908         if (cache->copier)
1909                 dm_kcopyd_client_destroy(cache->copier);
1910
1911         if (cache->cmd)
1912                 dm_cache_metadata_close(cache->cmd);
1913
1914         if (cache->metadata_dev)
1915                 dm_put_device(cache->ti, cache->metadata_dev);
1916
1917         if (cache->origin_dev)
1918                 dm_put_device(cache->ti, cache->origin_dev);
1919
1920         if (cache->cache_dev)
1921                 dm_put_device(cache->ti, cache->cache_dev);
1922
1923         if (cache->policy)
1924                 dm_cache_policy_destroy(cache->policy);
1925
1926         for (i = 0; i < cache->nr_ctr_args; i++)
1927                 kfree(cache->ctr_args[i]);
1928         kfree(cache->ctr_args);
1929
1930         bioset_exit(&cache->bs);
1931
1932         kfree(cache);
1933 }
1934
1935 static void cache_dtr(struct dm_target *ti)
1936 {
1937         struct cache *cache = ti->private;
1938
1939         destroy(cache);
1940 }
1941
1942 static sector_t get_dev_size(struct dm_dev *dev)
1943 {
1944         return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
1945 }
1946
1947 /*----------------------------------------------------------------*/
1948
1949 /*
1950  * Construct a cache device mapping.
1951  *
1952  * cache <metadata dev> <cache dev> <origin dev> <block size>
1953  *       <#feature args> [<feature arg>]*
1954  *       <policy> <#policy args> [<policy arg>]*
1955  *
1956  * metadata dev    : fast device holding the persistent metadata
1957  * cache dev       : fast device holding cached data blocks
1958  * origin dev      : slow device holding original data blocks
1959  * block size      : cache unit size in sectors
1960  *
1961  * #feature args   : number of feature arguments passed
1962  * feature args    : writethrough.  (The default is writeback.)
1963  *
1964  * policy          : the replacement policy to use
1965  * #policy args    : an even number of policy arguments corresponding
1966  *                   to key/value pairs passed to the policy
1967  * policy args     : key/value pairs passed to the policy
1968  *                   E.g. 'sequential_threshold 1024'
1969  *                   See cache-policies.txt for details.
1970  *
1971  * Optional feature arguments are:
1972  *   writethrough  : write through caching that prohibits cache block
1973  *                   content from being different from origin block content.
1974  *                   Without this argument, the default behaviour is to write
1975  *                   back cache block contents later for performance reasons,
1976  *                   so they may differ from the corresponding origin blocks.
1977  */
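/*
 * A hypothetical example table line (device names and sizes are
 * placeholders, not real devices):
 *
 *   dmsetup create my-cache --table \
 *     '0 409600 cache /dev/mapper/cmeta /dev/mapper/cdata /dev/mapper/corig \
 *      512 1 writethrough smq 0'
 *
 * i.e. 512 sector (256 KiB) cache blocks, one feature arg (writethrough)
 * and the smq policy with no policy args.
 */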
1978 struct cache_args {
1979         struct dm_target *ti;
1980
1981         struct dm_dev *metadata_dev;
1982
1983         struct dm_dev *cache_dev;
1984         sector_t cache_sectors;
1985
1986         struct dm_dev *origin_dev;
1987         sector_t origin_sectors;
1988
1989         uint32_t block_size;
1990
1991         const char *policy_name;
1992         int policy_argc;
1993         const char **policy_argv;
1994
1995         struct cache_features features;
1996 };
1997
1998 static void destroy_cache_args(struct cache_args *ca)
1999 {
2000         if (ca->metadata_dev)
2001                 dm_put_device(ca->ti, ca->metadata_dev);
2002
2003         if (ca->cache_dev)
2004                 dm_put_device(ca->ti, ca->cache_dev);
2005
2006         if (ca->origin_dev)
2007                 dm_put_device(ca->ti, ca->origin_dev);
2008
2009         kfree(ca);
2010 }
2011
2012 static bool at_least_one_arg(struct dm_arg_set *as, char **error)
2013 {
2014         if (!as->argc) {
2015                 *error = "Insufficient args";
2016                 return false;
2017         }
2018
2019         return true;
2020 }
2021
2022 static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
2023                               char **error)
2024 {
2025         int r;
2026         sector_t metadata_dev_size;
2027         char b[BDEVNAME_SIZE];
2028
2029         if (!at_least_one_arg(as, error))
2030                 return -EINVAL;
2031
2032         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2033                           &ca->metadata_dev);
2034         if (r) {
2035                 *error = "Error opening metadata device";
2036                 return r;
2037         }
2038
2039         metadata_dev_size = get_dev_size(ca->metadata_dev);
2040         if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
2041                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2042                        bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
2043
2044         return 0;
2045 }
2046
2047 static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
2048                            char **error)
2049 {
2050         int r;
2051
2052         if (!at_least_one_arg(as, error))
2053                 return -EINVAL;
2054
2055         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2056                           &ca->cache_dev);
2057         if (r) {
2058                 *error = "Error opening cache device";
2059                 return r;
2060         }
2061         ca->cache_sectors = get_dev_size(ca->cache_dev);
2062
2063         return 0;
2064 }
2065
2066 static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
2067                             char **error)
2068 {
2069         int r;
2070
2071         if (!at_least_one_arg(as, error))
2072                 return -EINVAL;
2073
2074         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2075                           &ca->origin_dev);
2076         if (r) {
2077                 *error = "Error opening origin device";
2078                 return r;
2079         }
2080
2081         ca->origin_sectors = get_dev_size(ca->origin_dev);
2082         if (ca->ti->len > ca->origin_sectors) {
2083                 *error = "Device size larger than cached device";
2084                 return -EINVAL;
2085         }
2086
2087         return 0;
2088 }
2089
2090 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
2091                             char **error)
2092 {
2093         unsigned long block_size;
2094
2095         if (!at_least_one_arg(as, error))
2096                 return -EINVAL;
2097
2098         if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
2099             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2100             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2101             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2102                 *error = "Invalid data block size";
2103                 return -EINVAL;
2104         }
2105
2106         if (block_size > ca->cache_sectors) {
2107                 *error = "Data block size is larger than the cache device";
2108                 return -EINVAL;
2109         }
2110
2111         ca->block_size = block_size;
2112
2113         return 0;
2114 }
2115
2116 static void init_features(struct cache_features *cf)
2117 {
2118         cf->mode = CM_WRITE;
2119         cf->io_mode = CM_IO_WRITEBACK;
2120         cf->metadata_version = 1;
2121         cf->discard_passdown = true;
2122 }
2123
2124 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2125                           char **error)
2126 {
2127         static const struct dm_arg _args[] = {
2128                 {0, 3, "Invalid number of cache feature arguments"},
2129         };
2130
2131         int r, mode_ctr = 0;
2132         unsigned argc;
2133         const char *arg;
2134         struct cache_features *cf = &ca->features;
2135
2136         init_features(cf);
2137
2138         r = dm_read_arg_group(_args, as, &argc, error);
2139         if (r)
2140                 return -EINVAL;
2141
2142         while (argc--) {
2143                 arg = dm_shift_arg(as);
2144
2145                 if (!strcasecmp(arg, "writeback")) {
2146                         cf->io_mode = CM_IO_WRITEBACK;
2147                         mode_ctr++;
2148                 }
2149
2150                 else if (!strcasecmp(arg, "writethrough")) {
2151                         cf->io_mode = CM_IO_WRITETHROUGH;
2152                         mode_ctr++;
2153                 }
2154
2155                 else if (!strcasecmp(arg, "passthrough")) {
2156                         cf->io_mode = CM_IO_PASSTHROUGH;
2157                         mode_ctr++;
2158                 }
2159
2160                 else if (!strcasecmp(arg, "metadata2"))
2161                         cf->metadata_version = 2;
2162
2163                 else if (!strcasecmp(arg, "no_discard_passdown"))
2164                         cf->discard_passdown = false;
2165
2166                 else {
2167                         *error = "Unrecognised cache feature requested";
2168                         return -EINVAL;
2169                 }
2170         }
2171
2172         if (mode_ctr > 1) {
2173                 *error = "Duplicate cache io_mode features requested";
2174                 return -EINVAL;
2175         }
2176
2177         return 0;
2178 }
2179
2180 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2181                         char **error)
2182 {
2183         static const struct dm_arg _args[] = {
2184                 {0, 1024, "Invalid number of policy arguments"},
2185         };
2186
2187         int r;
2188
2189         if (!at_least_one_arg(as, error))
2190                 return -EINVAL;
2191
2192         ca->policy_name = dm_shift_arg(as);
2193
2194         r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2195         if (r)
2196                 return -EINVAL;
2197
2198         ca->policy_argv = (const char **)as->argv;
2199         dm_consume_args(as, ca->policy_argc);
2200
2201         return 0;
2202 }
2203
2204 static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2205                             char **error)
2206 {
2207         int r;
2208         struct dm_arg_set as;
2209
2210         as.argc = argc;
2211         as.argv = argv;
2212
2213         r = parse_metadata_dev(ca, &as, error);
2214         if (r)
2215                 return r;
2216
2217         r = parse_cache_dev(ca, &as, error);
2218         if (r)
2219                 return r;
2220
2221         r = parse_origin_dev(ca, &as, error);
2222         if (r)
2223                 return r;
2224
2225         r = parse_block_size(ca, &as, error);
2226         if (r)
2227                 return r;
2228
2229         r = parse_features(ca, &as, error);
2230         if (r)
2231                 return r;
2232
2233         r = parse_policy(ca, &as, error);
2234         if (r)
2235                 return r;
2236
2237         return 0;
2238 }
2239
2240 /*----------------------------------------------------------------*/
2241
2242 static struct kmem_cache *migration_cache;
2243
2244 #define NOT_CORE_OPTION 1
2245
2246 static int process_config_option(struct cache *cache, const char *key, const char *value)
2247 {
2248         unsigned long tmp;
2249
2250         if (!strcasecmp(key, "migration_threshold")) {
2251                 if (kstrtoul(value, 10, &tmp))
2252                         return -EINVAL;
2253
2254                 cache->migration_threshold = tmp;
2255                 return 0;
2256         }
2257
2258         return NOT_CORE_OPTION;
2259 }
2260
2261 static int set_config_value(struct cache *cache, const char *key, const char *value)
2262 {
2263         int r = process_config_option(cache, key, value);
2264
2265         if (r == NOT_CORE_OPTION)
2266                 r = policy_set_config_value(cache->policy, key, value);
2267
2268         if (r)
2269                 DMWARN("bad config value for %s: %s", key, value);
2270
2271         return r;
2272 }
2273
2274 static int set_config_values(struct cache *cache, int argc, const char **argv)
2275 {
2276         int r = 0;
2277
2278         if (argc & 1) {
2279                 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
2280                 return -EINVAL;
2281         }
2282
2283         while (argc) {
2284                 r = set_config_value(cache, argv[0], argv[1]);
2285                 if (r)
2286                         break;
2287
2288                 argc -= 2;
2289                 argv += 2;
2290         }
2291
2292         return r;
2293 }
2294
2295 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2296                                char **error)
2297 {
2298         struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2299                                                            cache->cache_size,
2300                                                            cache->origin_sectors,
2301                                                            cache->sectors_per_block);
2302         if (IS_ERR(p)) {
2303                 *error = "Error creating cache's policy";
2304                 return PTR_ERR(p);
2305         }
2306         cache->policy = p;
2307         BUG_ON(!cache->policy);
2308
2309         return 0;
2310 }
2311
2312 /*
2313  * We want the discard block size to be at least as large as the cache
2314  * block size, and to give no more than 2^14 discard blocks across the origin.
2315  */
2316 #define MAX_DISCARD_BLOCKS (1 << 14)
2317
2318 static bool too_many_discard_blocks(sector_t discard_block_size,
2319                                     sector_t origin_size)
2320 {
2321         (void) sector_div(origin_size, discard_block_size);
2322
2323         return origin_size > MAX_DISCARD_BLOCKS;
2324 }
2325
2326 static sector_t calculate_discard_block_size(sector_t cache_block_size,
2327                                              sector_t origin_size)
2328 {
2329         sector_t discard_block_size = cache_block_size;
2330
2331         if (origin_size)
2332                 while (too_many_discard_blocks(discard_block_size, origin_size))
2333                         discard_block_size *= 2;
2334
2335         return discard_block_size;
2336 }
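/*
 * For example (numbers purely illustrative): with 512 sector (256 KiB)
 * cache blocks and a 1 TiB (2^31 sector) origin, using the cache block
 * size directly would give 2^22 discard blocks, so
 * calculate_discard_block_size() above doubles the size until
 * 2^31 / discard_block_size <= 2^14, settling on 2^17 sectors (64 MiB).
 */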
2337
2338 static void set_cache_size(struct cache *cache, dm_cblock_t size)
2339 {
2340         dm_block_t nr_blocks = from_cblock(size);
2341
2342         if (nr_blocks > (1 << 20) && cache->cache_size != size)
2343                 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2344                              "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2345                              "Please consider increasing the cache block size to reduce the overall cache block count.",
2346                              (unsigned long long) nr_blocks);
2347
2348         cache->cache_size = size;
2349 }
2350
2351 #define DEFAULT_MIGRATION_THRESHOLD 2048
2352
2353 static int cache_create(struct cache_args *ca, struct cache **result)
2354 {
2355         int r = 0;
2356         char **error = &ca->ti->error;
2357         struct cache *cache;
2358         struct dm_target *ti = ca->ti;
2359         dm_block_t origin_blocks;
2360         struct dm_cache_metadata *cmd;
2361         bool may_format = ca->features.mode == CM_WRITE;
2362
2363         cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2364         if (!cache)
2365                 return -ENOMEM;
2366
2367         cache->ti = ca->ti;
2368         ti->private = cache;
2369         ti->num_flush_bios = 2;
2370         ti->flush_supported = true;
2371
2372         ti->num_discard_bios = 1;
2373         ti->discards_supported = true;
2374
2375         ti->per_io_data_size = sizeof(struct per_bio_data);
2376
2377         cache->features = ca->features;
2378         if (writethrough_mode(cache)) {
2379                 /* Create bioset for writethrough bios issued to origin */
2380                 r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
2381                 if (r)
2382                         goto bad;
2383         }
2384
2385         cache->metadata_dev = ca->metadata_dev;
2386         cache->origin_dev = ca->origin_dev;
2387         cache->cache_dev = ca->cache_dev;
2388
2389         ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2390
2391         origin_blocks = cache->origin_sectors = ca->origin_sectors;
2392         origin_blocks = block_div(origin_blocks, ca->block_size);
2393         cache->origin_blocks = to_oblock(origin_blocks);
2394
2395         cache->sectors_per_block = ca->block_size;
2396         if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2397                 r = -EINVAL;
2398                 goto bad;
2399         }
2400
2401         if (ca->block_size & (ca->block_size - 1)) {
2402                 dm_block_t cache_size = ca->cache_sectors;
2403
2404                 cache->sectors_per_block_shift = -1;
2405                 cache_size = block_div(cache_size, ca->block_size);
2406                 set_cache_size(cache, to_cblock(cache_size));
2407         } else {
2408                 cache->sectors_per_block_shift = __ffs(ca->block_size);
2409                 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2410         }
2411
2412         r = create_cache_policy(cache, ca, error);
2413         if (r)
2414                 goto bad;
2415
2416         cache->policy_nr_args = ca->policy_argc;
2417         cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2418
2419         r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2420         if (r) {
2421                 *error = "Error setting cache policy's config values";
2422                 goto bad;
2423         }
2424
2425         cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2426                                      ca->block_size, may_format,
2427                                      dm_cache_policy_get_hint_size(cache->policy),
2428                                      ca->features.metadata_version);
2429         if (IS_ERR(cmd)) {
2430                 *error = "Error creating metadata object";
2431                 r = PTR_ERR(cmd);
2432                 goto bad;
2433         }
2434         cache->cmd = cmd;
2435         set_cache_mode(cache, CM_WRITE);
2436         if (get_cache_mode(cache) != CM_WRITE) {
2437                 *error = "Unable to get write access to metadata, please check/repair metadata.";
2438                 r = -EINVAL;
2439                 goto bad;
2440         }
2441
2442         if (passthrough_mode(cache)) {
2443                 bool all_clean;
2444
2445                 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2446                 if (r) {
2447                         *error = "dm_cache_metadata_all_clean() failed";
2448                         goto bad;
2449                 }
2450
2451                 if (!all_clean) {
2452                         *error = "Cannot enter passthrough mode unless all blocks are clean";
2453                         r = -EINVAL;
2454                         goto bad;
2455                 }
2456
2457                 policy_allow_migrations(cache->policy, false);
2458         }
2459
2460         spin_lock_init(&cache->lock);
2461         bio_list_init(&cache->deferred_bios);
2462         atomic_set(&cache->nr_allocated_migrations, 0);
2463         atomic_set(&cache->nr_io_migrations, 0);
2464         init_waitqueue_head(&cache->migration_wait);
2465
2466         r = -ENOMEM;
2467         atomic_set(&cache->nr_dirty, 0);
2468         cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2469         if (!cache->dirty_bitset) {
2470                 *error = "could not allocate dirty bitset";
2471                 goto bad;
2472         }
2473         clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2474
2475         cache->discard_block_size =
2476                 calculate_discard_block_size(cache->sectors_per_block,
2477                                              cache->origin_sectors);
2478         cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2479                                                               cache->discard_block_size));
2480         cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2481         if (!cache->discard_bitset) {
2482                 *error = "could not allocate discard bitset";
2483                 goto bad;
2484         }
2485         clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2486
2487         cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2488         if (IS_ERR(cache->copier)) {
2489                 *error = "could not create kcopyd client";
2490                 r = PTR_ERR(cache->copier);
2491                 goto bad;
2492         }
2493
2494         cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
2495         if (!cache->wq) {
2496                 *error = "could not create workqueue for metadata object";
2497                 goto bad;
2498         }
2499         INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
2500         INIT_WORK(&cache->migration_worker, check_migrations);
2501         INIT_DELAYED_WORK(&cache->waker, do_waker);
2502
2503         cache->prison = dm_bio_prison_create_v2(cache->wq);
2504         if (!cache->prison) {
2505                 *error = "could not create bio prison";
2506                 goto bad;
2507         }
2508
2509         r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
2510                                    migration_cache);
2511         if (r) {
2512                 *error = "Error creating cache's migration mempool";
2513                 goto bad;
2514         }
2515
2516         cache->need_tick_bio = true;
2517         cache->sized = false;
2518         cache->invalidate = false;
2519         cache->commit_requested = false;
2520         cache->loaded_mappings = false;
2521         cache->loaded_discards = false;
2522
2523         load_stats(cache);
2524
2525         atomic_set(&cache->stats.demotion, 0);
2526         atomic_set(&cache->stats.promotion, 0);
2527         atomic_set(&cache->stats.copies_avoided, 0);
2528         atomic_set(&cache->stats.cache_cell_clash, 0);
2529         atomic_set(&cache->stats.commit_count, 0);
2530         atomic_set(&cache->stats.discard_count, 0);
2531
2532         spin_lock_init(&cache->invalidation_lock);
2533         INIT_LIST_HEAD(&cache->invalidation_requests);
2534
2535         batcher_init(&cache->committer, commit_op, cache,
2536                      issue_op, cache, cache->wq);
2537         dm_iot_init(&cache->tracker);
2538
2539         init_rwsem(&cache->background_work_lock);
2540         prevent_background_work(cache);
2541
2542         *result = cache;
2543         return 0;
2544 bad:
2545         destroy(cache);
2546         return r;
2547 }
2548
2549 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2550 {
2551         unsigned i;
2552         const char **copy;
2553
2554         copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2555         if (!copy)
2556                 return -ENOMEM;
2557         for (i = 0; i < argc; i++) {
2558                 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2559                 if (!copy[i]) {
2560                         while (i--)
2561                                 kfree(copy[i]);
2562                         kfree(copy);
2563                         return -ENOMEM;
2564                 }
2565         }
2566
2567         cache->nr_ctr_args = argc;
2568         cache->ctr_args = copy;
2569
2570         return 0;
2571 }
2572
2573 static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2574 {
2575         int r = -EINVAL;
2576         struct cache_args *ca;
2577         struct cache *cache = NULL;
2578
2579         ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2580         if (!ca) {
2581                 ti->error = "Error allocating memory for cache";
2582                 return -ENOMEM;
2583         }
2584         ca->ti = ti;
2585
2586         r = parse_cache_args(ca, argc, argv, &ti->error);
2587         if (r)
2588                 goto out;
2589
2590         r = cache_create(ca, &cache);
2591         if (r)
2592                 goto out;
2593
2594         r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2595         if (r) {
2596                 destroy(cache);
2597                 goto out;
2598         }
2599
2600         ti->private = cache;
2601 out:
2602         destroy_cache_args(ca);
2603         return r;
2604 }
2605
2606 /*----------------------------------------------------------------*/
2607
2608 static int cache_map(struct dm_target *ti, struct bio *bio)
2609 {
2610         struct cache *cache = ti->private;
2611
2612         int r;
2613         bool commit_needed;
2614         dm_oblock_t block = get_bio_block(cache, bio);
2615
2616         init_per_bio_data(bio);
2617         if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2618                 /*
2619                  * This can only occur if the io goes to a partial block at
2620                  * the end of the origin device.  We don't cache these.
2621                  * Just remap to the origin and carry on.
2622                  */
2623                 remap_to_origin(cache, bio);
2624                 accounted_begin(cache, bio);
2625                 return DM_MAPIO_REMAPPED;
2626         }
2627
2628         if (discard_or_flush(bio)) {
2629                 defer_bio(cache, bio);
2630                 return DM_MAPIO_SUBMITTED;
2631         }
2632
2633         r = map_bio(cache, bio, block, &commit_needed);
2634         if (commit_needed)
2635                 schedule_commit(&cache->committer);
2636
2637         return r;
2638 }
2639
2640 static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
2641 {
2642         struct cache *cache = ti->private;
2643         unsigned long flags;
2644         struct per_bio_data *pb = get_per_bio_data(bio);
2645
2646         if (pb->tick) {
2647                 policy_tick(cache->policy, false);
2648
2649                 spin_lock_irqsave(&cache->lock, flags);
2650                 cache->need_tick_bio = true;
2651                 spin_unlock_irqrestore(&cache->lock, flags);
2652         }
2653
2654         bio_drop_shared_lock(cache, bio);
2655         accounted_complete(cache, bio);
2656
2657         return DM_ENDIO_DONE;
2658 }
2659
2660 static int write_dirty_bitset(struct cache *cache)
2661 {
2662         int r;
2663
2664         if (get_cache_mode(cache) >= CM_READ_ONLY)
2665                 return -EINVAL;
2666
2667         r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
2668         if (r)
2669                 metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
2670
2671         return r;
2672 }
2673
2674 static int write_discard_bitset(struct cache *cache)
2675 {
2676         unsigned i, r;
2677
2678         if (get_cache_mode(cache) >= CM_READ_ONLY)
2679                 return -EINVAL;
2680
2681         r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2682                                            cache->discard_nr_blocks);
2683         if (r) {
2684                 DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
2685                 metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
2686                 return r;
2687         }
2688
2689         for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2690                 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2691                                          is_discarded(cache, to_dblock(i)));
2692                 if (r) {
2693                         metadata_operation_failed(cache, "dm_cache_set_discard", r);
2694                         return r;
2695                 }
2696         }
2697
2698         return 0;
2699 }
2700
2701 static int write_hints(struct cache *cache)
2702 {
2703         int r;
2704
2705         if (get_cache_mode(cache) >= CM_READ_ONLY)
2706                 return -EINVAL;
2707
2708         r = dm_cache_write_hints(cache->cmd, cache->policy);
2709         if (r) {
2710                 metadata_operation_failed(cache, "dm_cache_write_hints", r);
2711                 return r;
2712         }
2713
2714         return 0;
2715 }
2716
2717 /*
2718  * returns true on success
2719  */
2720 static bool sync_metadata(struct cache *cache)
2721 {
2722         int r1, r2, r3, r4;
2723
2724         r1 = write_dirty_bitset(cache);
2725         if (r1)
2726                 DMERR("%s: could not write dirty bitset", cache_device_name(cache));
2727
2728         r2 = write_discard_bitset(cache);
2729         if (r2)
2730                 DMERR("%s: could not write discard bitset", cache_device_name(cache));
2731
2732         save_stats(cache);
2733
2734         r3 = write_hints(cache);
2735         if (r3)
2736                 DMERR("%s: could not write hints", cache_device_name(cache));
2737
2738         /*
2739          * If writing the above metadata failed, we still commit, but don't
2740          * set the clean shutdown flag.  This will effectively force every
2741          * dirty bit to be set on reload.
2742          */
2743         r4 = commit(cache, !r1 && !r2 && !r3);
2744         if (r4)
2745                 DMERR("%s: could not write cache metadata", cache_device_name(cache));
2746
2747         return !r1 && !r2 && !r3 && !r4;
2748 }
2749
2750 static void cache_postsuspend(struct dm_target *ti)
2751 {
2752         struct cache *cache = ti->private;
2753
2754         prevent_background_work(cache);
2755         BUG_ON(atomic_read(&cache->nr_io_migrations));
2756
2757         cancel_delayed_work_sync(&cache->waker);
2758         drain_workqueue(cache->wq);
2759         WARN_ON(cache->tracker.in_flight);
2760
2761         /*
2762          * If it's a flush suspend there won't be any deferred bios, so this
2763          * call is harmless.
2764          */
2765         requeue_deferred_bios(cache);
2766
2767         if (get_cache_mode(cache) == CM_WRITE)
2768                 (void) sync_metadata(cache);
2769 }
2770
2771 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2772                         bool dirty, uint32_t hint, bool hint_valid)
2773 {
2774         struct cache *cache = context;
2775
2776         if (dirty) {
2777                 set_bit(from_cblock(cblock), cache->dirty_bitset);
2778                 atomic_inc(&cache->nr_dirty);
2779         } else
2780                 clear_bit(from_cblock(cblock), cache->dirty_bitset);
2781
2782         return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
2783 }
2784
2785 /*
2786  * The discard block size in the on-disk metadata is not
2787  * necessarily the same as the one we're currently using, so we have to
2788  * be careful to only set the discarded attribute if we know it
2789  * covers a complete block of the new size.
2790  */
2791 struct discard_load_info {
2792         struct cache *cache;
2793
2794         /*
2795          * These blocks are sized using the on disk dblock size, rather
2796          * than the current one.
2797          */
2798         dm_block_t block_size;
2799         dm_block_t discard_begin, discard_end;
2800 };
2801
2802 static void discard_load_info_init(struct cache *cache,
2803                                    struct discard_load_info *li)
2804 {
2805         li->cache = cache;
2806         li->discard_begin = li->discard_end = 0;
2807 }
2808
2809 static void set_discard_range(struct discard_load_info *li)
2810 {
2811         sector_t b, e;
2812
2813         if (li->discard_begin == li->discard_end)
2814                 return;
2815
2816         /*
2817          * Convert to sectors.
2818          */
2819         b = li->discard_begin * li->block_size;
2820         e = li->discard_end * li->block_size;
2821
2822         /*
2823          * Then convert back to the current dblock size.
2824          */
2825         b = dm_sector_div_up(b, li->cache->discard_block_size);
2826         sector_div(e, li->cache->discard_block_size);
2827
2828         /*
2829          * The origin may have shrunk, so we need to check we're still in
2830          * bounds.
2831          */
2832         if (e > from_dblock(li->cache->discard_nr_blocks))
2833                 e = from_dblock(li->cache->discard_nr_blocks);
2834
2835         for (; b < e; b++)
2836                 set_discard(li->cache, to_dblock(b));
2837 }
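/*
 * For example (hypothetical sizes): if the on-disk dblock size was 1024
 * sectors and the current discard_block_size is 2048 sectors, an on-disk
 * run covering dblocks [4, 6) spans sectors [4096, 6144).  Rounding the
 * start up and the end down gives current dblocks [2, 3), so only dblock
 * 2, which is completely covered, gets marked discarded; partially
 * covered dblocks are left alone.
 */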
2838
2839 static int load_discard(void *context, sector_t discard_block_size,
2840                         dm_dblock_t dblock, bool discard)
2841 {
2842         struct discard_load_info *li = context;
2843
2844         li->block_size = discard_block_size;
2845
2846         if (discard) {
2847                 if (from_dblock(dblock) == li->discard_end)
2848                         /*
2849                          * We're already in a discard range, just extend it.
2850                          */
2851                         li->discard_end = li->discard_end + 1ULL;
2852
2853                 else {
2854                         /*
2855                          * Emit the old range and start a new one.
2856                          */
2857                         set_discard_range(li);
2858                         li->discard_begin = from_dblock(dblock);
2859                         li->discard_end = li->discard_begin + 1ULL;
2860                 }
2861         } else {
2862                 set_discard_range(li);
2863                 li->discard_begin = li->discard_end = 0;
2864         }
2865
2866         return 0;
2867 }
2868
2869 static dm_cblock_t get_cache_dev_size(struct cache *cache)
2870 {
2871         sector_t size = get_dev_size(cache->cache_dev);
2872         (void) sector_div(size, cache->sectors_per_block);
2873         return to_cblock(size);
2874 }
2875
2876 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2877 {
2878         if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
2879                 if (cache->sized) {
2880                         DMERR("%s: unable to extend cache due to missing cache table reload",
2881                               cache_device_name(cache));
2882                         return false;
2883                 }
2884         }
2885
2886         /*
2887          * We can't drop a dirty block when shrinking the cache.
2888          */
2889         while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2890                 new_size = to_cblock(from_cblock(new_size) + 1);
2891                 if (is_dirty(cache, new_size)) {
2892                         DMERR("%s: unable to shrink cache; cache block %llu is dirty",
2893                               cache_device_name(cache),
2894                               (unsigned long long) from_cblock(new_size));
2895                         return false;
2896                 }
2897         }
2898
2899         return true;
2900 }
2901
2902 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2903 {
2904         int r;
2905
2906         r = dm_cache_resize(cache->cmd, new_size);
2907         if (r) {
2908                 DMERR("%s: could not resize cache metadata", cache_device_name(cache));
2909                 metadata_operation_failed(cache, "dm_cache_resize", r);
2910                 return r;
2911         }
2912
2913         set_cache_size(cache, new_size);
2914
2915         return 0;
2916 }
2917
2918 static int cache_preresume(struct dm_target *ti)
2919 {
2920         int r = 0;
2921         struct cache *cache = ti->private;
2922         dm_cblock_t csize = get_cache_dev_size(cache);
2923
2924         /*
2925          * Check to see if the cache has resized.
2926          */
2927         if (!cache->sized) {
2928                 r = resize_cache_dev(cache, csize);
2929                 if (r)
2930                         return r;
2931
2932                 cache->sized = true;
2933
2934         } else if (csize != cache->cache_size) {
2935                 if (!can_resize(cache, csize))
2936                         return -EINVAL;
2937
2938                 r = resize_cache_dev(cache, csize);
2939                 if (r)
2940                         return r;
2941         }
2942
2943         if (!cache->loaded_mappings) {
2944                 r = dm_cache_load_mappings(cache->cmd, cache->policy,
2945                                            load_mapping, cache);
2946                 if (r) {
2947                         DMERR("%s: could not load cache mappings", cache_device_name(cache));
2948                         metadata_operation_failed(cache, "dm_cache_load_mappings", r);
2949                         return r;
2950                 }
2951
2952                 cache->loaded_mappings = true;
2953         }
2954
2955         if (!cache->loaded_discards) {
2956                 struct discard_load_info li;
2957
2958                 /*
2959                  * The discard bitset could have been resized, or the
2960                  * discard block size changed.  To be safe we start by
2961                  * setting every dblock to not discarded.
2962                  */
2963                 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2964
2965                 discard_load_info_init(cache, &li);
2966                 r = dm_cache_load_discards(cache->cmd, load_discard, &li);
2967                 if (r) {
2968                         DMERR("%s: could not load origin discards", cache_device_name(cache));
2969                         metadata_operation_failed(cache, "dm_cache_load_discards", r);
2970                         return r;
2971                 }
2972                 set_discard_range(&li);
2973
2974                 cache->loaded_discards = true;
2975         }
2976
2977         return r;
2978 }
2979
2980 static void cache_resume(struct dm_target *ti)
2981 {
2982         struct cache *cache = ti->private;
2983
2984         cache->need_tick_bio = true;
2985         allow_background_work(cache);
2986         do_waker(&cache->waker.work);
2987 }
2988
2989 static void emit_flags(struct cache *cache, char *result,
2990                        unsigned maxlen, ssize_t *sz_ptr)
2991 {
2992         ssize_t sz = *sz_ptr;
2993         struct cache_features *cf = &cache->features;
2994         unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
2995
2996         DMEMIT("%u ", count);
2997
2998         if (cf->metadata_version == 2)
2999                 DMEMIT("metadata2 ");
3000
3001         if (writethrough_mode(cache))
3002                 DMEMIT("writethrough ");
3003
3004         else if (passthrough_mode(cache))
3005                 DMEMIT("passthrough ");
3006
3007         else if (writeback_mode(cache))
3008                 DMEMIT("writeback ");
3009
3010         else {
3011                 DMEMIT("unknown ");
3012                 DMERR("%s: internal error: unknown io mode: %d",
3013                       cache_device_name(cache), (int) cf->io_mode);
3014         }
3015
3016         if (!cf->discard_passdown)
3017                 DMEMIT("no_discard_passdown ");
3018
3019         *sz_ptr = sz;
3020 }
3021
3022 /*
3023  * Status format:
3024  *
3025  * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
3026  * <cache block size> <#used cache blocks>/<#total cache blocks>
3027  * <#read hits> <#read misses> <#write hits> <#write misses>
3028  * <#demotions> <#promotions> <#dirty>
3029  * <#features> <features>*
3030  * <#core args> <core args>
3031  * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3032  */
3033 static void cache_status(struct dm_target *ti, status_type_t type,
3034                          unsigned status_flags, char *result, unsigned maxlen)
3035 {
3036         int r = 0;
3037         unsigned i;
3038         ssize_t sz = 0;
3039         dm_block_t nr_free_blocks_metadata = 0;
3040         dm_block_t nr_blocks_metadata = 0;
3041         char buf[BDEVNAME_SIZE];
3042         struct cache *cache = ti->private;
3043         dm_cblock_t residency;
3044         bool needs_check;
3045
3046         switch (type) {
3047         case STATUSTYPE_INFO:
3048                 if (get_cache_mode(cache) == CM_FAIL) {
3049                         DMEMIT("Fail");
3050                         break;
3051                 }
3052
3053                 /* Commit to ensure statistics aren't out-of-date */
3054                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3055                         (void) commit(cache, false);
3056
3057                 r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
3058                 if (r) {
3059                         DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
3060                               cache_device_name(cache), r);
3061                         goto err;
3062                 }
3063
3064                 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
3065                 if (r) {
3066                         DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
3067                               cache_device_name(cache), r);
3068                         goto err;
3069                 }
3070
3071                 residency = policy_residency(cache->policy);
3072
3073                 DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
3074                        (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
3075                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3076                        (unsigned long long)nr_blocks_metadata,
3077                        (unsigned long long)cache->sectors_per_block,
3078                        (unsigned long long) from_cblock(residency),
3079                        (unsigned long long) from_cblock(cache->cache_size),
3080                        (unsigned) atomic_read(&cache->stats.read_hit),
3081                        (unsigned) atomic_read(&cache->stats.read_miss),
3082                        (unsigned) atomic_read(&cache->stats.write_hit),
3083                        (unsigned) atomic_read(&cache->stats.write_miss),
3084                        (unsigned) atomic_read(&cache->stats.demotion),
3085                        (unsigned) atomic_read(&cache->stats.promotion),
3086                        (unsigned long) atomic_read(&cache->nr_dirty));
3087
3088                 emit_flags(cache, result, maxlen, &sz);
3089
3090                 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
3091
3092                 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
3093                 if (sz < maxlen) {
3094                         r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
3095                         if (r)
3096                                 DMERR("%s: policy_emit_config_values returned %d",
3097                                       cache_device_name(cache), r);
3098                 }
3099
3100                 if (get_cache_mode(cache) == CM_READ_ONLY)
3101                         DMEMIT("ro ");
3102                 else
3103                         DMEMIT("rw ");
3104
3105                 r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
3106
3107                 if (r || needs_check)
3108                         DMEMIT("needs_check ");
3109                 else
3110                         DMEMIT("- ");
3111
3112                 break;
3113
3114         case STATUSTYPE_TABLE:
3115                 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3116                 DMEMIT("%s ", buf);
3117                 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3118                 DMEMIT("%s ", buf);
3119                 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3120                 DMEMIT("%s", buf);
3121
3122                 for (i = 0; i < cache->nr_ctr_args - 1; i++)
3123                         DMEMIT(" %s", cache->ctr_args[i]);
3124                 if (cache->nr_ctr_args)
3125                         DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
3126                 break;
3127
3128         case STATUSTYPE_IMA:
3129                 DMEMIT_TARGET_NAME_VERSION(ti->type);
3130                 if (get_cache_mode(cache) == CM_FAIL)
3131                         DMEMIT(",metadata_mode=fail");
3132                 else if (get_cache_mode(cache) == CM_READ_ONLY)
3133                         DMEMIT(",metadata_mode=ro");
3134                 else
3135                         DMEMIT(",metadata_mode=rw");
3136
3137                 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3138                 DMEMIT(",cache_metadata_device=%s", buf);
3139                 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3140                 DMEMIT(",cache_device=%s", buf);
3141                 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3142                 DMEMIT(",cache_origin_device=%s", buf);
3143                 DMEMIT(",writethrough=%c", writethrough_mode(cache) ? 'y' : 'n');
3144                 DMEMIT(",writeback=%c", writeback_mode(cache) ? 'y' : 'n');
3145                 DMEMIT(",passthrough=%c", passthrough_mode(cache) ? 'y' : 'n');
3146                 DMEMIT(",metadata2=%c", cache->features.metadata_version == 2 ? 'y' : 'n');
3147                 DMEMIT(",no_discard_passdown=%c", cache->features.discard_passdown ? 'n' : 'y');
3148                 DMEMIT(";");
3149                 break;
3150         }
3151
3152         return;
3153
3154 err:
3155         DMEMIT("Error");
3156 }
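/*
 * A STATUSTYPE_INFO line following the format above might look something
 * like this (all figures are made up):
 *
 *   8 27/16384 512 312/65536 142 87 965 412 0 153 28 1 writeback \
 *   2 migration_threshold 2048 smq 0 rw -
 *
 * i.e. 4 KiB metadata blocks, 256 KiB cache blocks, a single feature arg
 * (writeback), the two core args, the smq policy with no policy args,
 * a read/write metadata mode and no needs_check flag.
 */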
3157
3158 /*
3159  * Defines a range of cblocks: begin to (end - 1) are in the range, and
3160  * end is the one-past-the-end value.
3161  */
3162 struct cblock_range {
3163         dm_cblock_t begin;
3164         dm_cblock_t end;
3165 };
3166
3167 /*
3168  * A cache block range can take two forms:
3169  *
3170  * i) A single cblock, e.g. '3456'
3171  * ii) A begin and end cblock with a dash between, e.g. 123-234
3172  */
3173 static int parse_cblock_range(struct cache *cache, const char *str,
3174                               struct cblock_range *result)
3175 {
3176         char dummy;
3177         uint64_t b, e;
3178         int r;
3179
3180         /*
3181          * Try and parse form (ii) first.
3182          */
3183         r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
3184         if (r < 0)
3185                 return r;
3186
3187         if (r == 2) {
3188                 result->begin = to_cblock(b);
3189                 result->end = to_cblock(e);
3190                 return 0;
3191         }
3192
3193         /*
3194          * That didn't work, try form (i).
3195          */
3196         r = sscanf(str, "%llu%c", &b, &dummy);
3197         if (r < 0)
3198                 return r;
3199
3200         if (r == 1) {
3201                 result->begin = to_cblock(b);
3202                 result->end = to_cblock(from_cblock(result->begin) + 1u);
3203                 return 0;
3204         }
3205
3206         DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
3207         return -EINVAL;
3208 }
3209
3210 static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
3211 {
3212         uint64_t b = from_cblock(range->begin);
3213         uint64_t e = from_cblock(range->end);
3214         uint64_t n = from_cblock(cache->cache_size);
3215
3216         if (b >= n) {
3217                 DMERR("%s: begin cblock out of range: %llu >= %llu",
3218                       cache_device_name(cache), b, n);
3219                 return -EINVAL;
3220         }
3221
3222         if (e > n) {
3223                 DMERR("%s: end cblock out of range: %llu > %llu",
3224                       cache_device_name(cache), e, n);
3225                 return -EINVAL;
3226         }
3227
3228         if (b >= e) {
3229                 DMERR("%s: invalid cblock range: %llu >= %llu",
3230                       cache_device_name(cache), b, e);
3231                 return -EINVAL;
3232         }
3233
3234         return 0;
3235 }
3236
3237 static inline dm_cblock_t cblock_succ(dm_cblock_t b)
3238 {
3239         return to_cblock(from_cblock(b) + 1);
3240 }
3241
3242 static int request_invalidation(struct cache *cache, struct cblock_range *range)
3243 {
3244         int r = 0;
3245
3246         /*
3247          * We don't need to do any locking here because we know we're in
3248          * passthrough mode.  There is potential for a race between an
3249          * invalidation triggered by an io and an invalidation message.  This
3250          * is harmless; we needn't worry if the policy call fails.
3251          */
3252         while (range->begin != range->end) {
3253                 r = invalidate_cblock(cache, range->begin);
3254                 if (r)
3255                         return r;
3256
3257                 range->begin = cblock_succ(range->begin);
3258         }
3259
3260         cache->commit_requested = true;
3261         return r;
3262 }
3263
3264 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
3265                                               const char **cblock_ranges)
3266 {
3267         int r = 0;
3268         unsigned i;
3269         struct cblock_range range;
3270
3271         if (!passthrough_mode(cache)) {
3272                 DMERR("%s: cache has to be in passthrough mode for invalidation",
3273                       cache_device_name(cache));
3274                 return -EPERM;
3275         }
3276
3277         for (i = 0; i < count; i++) {
3278                 r = parse_cblock_range(cache, cblock_ranges[i], &range);
3279                 if (r)
3280                         break;
3281
3282                 r = validate_cblock_range(cache, &range);
3283                 if (r)
3284                         break;
3285
3286                 /*
3287                  * Invalidate the cblocks in this range.
3288                  */
3289                 r = request_invalidation(cache, &range);
3290                 if (r)
3291                         break;
3292         }
3293
3294         return r;
3295 }
3296
3297 /*
3298  * Supports
3299  *      "<key> <value>"
3300  * and
3301  *     "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*
3302  *
3303  * The key migration_threshold is supported by the cache target core.
3304  */
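/*
 * Illustrative usage from userspace ('my-cache' is a hypothetical mapped
 * device name, and the values are examples only):
 *
 *     dmsetup message my-cache 0 invalidate_cblocks 2345 3456-4567
 *     dmsetup message my-cache 0 migration_threshold 2048
 */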
3305 static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
3306                          char *result, unsigned maxlen)
3307 {
3308         struct cache *cache = ti->private;
3309
3310         if (!argc)
3311                 return -EINVAL;
3312
3313         if (get_cache_mode(cache) >= CM_READ_ONLY) {
3314                 DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
3315                       cache_device_name(cache));
3316                 return -EOPNOTSUPP;
3317         }
3318
3319         if (!strcasecmp(argv[0], "invalidate_cblocks"))
3320                 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3321
3322         if (argc != 2)
3323                 return -EINVAL;
3324
3325         return set_config_value(cache, argv[0], argv[1]);
3326 }
3327
3328 static int cache_iterate_devices(struct dm_target *ti,
3329                                  iterate_devices_callout_fn fn, void *data)
3330 {
3331         int r = 0;
3332         struct cache *cache = ti->private;
3333
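        /*
         * Report the whole cache device, but only the portion of the origin
         * device that this target maps (ti->len).
         */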
3334         r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3335         if (!r)
3336                 r = fn(ti, cache->origin_dev, 0, ti->len, data);
3337
3338         return r;
3339 }
3340
3341 static bool origin_dev_supports_discard(struct block_device *origin_bdev)
3342 {
3343         struct request_queue *q = bdev_get_queue(origin_bdev);
3344
3345         return blk_queue_discard(q);
3346 }
3347
3348 /*
3349  * If discard_passdown was enabled, verify that the origin device
3350  * supports discards.  Disable discard_passdown if not.
3351  */
3352 static void disable_passdown_if_not_supported(struct cache *cache)
3353 {
3354         struct block_device *origin_bdev = cache->origin_dev->bdev;
3355         struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
3356         const char *reason = NULL;
3357         char buf[BDEVNAME_SIZE];
3358
3359         if (!cache->features.discard_passdown)
3360                 return;
3361
3362         if (!origin_dev_supports_discard(origin_bdev))
3363                 reason = "discard unsupported";
3364
3365         else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
3366                 reason = "max discard sectors smaller than a block";
3367
3368         if (reason) {
3369                 DMWARN("Origin device (%s) %s: Disabling discard passdown.",
3370                        bdevname(origin_bdev, buf), reason);
3371                 cache->features.discard_passdown = false;
3372         }
3373 }
3374
3375 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3376 {
3377         struct block_device *origin_bdev = cache->origin_dev->bdev;
3378         struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
3379
3380         if (!cache->features.discard_passdown) {
3381                 /* No passdown is done, so set our own virtual limits. */
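                /* Cap a single discard at 1024 discard blocks, or the whole origin if smaller. */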
3382                 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
3383                                                     cache->origin_sectors);
3384                 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
3385                 return;
3386         }
3387
3388         /*
3389          * cache_iterate_devices() is stacking both origin and fast device limits
3390          * but discards aren't passed to fast device, so inherit origin's limits.
3391          */
3392         limits->max_discard_sectors = origin_limits->max_discard_sectors;
3393         limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
3394         limits->discard_granularity = origin_limits->discard_granularity;
3395         limits->discard_alignment = origin_limits->discard_alignment;
3396         limits->discard_misaligned = origin_limits->discard_misaligned;
3397 }
3398
3399 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3400 {
3401         struct cache *cache = ti->private;
3402         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3403
3404         /*
3405          * If the system-determined stacked limits are compatible with the
3406          * cache's block size (io_opt is a factor), do not override them.
3407          */
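        /* i.e. override when io_opt is smaller than, or not a multiple of, the cache block size. */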
3408         if (io_opt_sectors < cache->sectors_per_block ||
3409             do_div(io_opt_sectors, cache->sectors_per_block)) {
3410                 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
3411                 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3412         }
3413
3414         disable_passdown_if_not_supported(cache);
3415         set_discard_limits(cache, limits);
3416 }
3417
3418 /*----------------------------------------------------------------*/
3419
3420 static struct target_type cache_target = {
3421         .name = "cache",
3422         .version = {2, 2, 0},
3423         .module = THIS_MODULE,
3424         .ctr = cache_ctr,
3425         .dtr = cache_dtr,
3426         .map = cache_map,
3427         .end_io = cache_end_io,
3428         .postsuspend = cache_postsuspend,
3429         .preresume = cache_preresume,
3430         .resume = cache_resume,
3431         .status = cache_status,
3432         .message = cache_message,
3433         .iterate_devices = cache_iterate_devices,
3434         .io_hints = cache_io_hints,
3435 };
3436
3437 static int __init dm_cache_init(void)
3438 {
3439         int r;
3440
3441         migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3442         if (!migration_cache)
3443                 return -ENOMEM;
3444
3445         r = dm_register_target(&cache_target);
3446         if (r) {
3447                 DMERR("cache target registration failed: %d", r);
3448                 kmem_cache_destroy(migration_cache);
3449                 return r;
3450         }
3451
3452         return 0;
3453 }
3454
3455 static void __exit dm_cache_exit(void)
3456 {
3457         dm_unregister_target(&cache_target);
3458         kmem_cache_destroy(migration_cache);
3459 }
3460
3461 module_init(dm_cache_init);
3462 module_exit(dm_cache_exit);
3463
3464 MODULE_DESCRIPTION(DM_NAME " cache target");
3465 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3466 MODULE_LICENSE("GPL");