/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

static unsigned next_power(unsigned n, unsigned min)
        return roundup_pow_of_two(max(n, min));
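/*
 * For example, next_power(3, 16) returns 16 and next_power(100, 16) returns
 * 128: the result is always a power of two and never below min.
 */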
/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * Two thresholds to switch between random and sequential io mode default as
 * follows and can be adjusted via the constructor and message interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
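/*
 * With these defaults, 512 contiguous bios in a row put the tracker into
 * sequential mode and 4 non-contiguous bios in a row drop it back to random
 * mode. Both thresholds can be changed at runtime via the
 * sequential_threshold and random_threshold messages (see
 * mq_set_config_value() below).
 */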
        enum io_pattern pattern;

        unsigned nr_seq_samples;
        unsigned nr_rand_samples;
        unsigned thresholds[2];

        dm_oblock_t last_end_oblock;

static void iot_init(struct io_tracker *t,
                     int sequential_threshold, int random_threshold)
        t->pattern = PATTERN_RANDOM;
        t->nr_seq_samples = 0;
        t->nr_rand_samples = 0;
        t->last_end_oblock = 0;
        t->thresholds[PATTERN_RANDOM] = random_threshold;
        t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;

static enum io_pattern iot_pattern(struct io_tracker *t)

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
        if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
                /*
                 * Just one non-sequential IO is enough to reset the
                 * counters.
                 */
                if (t->nr_seq_samples) {
                        t->nr_seq_samples = 0;
                        t->nr_rand_samples = 0;
                }

        t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);

static void iot_check_for_pattern_switch(struct io_tracker *t)
        case PATTERN_SEQUENTIAL:
                if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
                        t->pattern = PATTERN_RANDOM;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }

                if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
                        t->pattern = PATTERN_SEQUENTIAL;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
        iot_update_stats(t, bio);
        iot_check_for_pattern_switch(t);

/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels. Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u
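/*
 * For example, queue_pop() below always takes the oldest entry from the
 * lowest populated level, so entries with few hits (low levels) are chosen
 * for eviction before frequently hit ones, and entries within a level age in
 * FIFO order.
 */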
        struct list_head qs[NR_QUEUE_LEVELS];

static void queue_init(struct queue *q)
        for (i = 0; i < NR_QUEUE_LEVELS; i++)
                INIT_LIST_HEAD(q->qs + i);

/*
 * Checks to see if the queue is empty.
 * FIXME: reduce cpu usage.
 */
static bool queue_empty(struct queue *q)
        for (i = 0; i < NR_QUEUE_LEVELS; i++)
                if (!list_empty(q->qs + i))

/*
 * Insert an entry at the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
        list_add_tail(elt, q->qs + level);

static void queue_remove(struct list_head *elt)

/*
 * Shifts all regions down one level. This has no effect on the order of
 * the elements within a level.
 */
static void queue_shift_down(struct queue *q)
        for (level = 1; level < NR_QUEUE_LEVELS; level++)
                list_splice_init(q->qs + level, q->qs + level - 1);

/*
 * Gives us the oldest entry of the lowest populated level. If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_pop(struct queue *q)
        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                if (!list_empty(q->qs + level)) {
                        r = q->qs[level].next;

                        /* have we just emptied the bottom level? */
                        if (level == 0 && list_empty(q->qs))

static struct list_head *list_pop(struct list_head *lh)
        struct list_head *r = lh->next;

/*----------------------------------------------------------------*/
/*
 * Describes a cache entry. Used in both the cache and the pre_cache.
 */
        struct hlist_node hlist;
        struct list_head list;

        /* FIXME: pack these better */

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
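/*
 * For example, the cblock of an entry e is just its index in the array,
 * e - ep->entries (see infer_cblock() below), and alloc_particular_entry()
 * claims ep->entries[from_cblock(cblock)] directly off the free list.
 */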
        struct entry *entries, *entries_end;
        struct list_head free;
        unsigned nr_allocated;

static int epool_init(struct entry_pool *ep, unsigned nr_entries)
        ep->entries = vzalloc(sizeof(struct entry) * nr_entries);

        ep->entries_end = ep->entries + nr_entries;

        INIT_LIST_HEAD(&ep->free);
        for (i = 0; i < nr_entries; i++)
                list_add(&ep->entries[i].list, &ep->free);

        ep->nr_allocated = 0;

static void epool_exit(struct entry_pool *ep)

static struct entry *alloc_entry(struct entry_pool *ep)
        if (list_empty(&ep->free))

        e = list_entry(list_pop(&ep->free), struct entry, list);
        INIT_LIST_HEAD(&e->list);
        INIT_HLIST_NODE(&e->hlist);

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
        struct entry *e = ep->entries + from_cblock(cblock);

        list_del_init(&e->list);
        INIT_HLIST_NODE(&e->hlist);

static void free_entry(struct entry_pool *ep, struct entry *e)
        BUG_ON(!ep->nr_allocated);

        INIT_HLIST_NODE(&e->hlist);
        list_add(&e->list, &ep->free);

/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
        struct entry *e = ep->entries + from_cblock(cblock);
        return !hlist_unhashed(&e->hlist) ? e : NULL;

static bool epool_empty(struct entry_pool *ep)
        return list_empty(&ep->free);

static bool in_pool(struct entry_pool *ep, struct entry *e)
        return e >= ep->entries && e < ep->entries_end;

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
        return to_cblock(e - ep->entries);

/*----------------------------------------------------------------*/
        struct dm_cache_policy policy;

        /* protects everything */
        dm_cblock_t cache_size;
        struct io_tracker tracker;

        /*
         * Entries come from two pools, one of pre-cache entries, and one
         * for the cache proper.
         */
        struct entry_pool pre_cache_pool;
        struct entry_pool cache_pool;

        /*
         * We maintain three queues of entries. The cache proper,
         * consisting of a clean and dirty queue, contains the currently
         * active mappings, whereas the pre_cache tracks blocks that are
         * being hit frequently and are potential candidates for promotion
         * to the cache.
         */
        struct queue pre_cache;
        struct queue cache_clean;
        struct queue cache_dirty;

        /*
         * Keeps track of time, incremented by the core. We use this to
         * avoid attributing multiple hits within the same tick.
         *
         * Access to tick_protected should be done with the spin lock held.
         * It's copied to tick at the start of the map function (within the
         * mutex).
         */
        spinlock_t tick_lock;
        unsigned tick_protected;

        /*
         * A count of the number of times the map function has been called
         * and found an entry in the pre_cache or cache. Currently used to
         * calculate the generation.
         */

        /*
         * A generation is a longish period that is used to trigger some
         * bookkeeping effects. eg, decrementing hit counts on entries.
         * This is needed to allow the cache to evolve as io patterns
         * change.
         */
        unsigned generation_period; /* in lookups (will probably change) */
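        /*
         * mq_create() below sets this to max(cache_size, 1024) lookups, so a
         * generation is roughly one cache's worth of hits.
         */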
        /*
         * Entries in the pre_cache whose hit count passes the promotion
         * threshold move to the cache proper. Working out the correct
         * value for the promotion_threshold is crucial to this policy.
         */
        unsigned promote_threshold;

        /*
         * The hash table allows us to quickly find an entry by origin
         * block. Both pre_cache and cache entries are in here.
         */
        dm_block_t hash_bits;
        struct hlist_head *table;

/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation. Should replace with the standard hash
 * table that's making its way upstream.
 */
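/*
 * For example, a cache of 16384 blocks gets next_power(16384 / 2, 16) == 8192
 * buckets and hash_bits of 13 in mq_create(), so hash_64() spreads origin
 * blocks over buckets 0..8191. hash_lookup() also moves a found entry to the
 * front of its bucket, keeping hot entries cheap to find.
 */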
static void hash_insert(struct mq_policy *mq, struct entry *e)
        unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

        hlist_add_head(&e->hlist, mq->table + h);

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
        unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
        struct hlist_head *bucket = mq->table + h;

        hlist_for_each_entry(e, bucket, hlist)
                if (e->oblock == oblock) {
                        hlist_del(&e->hlist);
                        hlist_add_head(&e->hlist, bucket);

static void hash_remove(struct entry *e)
        hlist_del(&e->hlist);

/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
        return !epool_empty(&mq->cache_pool);

static bool any_clean_cblocks(struct mq_policy *mq)
        return !queue_empty(&mq->cache_clean);

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy. This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
        return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
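/*
 * For example, an entry with hit_count 1 sits in level 0, hit_counts 2-3 in
 * level 1, 4-7 in level 2, and anything with 32768 or more hits is capped at
 * the top level, 15.
 */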
static bool in_cache(struct mq_policy *mq, struct entry *e)
        return in_pool(&mq->cache_pool, e);

/*
 * Inserts the entry into the pre_cache or the cache. Ensures the cache
 * block is marked as allocated if necessary. Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
                queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
                           queue_level(e), &e->list);
                queue_push(&mq->pre_cache, queue_level(e), &e->list);

/*
 * Removes an entry from pre_cache or cache. Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
        queue_remove(&e->list);

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
        struct list_head *h = queue_pop(q);

        e = container_of(h, struct entry, list);

/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
        return mq->tick == e->tick;
/*
 * The promotion threshold is adjusted every generation, as are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though. For example, each promotion
 * could bump up the threshold, helping to prevent churn. Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
        unsigned total = 0, nr = 0, count = 0, level;
        struct list_head *head;

        if ((mq->hit_count >= mq->generation_period) &&
            (epool_empty(&mq->cache_pool))) {

                for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
                        head = mq->cache_clean.qs + level;
                        list_for_each_entry(e, head, list) {
                                total += e->hit_count;
                                if (++count >= MAX_TO_AVERAGE)

                        head = mq->cache_dirty.qs + level;
                        list_for_each_entry(e, head, list) {
                                total += e->hit_count;
                                if (++count >= MAX_TO_AVERAGE)

                mq->promote_threshold = nr ? total / nr : 1;
                if (mq->promote_threshold * nr < total)
                        mq->promote_threshold++;
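                /*
                 * ie. promote_threshold is the mean hit count rounded up: for
                 * example, total == 10 over nr == 4 entries gives 10 / 4 == 2,
                 * and since 2 * 4 < 10 the threshold is bumped to 3.
                 */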
/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
        if (updated_this_tick(mq, e))

        check_generation(mq);

        /* generation adjustment, to stop the counts increasing forever. */
        /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
        e->generation = mq->generation;

/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit. There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   in the pre_cache).
 * - divide the hit count rather than setting it to some hard-coded value.
 * - set the hit count to a hard-coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
        struct entry *demoted = pop(mq, &mq->cache_clean);

                /*
                 * We could get a block from mq->cache_dirty, but that
                 * would add extra latency to the triggering bio as it
                 * waits for the writeback. Better to not promote this
                 * time and hope there's a clean block next time this block
                 * is hit.
                 */

        *oblock = demoted->oblock;
        free_entry(&mq->cache_pool, demoted);

        /*
         * We used to put the demoted block into the pre-cache, but I think
         * it's simpler to just let it work its way up from zero again.
         * Stops blocks flickering in and out of the cache.
         */

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
#define DISCARDED_PROMOTE_THRESHOLD 1
#define READ_PROMOTE_THRESHOLD 4
#define WRITE_PROMOTE_THRESHOLD 8
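/*
 * For example, with promote_threshold at 4 a read must have hit_count >= 8
 * and a write hit_count >= 12 before promotion, but a write to a discarded
 * origin block only needs a single hit while a free or clean cblock is
 * available, since no copy is required.
 */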
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
                                           bool discarded_oblock, int data_dir)
        if (data_dir == READ)
                return mq->promote_threshold + READ_PROMOTE_THRESHOLD;

        if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
                /*
                 * We don't need to do any copying at all, so give this a
                 * very low threshold.
                 */
                return DISCARDED_PROMOTE_THRESHOLD;

        return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;

static bool should_promote(struct mq_policy *mq, struct entry *e,
                           bool discarded_oblock, int data_dir)
        return e->hit_count >=
                adjusted_promote_threshold(mq, discarded_oblock, data_dir);

static int cache_entry_found(struct mq_policy *mq,
                             struct policy_result *result)
        requeue_and_update_tick(mq, e);

        if (in_cache(mq, e)) {
                result->op = POLICY_HIT;
                result->cblock = infer_cblock(&mq->cache_pool, e);

/*
 * Moves an entry from the pre_cache to the cache. The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
                              struct policy_result *result)
        /* Ensure there's a free cblock in the cache */
        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
                r = demote_cblock(mq, &result->old_oblock);
                        result->op = POLICY_MISS;
                result->op = POLICY_NEW;

        new_e = alloc_entry(&mq->cache_pool);

        new_e->oblock = e->oblock;
        new_e->dirty = false;
        new_e->hit_count = e->hit_count;
        new_e->generation = e->generation;
        new_e->tick = e->tick;

        free_entry(&mq->pre_cache_pool, e);

        result->cblock = infer_cblock(&mq->cache_pool, new_e);
static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
                                 bool can_migrate, bool discarded_oblock,
                                 int data_dir, struct policy_result *result)
        bool updated = updated_this_tick(mq, e);

        if ((!discarded_oblock && updated) ||
            !should_promote(mq, e, discarded_oblock, data_dir)) {
                requeue_and_update_tick(mq, e);
                result->op = POLICY_MISS;

        } else if (!can_migrate)

                requeue_and_update_tick(mq, e);
                r = pre_cache_to_cache(mq, e, result);

static void insert_in_pre_cache(struct mq_policy *mq,
        struct entry *e = alloc_entry(&mq->pre_cache_pool);

                /*
                 * There's no spare entry structure, so we grab the least
                 * used one from the pre_cache.
                 */
                e = pop(mq, &mq->pre_cache);

                        DMWARN("couldn't pop from pre cache");

        e->generation = mq->generation;

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
                            struct policy_result *result)
        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
                r = demote_cblock(mq, &result->old_oblock);
                        result->op = POLICY_MISS;
                        insert_in_pre_cache(mq, oblock);

                /*
                 * This will always succeed, since we've just demoted.
                 */
                e = alloc_entry(&mq->cache_pool);

                e = alloc_entry(&mq->cache_pool);
                result->op = POLICY_NEW;

        e->generation = mq->generation;

        result->cblock = infer_cblock(&mq->cache_pool, e);

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
                          bool can_migrate, bool discarded_oblock,
                          int data_dir, struct policy_result *result)
        if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
                        insert_in_cache(mq, oblock, result);

                insert_in_pre_cache(mq, oblock);
                result->op = POLICY_MISS;
/*
 * Looks the oblock up in the hash table, then decides whether to put it in
 * the pre_cache or the cache, etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
               bool can_migrate, bool discarded_oblock,
               int data_dir, struct policy_result *result)
        struct entry *e = hash_lookup(mq, oblock);

        if (e && in_cache(mq, e))
                r = cache_entry_found(mq, e, result);

        else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
                result->op = POLICY_MISS;

                r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,

                r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,

        if (r == -EWOULDBLOCK)
                result->op = POLICY_MISS;
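        /*
         * To summarise: a block already in the cache is a POLICY_HIT; a bio
         * that looks sequential is left on the origin device (POLICY_MISS);
         * a pre_cache hit may be promoted via pre_cache_to_cache(); anything
         * else goes into the pre_cache, or straight into the cache when the
         * adjusted threshold allows it.
         */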
/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct. See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
        return container_of(p, struct mq_policy, policy);

static void mq_destroy(struct dm_cache_policy *p)
        struct mq_policy *mq = to_mq_policy(p);

        epool_exit(&mq->cache_pool);
        epool_exit(&mq->pre_cache_pool);

static void copy_tick(struct mq_policy *mq)
        spin_lock_irqsave(&mq->tick_lock, flags);
        mq->tick = mq->tick_protected;
        spin_unlock_irqrestore(&mq->tick_lock, flags);

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
                  struct bio *bio, struct policy_result *result)
        struct mq_policy *mq = to_mq_policy(p);

        result->op = POLICY_MISS;

                mutex_lock(&mq->lock);
        else if (!mutex_trylock(&mq->lock))

        iot_examine_bio(&mq->tracker, bio);
        r = map(mq, oblock, can_migrate, discarded_oblock,
                bio_data_dir(bio), result);

        mutex_unlock(&mq->lock);

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
        struct mq_policy *mq = to_mq_policy(p);

        if (!mutex_trylock(&mq->lock))

        e = hash_lookup(mq, oblock);
        if (e && in_cache(mq, e)) {
                *cblock = infer_cblock(&mq->cache_pool, e);

        mutex_unlock(&mq->lock);

static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
        e = hash_lookup(mq, oblock);
        BUG_ON(!e || !in_cache(mq, e));

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __mq_set_clear_dirty(mq, oblock, true);
        mutex_unlock(&mq->lock);

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __mq_set_clear_dirty(mq, oblock, false);
        mutex_unlock(&mq->lock);

static int mq_load_mapping(struct dm_cache_policy *p,
                           dm_oblock_t oblock, dm_cblock_t cblock,
                           uint32_t hint, bool hint_valid)
        struct mq_policy *mq = to_mq_policy(p);

        e = alloc_particular_entry(&mq->cache_pool, cblock);

        e->dirty = false; /* this gets corrected in a minute */
        e->hit_count = hint_valid ? hint : 1;
        e->generation = mq->generation;

static int mq_save_hints(struct mq_policy *mq, struct queue *q,
                         policy_walk_fn fn, void *context)
        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each_entry(e, q->qs + level, list) {
                        r = fn(context, infer_cblock(&mq->cache_pool, e),
                               e->oblock, e->hit_count);

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);

        r = mq_save_hints(mq, &mq->cache_clean, fn, context);
                r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

        mutex_unlock(&mq->lock);

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
        e = hash_lookup(mq, oblock);
        BUG_ON(!e || !in_cache(mq, e));

        free_entry(&mq->cache_pool, e);

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __remove_mapping(mq, oblock);
        mutex_unlock(&mq->lock);

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
        struct entry *e = epool_find(&mq->cache_pool, cblock);

        free_entry(&mq->cache_pool, e);

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = __remove_cblock(mq, cblock);
        mutex_unlock(&mq->lock);

static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
                               dm_cblock_t *cblock)
        struct entry *e = pop(mq, &mq->cache_dirty);

        *oblock = e->oblock;
        *cblock = infer_cblock(&mq->cache_pool, e);

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
                             dm_cblock_t *cblock)
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = __mq_writeback_work(mq, oblock, cblock);
        mutex_unlock(&mq->lock);

static void __force_mapping(struct mq_policy *mq,
                            dm_oblock_t current_oblock, dm_oblock_t new_oblock)
        struct entry *e = hash_lookup(mq, current_oblock);

        if (e && in_cache(mq, e)) {
                e->oblock = new_oblock;

static void mq_force_mapping(struct dm_cache_policy *p,
                             dm_oblock_t current_oblock, dm_oblock_t new_oblock)
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __force_mapping(mq, current_oblock, new_oblock);
        mutex_unlock(&mq->lock);

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = to_cblock(mq->cache_pool.nr_allocated);
        mutex_unlock(&mq->lock);

static void mq_tick(struct dm_cache_policy *p)
        struct mq_policy *mq = to_mq_policy(p);
        unsigned long flags;

        spin_lock_irqsave(&mq->tick_lock, flags);
        mq->tick_protected++;
        spin_unlock_irqrestore(&mq->tick_lock, flags);
static int mq_set_config_value(struct dm_cache_policy *p,
                               const char *key, const char *value)
        struct mq_policy *mq = to_mq_policy(p);
        enum io_pattern pattern;

        if (!strcasecmp(key, "random_threshold"))
                pattern = PATTERN_RANDOM;
        else if (!strcasecmp(key, "sequential_threshold"))
                pattern = PATTERN_SEQUENTIAL;

        if (kstrtoul(value, 10, &tmp))

        mq->tracker.thresholds[pattern] = tmp;

static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
        struct mq_policy *mq = to_mq_policy(p);

        DMEMIT("4 random_threshold %u sequential_threshold %u",
               mq->tracker.thresholds[PATTERN_RANDOM],
               mq->tracker.thresholds[PATTERN_SEQUENTIAL]);
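        /*
         * With the default thresholds this emits
         * "4 random_threshold 4 sequential_threshold 512".
         */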
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
        mq->policy.destroy = mq_destroy;
        mq->policy.map = mq_map;
        mq->policy.lookup = mq_lookup;
        mq->policy.set_dirty = mq_set_dirty;
        mq->policy.clear_dirty = mq_clear_dirty;
        mq->policy.load_mapping = mq_load_mapping;
        mq->policy.walk_mappings = mq_walk_mappings;
        mq->policy.remove_mapping = mq_remove_mapping;
        mq->policy.remove_cblock = mq_remove_cblock;
        mq->policy.writeback_work = mq_writeback_work;
        mq->policy.force_mapping = mq_force_mapping;
        mq->policy.residency = mq_residency;
        mq->policy.tick = mq_tick;
        mq->policy.emit_config_values = mq_emit_config_values;
        mq->policy.set_config_value = mq_set_config_value;

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
        struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

        init_policy_functions(mq);
        iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
        mq->cache_size = cache_size;

        if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
                DMERR("couldn't initialize pool of pre-cache entries");
                goto bad_pre_cache_init;

        if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
                DMERR("couldn't initialize pool of cache entries");
                goto bad_cache_init;

        mq->tick_protected = 0;
        mq->promote_threshold = 0;
        mutex_init(&mq->lock);
        spin_lock_init(&mq->tick_lock);

        queue_init(&mq->pre_cache);
        queue_init(&mq->cache_clean);
        queue_init(&mq->cache_dirty);

        mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

        mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
        mq->hash_bits = ffs(mq->nr_buckets) - 1;
        mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
                goto bad_alloc_table;

        epool_exit(&mq->cache_pool);
        epool_exit(&mq->pre_cache_pool);
/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
        .version = {1, 1, 0},
        .owner = THIS_MODULE,

static struct dm_cache_policy_type default_policy_type = {
        .version = {1, 1, 0},
        .owner = THIS_MODULE,

static int __init mq_init(void)
        mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
                                           sizeof(struct entry),
                                           __alignof__(struct entry),
        if (!mq_entry_cache)

        r = dm_cache_policy_register(&mq_policy_type);
                DMERR("register failed %d", r);
                goto bad_register_mq;

        r = dm_cache_policy_register(&default_policy_type);
                DMINFO("version %u.%u.%u loaded",
                       mq_policy_type.version[0],
                       mq_policy_type.version[1],
                       mq_policy_type.version[2]);

        DMERR("register failed (as default) %d", r);

        dm_cache_policy_unregister(&mq_policy_type);

        kmem_cache_destroy(mq_entry_cache);

static void __exit mq_exit(void)
        dm_cache_policy_unregister(&mq_policy_type);
        dm_cache_policy_unregister(&default_policy_type);

        kmem_cache_destroy(mq_entry_cache);

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");