// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define DM_MSG_PREFIX	"region hash"

/*
 *------------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions. Each
 * region can be in one of three states: clean, dirty,
 * nosync. There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull. dm_rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery. rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered. dm_rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash table.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions. This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *------------------------------------------------------------------
 */
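
/*
 * Rough lifecycle sketch, derived from the code below:
 *
 *   write arrives:  rh_inc()                 CLEAN -> DIRTY, log is marked
 *   writes drain:   dm_rh_dec()              DIRTY -> CLEAN, on clean_regions
 *   resync chosen:  __rh_recovery_prepare()  -> DM_RH_RECOVERING, quiesced
 *   kcopyd done:    dm_rh_recovery_end()     -> recovered/failed_recovered
 *   housekeeping:   dm_rh_update_states()    dispatches delayed io, frees
 */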

struct dm_region_hash {
	uint32_t region_size;
	unsigned int region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	unsigned int mask;
	unsigned int nr_buckets;
	unsigned int prime;
	unsigned int shift;
	struct list_head *buckets;

	/*
	 * If there was a flush failure no regions can be marked clean.
	 */
	int flush_failure;

	unsigned int max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
	struct semaphore recovery_count;

	mempool_t region_pool;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule bios writes */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wakeup callers worker thread. */
	void (*wakeup_workers)(void *context);

	/* Callback function to wakeup callers recovery waiters. */
	void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
				      rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
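
/*
 * Example (illustrative only): region_size is assumed to be a power of
 * two, so region_shift = __ffs(region_size) is its log2. With a
 * region_size of 1024 sectors, a bio at sector 5000 past target_begin
 * maps to region 5000 >> 10 = 4, which starts at sector 4 << 10 = 4096.
 */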

void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);

/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()????
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned int max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned int nr_buckets, max_buckets;
	size_t i;
	int ret;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;
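
	/*
	 * nr_buckets is now the smallest power of two >= max_buckets
	 * (at least 128), halved - i.e. a power of two no smaller than
	 * 64. Keeping it a power of two lets 'mask' select a bucket.
	 */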

	rh = kzalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = __ffs(region_size);
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->flush_failure = 0;

	ret = mempool_init_kmalloc_pool(&rh->region_pool, MIN_REGIONS,
					sizeof(struct dm_region));
	if (ret) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);

void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned int h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, &rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	mempool_exit(&rh->region_pool);
	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
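
/*
 * Multiplicative hashing: scale the region number by a large constant,
 * keep the high-order bits selected by 'shift' and mask down to the
 * power-of-two bucket count.
 */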
static unsigned int rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned int) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(&rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, &rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}
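
/*
 * Must be called with the hash read lock held. If the region is not
 * yet hashed, the read lock is dropped so __rh_alloc() can take the
 * write lock to insert it, then reacquired before returning.
 */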
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a DM_RH_NOSYNC
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed. If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}

/*
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio->bi_opf & REQ_PREFLUSH) {
		rh->flush_failure = 1;
		return;
	}

	if (bio_op(bio) == REQ_OP_DISCARD)
		return;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * Either case, the region should have not been connected to list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
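
/*
 * dm_rh_update_states() is typically driven from the caller's worker
 * thread (e.g. kmirrord for dm-raid1): it detaches the clean, recovered
 * and failed_recovered lists under the locks, finishes each region
 * without further locking, then flushes the dirty log once at the end.
 */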
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, &rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, &rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, &rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);

static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

/*
 * Returns any quiesced regions.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);

	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);

void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
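
/*
 * Typical recovery flow for a caller such as dm-raid1 (an illustrative
 * sketch, not a contract):
 *
 *	dm_rh_recovery_prepare(rh);
 *	while ((reg = dm_rh_recovery_start(rh)))
 *		... hand the region to kcopyd ...
 *	dm_rh_recovery_end(reg, success);	  (from the kcopyd callback)
 *	dm_rh_update_states(rh, errors_handled);  (from the worker thread)
 */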

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");