/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
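
/*
 * Minimal usage sketch (illustrative only: "src", "dst" and "copy_done"
 * are hypothetical caller names, and error handling is elided):
 *
 *	static void copy_done(int read_err, unsigned long write_err,
 *			      void *context)
 *	{
 *		// both error arguments zero => the copy succeeded
 *	}
 *
 *	struct dm_kcopyd_client *kc = dm_kcopyd_client_create(NULL);
 *	struct dm_io_region from = { .bdev = src, .sector = 0, .count = 8 };
 *	struct dm_io_region to   = { .bdev = dst, .sector = 0, .count = 8 };
 *
 *	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, NULL);
 *	...
 *	dm_kcopyd_client_destroy(kc);	// waits for outstanding jobs
 *
 * Passing a NULL throttle to dm_kcopyd_client_create() disables
 * throttling (see io_job_start() below).
 */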

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm-core.h"

#define SPLIT_COUNT	8
#define MIN_JOBS	8

#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB     1024

static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;

module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
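
/*
 * The parameter is read/write (S_IRUGO | S_IWUSR), so it can be tuned at
 * runtime; since dm-kcopyd is normally built into dm-mod, the sysfs path
 * would be (path is an assumption)
 * /sys/module/dm_mod/parameters/kcopyd_subjob_size_kb.
 */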

static unsigned dm_get_kcopyd_subjob_size(void)
{
	unsigned sub_job_size_kb;

	sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
						DEFAULT_SUB_JOB_SIZE_KB,
						MAX_SUB_JOB_SIZE_KB);

	/* Callers work in 512-byte sectors: 1 KiB == 2 sectors. */
	return sub_job_size_kb << 1;
}

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	struct page_list *pages;
	unsigned nr_reserved_pages;
	unsigned nr_free_pages;
	unsigned sub_job_size;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;

	mempool_t job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	struct dm_kcopyd_throttle *throttle;

	atomic_t nr_jobs;

/*
 * We maintain four lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that don't need to do any IO and just run a callback
 * iv)  jobs that have completed.
 *
 * All four of these are protected by job_lock.
 */
	spinlock_t job_lock;
	struct list_head callback_jobs;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};

static struct page_list zero_page_list;

static DEFINE_SPINLOCK(throttle_spinlock);

/*
 * IO/IDLE accounting slowly decays after (1 << ACCOUNT_INTERVAL_SHIFT) period.
 * When total_period >= (1 << ACCOUNT_INTERVAL_SHIFT) the counters are divided
 * by 2.
 */
#define ACCOUNT_INTERVAL_SHIFT		SHIFT_HZ
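
/*
 * Illustrative numbers: SHIFT_HZ approximates log2(HZ), so with HZ == 250
 * (SHIFT_HZ == 8) the accounting window is 1 << 8 == 256 jiffies, i.e.
 * roughly one second.
 */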

/*
 * Sleep this number of milliseconds.
 *
 * The value was decided experimentally.
 * Smaller values seem to cause an increased copy rate above the limit.
 * The reason for this is unknown but possibly due to jiffies rounding errors
 * or read/write cache inside the disk.
 */
#define SLEEP_MSEC			100

/*
 * Maximum number of sleep events. There is a theoretical livelock if many
 * kcopyd clients do work simultaneously; this limit avoids it.
 */
#define MAX_SLEEPS			10

static void io_job_start(struct dm_kcopyd_throttle *t)
{
	unsigned throttle, now, difference;
	int slept = 0, skew;

	if (unlikely(!t))
		return;

try_again:
	spin_lock_irq(&throttle_spinlock);

	throttle = READ_ONCE(t->throttle);

	if (likely(throttle >= 100))
		goto skip_limit;

	now = jiffies;
	difference = now - t->last_jiffies;
	t->last_jiffies = now;
	if (t->num_io_jobs)
		t->io_period += difference;
	t->total_period += difference;

	/*
	 * Maintain sane values if we got a temporary overflow.
	 */
	if (unlikely(t->io_period > t->total_period))
		t->io_period = t->total_period;

	if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
		int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
		t->total_period >>= shift;
		t->io_period >>= shift;
	}

	/* skew > 0 means the busy fraction exceeds the throttle percentage. */
	skew = t->io_period - throttle * t->total_period / 100;

	if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
		slept++;
		spin_unlock_irq(&throttle_spinlock);
		msleep(SLEEP_MSEC);
		goto try_again;
	}

skip_limit:
	t->num_io_jobs++;

	spin_unlock_irq(&throttle_spinlock);
}
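
/*
 * Worked example: with t->throttle == 25, skew becomes positive once
 * io_period exceeds 25% of total_period, so the caller sleeps in
 * SLEEP_MSEC steps (at most MAX_SLEEPS times) until the busy fraction
 * of the accounting window has decayed back under the limit.
 */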

static void io_job_finish(struct dm_kcopyd_throttle *t)
{
	unsigned long flags;

	if (unlikely(!t))
		return;

	spin_lock_irqsave(&throttle_spinlock, flags);

	t->num_io_jobs--;

	if (likely(READ_ONCE(t->throttle) >= 100))
		goto skip_limit;

	if (!t->num_io_jobs) {
		unsigned now, difference;

		now = jiffies;
		difference = now - t->last_jiffies;
		t->last_jiffies = now;

		t->io_period += difference;
		t->total_period += difference;

		/*
		 * Maintain sane values if we got a temporary overflow.
		 */
		if (unlikely(t->io_period > t->total_period))
			t->io_period = t->total_period;
	}

skip_limit:
	spin_unlock_irqrestore(&throttle_spinlock, flags);
}

static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

/*
 * Obtain one page for the use of kcopyd.
 */
static struct page_list *alloc_pl(gfp_t gfp)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), gfp);
	if (!pl)
		return NULL;

	pl->page = alloc_page(gfp | __GFP_HIGHMEM);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

/*
 * Add the provided pages to a client's free page list, releasing
 * back to the system any beyond the reserved_pages limit.
 */
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *next;

	do {
		next = pl->next;

		if (kc->nr_free_pages >= kc->nr_reserved_pages)
			free_pl(pl);
		else {
			pl->next = kc->pages;
			kc->pages = pl;
			kc->nr_free_pages++;
		}

		pl = next;
	} while (pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		/*
		 * Try an opportunistic allocation that fails fast rather
		 * than waiting; fall back to the client's reserve below.
		 */
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
		if (unlikely(!pl)) {
			/* Use reserved pages */
			pl = kc->pages;
			if (unlikely(!pl))
				goto out_of_memory;
			kc->pages = pl->next;
			kc->nr_free_pages--;
		}
		pl->next = *pages;
		*pages = pl;
	} while (--nr);

	return 0;

out_of_memory:
	if (*pages)
		kcopyd_put_pages(kc, *pages);
	return -ENOMEM;
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

/*
 * Allocate and reserve nr_pages for the use of a specific client.
 */
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
	unsigned i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL);
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kc->nr_reserved_pages += nr_pages;
	kcopyd_put_pages(kc, pl);

	return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_reserved_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * REQ_OP_READ, REQ_OP_WRITE or REQ_OP_WRITE_ZEROES.
	 */
	enum req_op op;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
	sector_t write_offset;

	struct kcopyd_job *master_job;
};

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
	/*
	 * Each cache object holds a master job plus its SPLIT_COUNT sub
	 * jobs as one contiguous array (see split_job()).
	 */
	_job_cache = kmem_cache_create("kcopyd_job",
				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
				__alignof__(struct kcopyd_job), 0, NULL);
	if (!_job_cache)
		return -ENOMEM;

	zero_page_list.next = &zero_page_list;
	zero_page_list.page = ZERO_PAGE(0);

	return 0;
}

void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}

/*
 * Functions to push a job onto the tail or head of a given job
 * list, and to pop jobs off it.
 */
static struct kcopyd_job *pop_io_job(struct list_head *jobs,
				     struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job;

	/*
	 * For I/O jobs, pop any read, any write without sequential write
	 * constraint and sequential writes that are at the right position.
	 */
	list_for_each_entry(job, jobs, list) {
		if (job->op == REQ_OP_READ ||
		    !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
			list_del(&job->list);
			return job;
		}

		if (job->write_offset == job->master_job->write_offset) {
			job->master_job->write_offset += job->source.count;
			list_del(&job->list);
			return job;
		}
	}

	return NULL;
}

static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;

	spin_lock_irq(&kc->job_lock);

	if (!list_empty(jobs)) {
		if (jobs == &kc->io_jobs)
			job = pop_io_job(jobs, kc);
		else {
			job = list_entry(jobs->next, struct kcopyd_job, list);
			list_del(&job->list);
		}
	}
	spin_unlock_irq(&kc->job_lock);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irq(&kc->job_lock);
	list_add(&job->list, jobs);
	spin_unlock_irq(&kc->job_lock);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages && job->pages != &zero_page_list)
		kcopyd_put_pages(kc, job->pages);
	/*
	 * If this is the master job, the sub jobs have already
	 * completed so we can free everything.
	 */
	if (job->master_job == job) {
		mutex_destroy(&job->lock);
		mempool_free(job, &kc->job_pool);
	}
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	cond_resched();

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	io_job_finish(kc->throttle);

	if (error) {
		if (op_is_write(job->op))
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!(job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (op_is_write(job->op))
		push(&kc->complete_jobs, job);
	else {
		/* The read phase is done; reissue the same job as a write. */
		job->op = REQ_OP_WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}

/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = job->op,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	/*
	 * If we need to write sequentially and some reads or writes failed,
	 * no point in continuing.
	 */
	if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
	    job->master_job->write_err) {
		job->write_err = job->master_job->write_err;
		return -EIO;
	}

	io_job_start(job->kc->throttle);

	if (job->op == REQ_OP_READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;
	/* count is in 512-byte sectors; PAGE_SIZE >> 9 is sectors per page. */
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
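
/*
 * Example with default numbers: a 512 KiB sub-job (1024 sectors) on a
 * 4 KiB PAGE_SIZE machine needs dm_div_up(1024, 8) == 128 pages.
 */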

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (op_is_write(job->op))
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			wake(kc);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	spin_lock_irq(&kc->job_lock);
	list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
	spin_unlock_irq(&kc->job_lock);

	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->callback_jobs, job);
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}

static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
	struct kcopyd_job *job = sub_job->master_job;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    job->flags & BIT(DM_KCOPYD_IGNORE_ERROR)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > kc->sub_job_size)
				count = kc->sub_job_size;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;

		*sub_job = *job;
		sub_job->write_offset = progress;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}

/*
 * Create some sub jobs to share the work between them.
 */
static void split_job(struct kcopyd_job *master_job)
{
	int i;

	atomic_inc(&master_job->kc->nr_jobs);

	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++) {
		master_job[i + 1].master_job = master_job;
		segment_complete(0, 0u, &master_job[i + 1]);
	}
}
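
/*
 * Note: master_job[i + 1] is valid because every mempool object is an
 * array of SPLIT_COUNT + 1 jobs (see dm_kcopyd_init()), with the master
 * job first and its sub jobs following it contiguously.
 */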

void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		    unsigned int num_dests, struct dm_io_region *dests,
		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;
	int i;

	/*
	 * Allocate an array of jobs consisting of one master job
	 * followed by SPLIT_COUNT sub jobs.
	 */
	job = mempool_alloc(&kc->job_pool, GFP_NOIO);
	mutex_init(&job->lock);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	/*
	 * If one of the destinations is a host-managed zoned block device,
	 * we need to write sequentially. If one of the destinations is a
	 * host-aware device, then leave it to the caller to choose what to do.
	 */
	if (!(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
		for (i = 0; i < job->num_dests; i++) {
			if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
				job->flags |= BIT(DM_KCOPYD_WRITE_SEQ);
				break;
			}
		}
	}

	/*
	 * If we need to write sequentially, errors cannot be ignored.
	 */
	if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
	    job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))
		job->flags &= ~BIT(DM_KCOPYD_IGNORE_ERROR);

	if (from) {
		job->source = *from;
		job->pages = NULL;
		job->op = REQ_OP_READ;
	} else {
		memset(&job->source, 0, sizeof(job->source));
		job->source.count = job->dests[0].count;
		job->pages = &zero_page_list;

		/*
		 * Use WRITE ZEROES to optimize zeroing if all dests support it.
		 */
		job->op = REQ_OP_WRITE_ZEROES;
		for (i = 0; i < job->num_dests; i++)
			if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
				job->op = REQ_OP_WRITE;
				break;
			}
	}

	job->fn = fn;
	job->context = context;
	job->master_job = job;
	job->write_offset = 0;

	if (job->source.count <= kc->sub_job_size)
		dispatch_job(job);
	else {
		job->progress = 0;
		split_job(job);
	}
}
EXPORT_SYMBOL(dm_kcopyd_copy);

void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
		    unsigned num_dests, struct dm_io_region *dests,
		    unsigned flags, dm_kcopyd_notify_fn fn, void *context)
{
	dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
EXPORT_SYMBOL(dm_kcopyd_zero);

void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
				 dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	job = mempool_alloc(&kc->job_pool, GFP_NOIO);

	memset(job, 0, sizeof(struct kcopyd_job));
	job->kc = kc;
	job->fn = fn;
	job->context = context;
	job->master_job = job;

	atomic_inc(&kc->nr_jobs);

	return job;
}
EXPORT_SYMBOL(dm_kcopyd_prepare_callback);

void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
{
	struct kcopyd_job *job = j;
	struct dm_kcopyd_client *kc = job->kc;

	job->read_err = read_err;
	job->write_err = write_err;

	push(&kc->callback_jobs, job);
	wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);
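
/*
 * Together with dm_kcopyd_prepare_callback() above, this lets a caller
 * hand an arbitrary completion to kcopyd: the job carries no io, it is
 * queued on callback_jobs, and the notify fn runs from the kcopyd
 * thread, preserving the single-threaded completion ordering noted in
 * segment_complete().
 */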

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
	int r;
	unsigned reserve_pages;
	struct dm_kcopyd_client *kc;

	kc = kzalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->callback_jobs);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);
	kc->throttle = throttle;

	r = mempool_init_slab_pool(&kc->job_pool, MIN_JOBS, _job_cache);
	if (r)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
	if (!kc->kcopyd_wq) {
		r = -ENOMEM;
		goto bad_workqueue;
	}

	kc->sub_job_size = dm_get_kcopyd_subjob_size();
	reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);

	kc->pages = NULL;
	kc->nr_reserved_pages = kc->nr_free_pages = 0;
	r = client_reserve_pages(kc, reserve_pages);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create();
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	return kc;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_exit(&kc->job_pool);
bad_slab:
	kfree(kc);

	return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->callback_jobs));
	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_exit(&kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);

void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc)
{
	flush_workqueue(kc->kcopyd_wq);
}
EXPORT_SYMBOL(dm_kcopyd_client_flush);