// SPDX-License-Identifier: GPL-2.0-only
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"
static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
        unsigned long long sectors[2];
        unsigned long long ios[2];
        unsigned long long merges[2];
        unsigned long long ticks[2];
        unsigned long long io_ticks[2];
        unsigned long long io_ticks_total;
        unsigned long long time_in_queue;
        unsigned long long *histogram;
};
struct dm_stat_shared {
        atomic_t in_flight[2];
        unsigned long long stamp;
        struct dm_stat_percpu tmp;
};
struct dm_stat {
        struct list_head list_entry;
        int id;
        unsigned int stat_flags;
        size_t n_entries;
        sector_t start;
        sector_t end;
        sector_t step;
        unsigned int n_histogram_entries;
        unsigned long long *histogram_boundaries;
        const char *program_id;
        const char *aux_data;
        struct rcu_head rcu_head;
        size_t shared_alloc_size;
        size_t percpu_alloc_size;
        size_t histogram_alloc_size;
        struct dm_stat_percpu *stat_percpu[NR_CPUS];
        struct dm_stat_shared stat_shared[];
};
#define STAT_PRECISE_TIMESTAMPS 1

struct dm_stats_last_position {
        sector_t last_sector;
        unsigned int last_rw;
};

static DEFINE_STATIC_KEY_FALSE(stats_enabled);
/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2
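
/*
 * Worked example (illustrative, not part of the original code): on a
 * machine with 4 GiB of RAM, the totalram_pages() / DM_STATS_MEMORY_FACTOR
 * test below caps the statistics allocations at 1 GiB; the second test
 * caps them at half of whatever vmalloc arena the architecture provides.
 * The smaller of the two limits wins.
 */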
static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;
static bool __check_shared_memory(size_t alloc_size)
{
        size_t a;

        a = shared_memory_amount + alloc_size;
        if (a < shared_memory_amount)
                return false;
        if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
                return false;
#ifdef CONFIG_MMU
        if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
                return false;
#endif
        return true;
}
static bool check_shared_memory(size_t alloc_size)
{
        bool ret;

        spin_lock_irq(&shared_memory_lock);

        ret = __check_shared_memory(alloc_size);

        spin_unlock_irq(&shared_memory_lock);

        return ret;
}
static bool claim_shared_memory(size_t alloc_size)
{
        spin_lock_irq(&shared_memory_lock);

        if (!__check_shared_memory(alloc_size)) {
                spin_unlock_irq(&shared_memory_lock);
                return false;
        }

        shared_memory_amount += alloc_size;

        spin_unlock_irq(&shared_memory_lock);

        return true;
}
static void free_shared_memory(size_t alloc_size)
{
        unsigned long flags;

        spin_lock_irqsave(&shared_memory_lock, flags);

        if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
                spin_unlock_irqrestore(&shared_memory_lock, flags);
                DMCRIT("Memory usage accounting bug.");
                return;
        }

        shared_memory_amount -= alloc_size;

        spin_unlock_irqrestore(&shared_memory_lock, flags);
}
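
/*
 * dm_kvzalloc()/dm_kvfree() wrap kvzalloc_node()/kvfree(): every buffer
 * used by the statistics code is claimed against the shared-memory budget
 * above before it is allocated, and returned to the budget when freed.
 */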
static void *dm_kvzalloc(size_t alloc_size, int node)
{
        void *p;

        if (!claim_shared_memory(alloc_size))
                return NULL;

        p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
        if (p)
                return p;

        free_shared_memory(alloc_size);

        return NULL;
}
static void dm_kvfree(void *ptr, size_t alloc_size)
{
        if (!ptr)
                return;

        free_shared_memory(alloc_size);

        kvfree(ptr);
}
static void dm_stat_free(struct rcu_head *head)
{
        int cpu;
        struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

        kfree(s->histogram_boundaries);
        kfree(s->program_id);
        kfree(s->aux_data);
        for_each_possible_cpu(cpu) {
                dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
                dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
        }
        dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
        dm_kvfree(s, s->shared_alloc_size);
}
static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
        return atomic_read(&shared->in_flight[READ]) +
               atomic_read(&shared->in_flight[WRITE]);
}
void dm_stats_init(struct dm_stats *stats)
{
        int cpu;
        struct dm_stats_last_position *last;

        mutex_init(&stats->mutex);
        INIT_LIST_HEAD(&stats->list);
        stats->precise_timestamps = false;
        stats->last = alloc_percpu(struct dm_stats_last_position);
        for_each_possible_cpu(cpu) {
                last = per_cpu_ptr(stats->last, cpu);
                last->last_sector = (sector_t)ULLONG_MAX;
                last->last_rw = UINT_MAX;
        }
}
void dm_stats_cleanup(struct dm_stats *stats)
{
        size_t ni;
        struct dm_stat *s;
        struct dm_stat_shared *shared;

        while (!list_empty(&stats->list)) {
                s = container_of(stats->list.next, struct dm_stat, list_entry);
                list_del(&s->list_entry);
                for (ni = 0; ni < s->n_entries; ni++) {
                        shared = &s->stat_shared[ni];
                        if (WARN_ON(dm_stat_in_flight(shared))) {
                                DMCRIT("leaked in-flight counter at index %lu "
                                       "(start %llu, end %llu, step %llu): reads %d, writes %d",
                                       (unsigned long)ni,
                                       (unsigned long long)s->start,
                                       (unsigned long long)s->end,
                                       (unsigned long long)s->step,
                                       atomic_read(&shared->in_flight[READ]),
                                       atomic_read(&shared->in_flight[WRITE]));
                        }
                }
                dm_stat_free(&s->rcu_head);
        }
        free_percpu(stats->last);
        mutex_destroy(&stats->mutex);
}
static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
{
        struct list_head *l;
        struct dm_stat *tmp_s;
        bool precise_timestamps = false;

        list_for_each(l, &stats->list) {
                tmp_s = container_of(l, struct dm_stat, list_entry);
                if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) {
                        precise_timestamps = true;
                        break;
                }
        }
        stats->precise_timestamps = precise_timestamps;
}
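
/*
 * Create a statistics region: validate the range and step, allocate the
 * shared and per-CPU counter arrays (all accounted through dm_kvzalloc()),
 * then suspend the device, link the region into the list (kept sorted by
 * id, picking the lowest free id) and resume. Returns the new region id
 * or a negative errno.
 */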
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
                           sector_t step, unsigned int stat_flags,
                           unsigned int n_histogram_entries,
                           unsigned long long *histogram_boundaries,
                           const char *program_id, const char *aux_data,
                           void (*suspend_callback)(struct mapped_device *),
                           void (*resume_callback)(struct mapped_device *),
                           struct mapped_device *md)
{
        struct list_head *l;
        struct dm_stat *s, *tmp_s;
        sector_t n_entries;
        size_t ni;
        size_t shared_alloc_size;
        size_t percpu_alloc_size;
        size_t histogram_alloc_size;
        struct dm_stat_percpu *p;
        int cpu;
        int ret_id;
        int r;

        if (end < start || !step)
                return -EINVAL;

        n_entries = end - start;
        if (dm_sector_div64(n_entries, step))
                n_entries++;

        if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
                return -EOVERFLOW;

        shared_alloc_size = struct_size(s, stat_shared, n_entries);
        if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
                return -EOVERFLOW;

        percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
        if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
                return -EOVERFLOW;

        histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
        if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
                return -EOVERFLOW;

        if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
                                 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
                return -ENOMEM;

        s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
        if (!s)
                return -ENOMEM;

        s->stat_flags = stat_flags;
        s->n_entries = n_entries;
        s->start = start;
        s->end = end;
        s->step = step;
        s->shared_alloc_size = shared_alloc_size;
        s->percpu_alloc_size = percpu_alloc_size;
        s->histogram_alloc_size = histogram_alloc_size;

        s->n_histogram_entries = n_histogram_entries;
        s->histogram_boundaries = kmemdup(histogram_boundaries,
                        s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
        if (!s->histogram_boundaries) {
                r = -ENOMEM;
                goto out;
        }

        s->program_id = kstrdup(program_id, GFP_KERNEL);
        if (!s->program_id) {
                r = -ENOMEM;
                goto out;
        }
        s->aux_data = kstrdup(aux_data, GFP_KERNEL);
        if (!s->aux_data) {
                r = -ENOMEM;
                goto out;
        }

        for (ni = 0; ni < n_entries; ni++) {
                atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
                atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
                cond_resched();
        }

        if (s->n_histogram_entries) {
                unsigned long long *hi;

                hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
                if (!hi) {
                        r = -ENOMEM;
                        goto out;
                }
                for (ni = 0; ni < n_entries; ni++) {
                        s->stat_shared[ni].tmp.histogram = hi;
                        hi += s->n_histogram_entries + 1;
                        cond_resched();
                }
        }

        for_each_possible_cpu(cpu) {
                p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
                if (!p) {
                        r = -ENOMEM;
                        goto out;
                }
                s->stat_percpu[cpu] = p;
                if (s->n_histogram_entries) {
                        unsigned long long *hi;

                        hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
                        if (!hi) {
                                r = -ENOMEM;
                                goto out;
                        }
                        for (ni = 0; ni < n_entries; ni++) {
                                p[ni].histogram = hi;
                                hi += s->n_histogram_entries + 1;
                                cond_resched();
                        }
                }
        }

        /*
         * Suspend/resume to make sure there is no i/o in flight,
         * so that newly created statistics will be exact.
         *
         * (note: we couldn't suspend earlier because we must not
         * allocate memory while suspended)
         */
        suspend_callback(md);

        mutex_lock(&stats->mutex);
        s->id = 0;
        list_for_each(l, &stats->list) {
                tmp_s = container_of(l, struct dm_stat, list_entry);
                if (WARN_ON(tmp_s->id < s->id)) {
                        r = -EINVAL;
                        goto out_unlock_resume;
                }
                if (tmp_s->id > s->id)
                        break;
                if (unlikely(s->id == INT_MAX)) {
                        r = -ENFILE;
                        goto out_unlock_resume;
                }
                s->id++;
        }
        ret_id = s->id;
        list_add_tail_rcu(&s->list_entry, l);

        dm_stats_recalc_precise_timestamps(stats);

        if (!static_key_enabled(&stats_enabled.key))
                static_branch_enable(&stats_enabled);

        mutex_unlock(&stats->mutex);

        resume_callback(md);

        return ret_id;

out_unlock_resume:
        mutex_unlock(&stats->mutex);
        resume_callback(md);
out:
        dm_stat_free(&s->rcu_head);
        return r;
}
static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
        struct dm_stat *s;

        list_for_each_entry(s, &stats->list, list_entry) {
                if (s->id > id)
                        break;
                if (s->id == id)
                        return s;
        }

        return NULL;
}
static int dm_stats_delete(struct dm_stats *stats, int id)
{
        struct dm_stat *s;
        int cpu;

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        list_del_rcu(&s->list_entry);

        dm_stats_recalc_precise_timestamps(stats);

        mutex_unlock(&stats->mutex);

        /*
         * vfree can't be called from RCU callback
         */
        for_each_possible_cpu(cpu)
                if (is_vmalloc_addr(s->stat_percpu) ||
                    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
                        goto do_sync_free;
        if (is_vmalloc_addr(s) ||
            is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
                synchronize_rcu_expedited();
                dm_stat_free(&s->rcu_head);
        } else {
                WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
                call_rcu(&s->rcu_head, dm_stat_free);
        }
        return 0;
}
static int dm_stats_list(struct dm_stats *stats, const char *program,
                         char *result, unsigned int maxlen)
{
        struct dm_stat *s;
        sector_t len;
        unsigned int sz = 0;

        /*
         * Output format:
         *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
         */

        mutex_lock(&stats->mutex);
        list_for_each_entry(s, &stats->list, list_entry) {
                if (!program || !strcmp(program, s->program_id)) {
                        len = s->end - s->start;
                        DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
                               (unsigned long long)s->start,
                               (unsigned long long)len,
                               (unsigned long long)s->step,
                               s->program_id,
                               s->aux_data);
                        if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
                                DMEMIT(" precise_timestamps");
                        if (s->n_histogram_entries) {
                                unsigned int i;

                                DMEMIT(" histogram:");
                                for (i = 0; i < s->n_histogram_entries; i++) {
                                        if (i)
                                                DMEMIT(",");
                                        DMEMIT("%llu", s->histogram_boundaries[i]);
                                }
                        }
                        DMEMIT("\n");
                }
                cond_resched();
        }
        mutex_unlock(&stats->mutex);

        return 1;
}
static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
                          struct dm_stat_percpu *p)
{
        /*
         * This is racy, but so is part_round_stats_single.
         */
        unsigned long long now, difference;
        unsigned int in_flight_read, in_flight_write;

        if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
                now = jiffies;
        else
                now = ktime_to_ns(ktime_get());

        difference = now - shared->stamp;
        if (!difference)
                return;

        in_flight_read = (unsigned int)atomic_read(&shared->in_flight[READ]);
        in_flight_write = (unsigned int)atomic_read(&shared->in_flight[WRITE]);
        if (in_flight_read)
                p->io_ticks[READ] += difference;
        if (in_flight_write)
                p->io_ticks[WRITE] += difference;
        if (in_flight_read + in_flight_write) {
                p->io_ticks_total += difference;
                p->time_in_queue += (in_flight_read + in_flight_write) * difference;
        }
        shared->stamp = now;
}
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
                              int idx, sector_t len,
                              struct dm_stats_aux *stats_aux, bool end,
                              unsigned long duration_jiffies)
{
        struct dm_stat_shared *shared = &s->stat_shared[entry];
        struct dm_stat_percpu *p;

        /*
         * For strict correctness we should use local_irq_save/restore
         * instead of preempt_disable/enable.
         *
         * preempt_disable/enable is racy if the driver finishes bios
         * from non-interrupt context as well as from interrupt context
         * or from more different interrupts.
         *
         * On 64-bit architectures the race only results in not counting some
         * events, so it is acceptable. On 32-bit architectures the race could
         * cause the counter going off by 2^32, so we need to do proper locking
         * there.
         *
         * part_stat_lock()/part_stat_unlock() have this race too.
         */
#if BITS_PER_LONG == 32
        unsigned long flags;

        local_irq_save(flags);
#else
        preempt_disable();
#endif
        p = &s->stat_percpu[smp_processor_id()][entry];

        if (!end) {
                dm_stat_round(s, shared, p);
                atomic_inc(&shared->in_flight[idx]);
        } else {
                unsigned long long duration;

                dm_stat_round(s, shared, p);
                atomic_dec(&shared->in_flight[idx]);
                p->sectors[idx] += len;
                p->ios[idx] += 1;
                p->merges[idx] += stats_aux->merged;
                if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
                        p->ticks[idx] += duration_jiffies;
                        duration = jiffies_to_msecs(duration_jiffies);
                } else {
                        p->ticks[idx] += stats_aux->duration_ns;
                        duration = stats_aux->duration_ns;
                }
                if (s->n_histogram_entries) {
                        /*
                         * Binary search for the bucket whose boundaries
                         * bracket this I/O's duration.
                         */
                        unsigned int lo = 0, hi = s->n_histogram_entries + 1;

                        while (lo + 1 < hi) {
                                unsigned int mid = (lo + hi) / 2;

                                if (s->histogram_boundaries[mid - 1] > duration)
                                        hi = mid;
                                else
                                        lo = mid;
                        }
                        p->histogram[lo]++;
                }
        }

#if BITS_PER_LONG == 32
        local_irq_restore(flags);
#else
        preempt_enable();
#endif
}
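
/*
 * A single bio may cross several step-sized areas of a region; walk the
 * overlap between the bio and the region fragment by fragment, charging
 * each fragment to its own area's counters.
 */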
static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
                          sector_t bi_sector, sector_t end_sector,
                          bool end, unsigned long duration_jiffies,
                          struct dm_stats_aux *stats_aux)
{
        sector_t rel_sector, offset, todo, fragment_len;
        size_t entry;

        if (end_sector <= s->start || bi_sector >= s->end)
                return;
        if (unlikely(bi_sector < s->start)) {
                rel_sector = 0;
                todo = end_sector - s->start;
        } else {
                rel_sector = bi_sector - s->start;
                todo = end_sector - bi_sector;
        }
        if (unlikely(end_sector > s->end))
                todo -= (end_sector - s->end);

        offset = dm_sector_div64(rel_sector, s->step);
        entry = rel_sector;
        do {
                if (WARN_ON_ONCE(entry >= s->n_entries)) {
                        DMCRIT("Invalid area access in region id %d", s->id);
                        return;
                }
                fragment_len = todo;
                if (fragment_len > s->step - offset)
                        fragment_len = s->step - offset;
                dm_stat_for_entry(s, entry, bi_rw, fragment_len,
                                  stats_aux, end, duration_jiffies);
                todo -= fragment_len;
                entry++;
                offset = 0;
        } while (unlikely(todo != 0));
}
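
/*
 * Entry point called from DM core for every bio start and completion.
 * On start we remember the bio's end position and direction per-CPU, so
 * that the next bio beginning exactly there with the same direction can
 * be counted as a merge; on completion we compute the duration and pass
 * the event to every region that the bio overlaps.
 */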
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
                         sector_t bi_sector, unsigned int bi_sectors, bool end,
                         unsigned long start_time,
                         struct dm_stats_aux *stats_aux)
{
        struct dm_stat *s;
        sector_t end_sector;
        struct dm_stats_last_position *last;
        bool got_precise_time;
        unsigned long duration_jiffies = 0;

        if (unlikely(!bi_sectors))
                return;

        end_sector = bi_sector + bi_sectors;

        if (!end) {
                /*
                 * A race condition can at worst result in the merged flag being
                 * misrepresented, so we don't have to disable preemption here.
                 */
                last = raw_cpu_ptr(stats->last);
                stats_aux->merged =
                        (bi_sector == (READ_ONCE(last->last_sector) &&
                                       ((bi_rw == WRITE) ==
                                        (READ_ONCE(last->last_rw) == WRITE))
                                       ));
                WRITE_ONCE(last->last_sector, end_sector);
                WRITE_ONCE(last->last_rw, bi_rw);
        } else
                duration_jiffies = jiffies - start_time;

        rcu_read_lock();

        got_precise_time = false;
        list_for_each_entry_rcu(s, &stats->list, list_entry) {
                if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
                        /* start (!end) duration_ns is set by DM core's alloc_io() */
                        if (end)
                                stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
                        got_precise_time = true;
                }
                __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
        }

        rcu_read_unlock();
}
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
                                                   struct dm_stat *s, size_t x)
{
        int cpu;
        struct dm_stat_percpu *p;

        local_irq_disable();
        p = &s->stat_percpu[smp_processor_id()][x];
        dm_stat_round(s, shared, p);
        local_irq_enable();

        shared->tmp.sectors[READ] = 0;
        shared->tmp.sectors[WRITE] = 0;
        shared->tmp.ios[READ] = 0;
        shared->tmp.ios[WRITE] = 0;
        shared->tmp.merges[READ] = 0;
        shared->tmp.merges[WRITE] = 0;
        shared->tmp.ticks[READ] = 0;
        shared->tmp.ticks[WRITE] = 0;
        shared->tmp.io_ticks[READ] = 0;
        shared->tmp.io_ticks[WRITE] = 0;
        shared->tmp.io_ticks_total = 0;
        shared->tmp.time_in_queue = 0;

        if (s->n_histogram_entries)
                memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

        for_each_possible_cpu(cpu) {
                p = &s->stat_percpu[cpu][x];
                shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
                shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
                shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
                shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
                shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
                shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
                shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
                shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
                shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
                shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
                shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
                shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
                if (s->n_histogram_entries) {
                        unsigned int i;

                        for (i = 0; i < s->n_histogram_entries + 1; i++)
                                shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
                }
        }
}
static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
                            bool init_tmp_percpu_totals)
{
        size_t x;
        struct dm_stat_shared *shared;
        struct dm_stat_percpu *p;

        for (x = idx_start; x < idx_end; x++) {
                shared = &s->stat_shared[x];
                if (init_tmp_percpu_totals)
                        __dm_stat_init_temporary_percpu_totals(shared, s, x);
                local_irq_disable();
                p = &s->stat_percpu[smp_processor_id()][x];
                p->sectors[READ] -= shared->tmp.sectors[READ];
                p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
                p->ios[READ] -= shared->tmp.ios[READ];
                p->ios[WRITE] -= shared->tmp.ios[WRITE];
                p->merges[READ] -= shared->tmp.merges[READ];
                p->merges[WRITE] -= shared->tmp.merges[WRITE];
                p->ticks[READ] -= shared->tmp.ticks[READ];
                p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
                p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
                p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
                p->io_ticks_total -= shared->tmp.io_ticks_total;
                p->time_in_queue -= shared->tmp.time_in_queue;
                local_irq_enable();
                if (s->n_histogram_entries) {
                        unsigned int i;

                        for (i = 0; i < s->n_histogram_entries + 1; i++) {
                                local_irq_disable();
                                p = &s->stat_percpu[smp_processor_id()][x];
                                p->histogram[i] -= shared->tmp.histogram[i];
                                local_irq_enable();
                        }
                }
                cond_resched();
        }
}
static int dm_stats_clear(struct dm_stats *stats, int id)
{
        struct dm_stat *s;

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        __dm_stat_clear(s, 0, s->n_entries, true);

        mutex_unlock(&stats->mutex);

        return 1;
}
/*
 * This is like jiffies_to_msec, but works for 64-bit values.
 */
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
        unsigned long long result;
        unsigned int mult;

        if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
                return j;

        result = 0;
        if (j)
                result = jiffies_to_msecs(j & 0x3fffff);
        if (j >= 1 << 22) {
                mult = jiffies_to_msecs(1 << 22);
                result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
        }
        if (j >= 1ULL << 44)
                result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

        return result;
}
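
/*
 * Worked example (illustrative): with HZ == 1000, jiffies_to_msecs(x) == x,
 * so mult == 1 << 22 and the sum above reduces to
 * (j & 0x3fffff) + ((j >> 22) & 0x3fffff) * 2^22 + (j >> 44) * 2^44 == j.
 * Splitting into 22-bit chunks keeps every jiffies_to_msecs() argument
 * comfortably inside the 32-bit range that the function accepts.
 */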
static int dm_stats_print(struct dm_stats *stats, int id,
                          size_t idx_start, size_t idx_len,
                          bool clear, char *result, unsigned int maxlen)
{
        unsigned int sz = 0;
        struct dm_stat *s;
        size_t x;
        sector_t start, end, step;
        size_t idx_end;
        struct dm_stat_shared *shared;

        /*
         * Output format:
         *   <start_sector>+<length> counters
         */

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        idx_end = idx_start + idx_len;
        if (idx_end < idx_start ||
            idx_end > s->n_entries)
                idx_end = s->n_entries;

        if (idx_start > idx_end)
                idx_start = idx_end;

        step = s->step;
        start = s->start + (step * idx_start);

        for (x = idx_start; x < idx_end; x++, start = end) {
                shared = &s->stat_shared[x];
                end = start + step;
                if (unlikely(end > s->end))
                        end = s->end;

                __dm_stat_init_temporary_percpu_totals(shared, s, x);

                DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
                       (unsigned long long)start,
                       (unsigned long long)step,
                       shared->tmp.ios[READ],
                       shared->tmp.merges[READ],
                       shared->tmp.sectors[READ],
                       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
                       shared->tmp.ios[WRITE],
                       shared->tmp.merges[WRITE],
                       shared->tmp.sectors[WRITE],
                       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
                       dm_stat_in_flight(shared),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
                       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
                if (s->n_histogram_entries) {
                        unsigned int i;

                        for (i = 0; i < s->n_histogram_entries + 1; i++)
                                DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
                }
                DMEMIT("\n");

                if (unlikely(sz + 1 >= maxlen))
                        goto buffer_overflow;

                cond_resched();
        }

        if (clear)
                __dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
        mutex_unlock(&stats->mutex);

        return 1;
}
static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
        struct dm_stat *s;
        const char *new_aux_data;

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        new_aux_data = kstrdup(aux_data, GFP_KERNEL);
        if (!new_aux_data) {
                mutex_unlock(&stats->mutex);
                return -ENOMEM;
        }

        kfree(s->aux_data);
        s->aux_data = new_aux_data;

        mutex_unlock(&stats->mutex);

        return 0;
}
static int parse_histogram(const char *h, unsigned int *n_histogram_entries,
                           unsigned long long **histogram_boundaries)
{
        const char *q;
        unsigned int n;
        unsigned long long last;

        *n_histogram_entries = 1;
        for (q = h; *q; q++)
                if (*q == ',')
                        (*n_histogram_entries)++;

        *histogram_boundaries = kmalloc_array(*n_histogram_entries,
                                              sizeof(unsigned long long),
                                              GFP_KERNEL);
        if (!*histogram_boundaries)
                return -ENOMEM;

        n = 0;
        last = 0;
        while (1) {
                unsigned long long hi;
                int s;
                char ch;

                s = sscanf(h, "%llu%c", &hi, &ch);
                if (!s || (s == 2 && ch != ','))
                        return -EINVAL;
                if (n && hi <= last)
                        return -EINVAL;
                last = hi;
                (*histogram_boundaries)[n] = hi;
                if (s == 1)
                        return 0;
                h = strchr(h, ',') + 1;
                n++;
        }
}
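
/*
 * Example (illustrative): the string "1,5,10" parses into three strictly
 * increasing boundaries {1, 5, 10}, defining four histogram buckets:
 * <1, 1..4, 5..9 and >=10 (milliseconds, or nanoseconds when the region
 * uses precise_timestamps).
 */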
static int message_stats_create(struct mapped_device *md,
                                unsigned int argc, char **argv,
                                char *result, unsigned int maxlen)
{
        int r;
        int id;
        char dummy;
        unsigned long long start, end, len, step;
        unsigned int divisor;
        const char *program_id, *aux_data;
        unsigned int stat_flags = 0;
        unsigned int n_histogram_entries = 0;
        unsigned long long *histogram_boundaries = NULL;
        struct dm_arg_set as, as_backup;
        const char *a;
        unsigned int feature_args;

        /*
         * Input format:
         *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
         */

        if (argc < 3)
                goto ret_einval;

        as.argc = argc;
        as.argv = argv;
        dm_consume_args(&as, 1);

        a = dm_shift_arg(&as);
        if (!strcmp(a, "-")) {
                start = 0;
                len = dm_get_size(md);
                if (!len)
                        len = 1;
        } else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
                   start != (sector_t)start || len != (sector_t)len)
                goto ret_einval;

        end = start + len;
        if (start >= end)
                goto ret_einval;

        a = dm_shift_arg(&as);
        if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
                if (!divisor)
                        return -EINVAL;
                step = end - start;
                if (do_div(step, divisor))
                        step++;
                if (!step)
                        step = 1;
        } else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
                   step != (sector_t)step || !step)
                goto ret_einval;

        as_backup = as;
        a = dm_shift_arg(&as);
        if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
                while (feature_args--) {
                        a = dm_shift_arg(&as);
                        if (!a)
                                goto ret_einval;
                        if (!strcasecmp(a, "precise_timestamps"))
                                stat_flags |= STAT_PRECISE_TIMESTAMPS;
                        else if (!strncasecmp(a, "histogram:", 10)) {
                                if (n_histogram_entries)
                                        goto ret_einval;
                                r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries);
                                if (r)
                                        goto ret;
                        } else
                                goto ret_einval;
                }
        } else {
                as = as_backup;
        }

        program_id = "-";
        aux_data = "-";

        a = dm_shift_arg(&as);
        if (a)
                program_id = a;

        a = dm_shift_arg(&as);
        if (a)
                aux_data = a;

        if (as.argc)
                goto ret_einval;

        /*
         * If a buffer overflow happens after we created the region,
         * it's too late (the userspace would retry with a larger
         * buffer, but the region id that caused the overflow is already
         * leaked). So we must detect buffer overflow in advance.
         */
        snprintf(result, maxlen, "%d", INT_MAX);
        if (dm_message_test_buffer_overflow(result, maxlen)) {
                r = 1;
                goto ret;
        }

        id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
                             n_histogram_entries, histogram_boundaries, program_id, aux_data,
                             dm_internal_suspend_fast, dm_internal_resume_fast, md);
        if (id < 0) {
                r = id;
                goto ret;
        }

        snprintf(result, maxlen, "%d", id);

        r = 1;
        goto ret;

ret_einval:
        r = -EINVAL;
ret:
        kfree(histogram_boundaries);
        return r;
}
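
/*
 * Example (illustrative; the authoritative syntax is in
 * Documentation/admin-guide/device-mapper/statistics.rst):
 *
 *   dmsetup message vol 0 @stats_create - /100
 *
 * creates a region covering the whole device, subdivided into 100 areas,
 * and prints the new region's id.
 */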
static int message_stats_delete(struct mapped_device *md,
                                unsigned int argc, char **argv)
{
        int id;
        char dummy;

        if (argc != 2)
                return -EINVAL;

        if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
                return -EINVAL;

        return dm_stats_delete(dm_get_stats(md), id);
}
static int message_stats_clear(struct mapped_device *md,
                               unsigned int argc, char **argv)
{
        int id;
        char dummy;

        if (argc != 2)
                return -EINVAL;

        if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
                return -EINVAL;

        return dm_stats_clear(dm_get_stats(md), id);
}
static int message_stats_list(struct mapped_device *md,
                              unsigned int argc, char **argv,
                              char *result, unsigned int maxlen)
{
        int r;
        const char *program = NULL;

        if (argc < 1 || argc > 2)
                return -EINVAL;

        if (argc > 1) {
                program = kstrdup(argv[1], GFP_KERNEL);
                if (!program)
                        return -ENOMEM;
        }

        r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

        kfree(program);

        return r;
}
static int message_stats_print(struct mapped_device *md,
                               unsigned int argc, char **argv, bool clear,
                               char *result, unsigned int maxlen)
{
        int id;
        char dummy;
        unsigned long idx_start = 0, idx_len = ULONG_MAX;

        if (argc != 2 && argc != 4)
                return -EINVAL;

        if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
                return -EINVAL;

        if (argc > 3) {
                if (strcmp(argv[2], "-") &&
                    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
                        return -EINVAL;
                if (strcmp(argv[3], "-") &&
                    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
                        return -EINVAL;
        }

        return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
                              result, maxlen);
}
static int message_stats_set_aux(struct mapped_device *md,
                                 unsigned int argc, char **argv)
{
        int id;
        char dummy;

        if (argc != 3)
                return -EINVAL;

        if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
                return -EINVAL;

        return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}
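
/*
 * Example session (illustrative):
 *
 *   dmsetup message vol 0 @stats_print 0     - dump region 0's counters
 *   dmsetup message vol 0 @stats_clear 0     - zero them
 *   dmsetup message vol 0 @stats_delete 0    - remove the region
 */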
int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
                     char *result, unsigned int maxlen)
{
        int r;

        /* All messages here must start with '@' */
        if (!strcasecmp(argv[0], "@stats_create"))
                r = message_stats_create(md, argc, argv, result, maxlen);
        else if (!strcasecmp(argv[0], "@stats_delete"))
                r = message_stats_delete(md, argc, argv);
        else if (!strcasecmp(argv[0], "@stats_clear"))
                r = message_stats_clear(md, argc, argv);
        else if (!strcasecmp(argv[0], "@stats_list"))
                r = message_stats_list(md, argc, argv, result, maxlen);
        else if (!strcasecmp(argv[0], "@stats_print"))
                r = message_stats_print(md, argc, argv, false, result, maxlen);
        else if (!strcasecmp(argv[0], "@stats_print_clear"))
                r = message_stats_print(md, argc, argv, true, result, maxlen);
        else if (!strcasecmp(argv[0], "@stats_set_aux"))
                r = message_stats_set_aux(md, argc, argv);
        else
                return 2; /* this wasn't a stats message */

        if (r == -EINVAL)
                DMCRIT("Invalid parameters for message %s", argv[0]);

        return r;
}
int __init dm_statistics_init(void)
{
        shared_memory_amount = 0;
        dm_stat_need_rcu_barrier = 0;
        return 0;
}
void dm_statistics_exit(void)
{
        if (dm_stat_need_rcu_barrier)
                rcu_barrier();
        if (WARN_ON(shared_memory_amount))
                DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}
module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, 0444);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");