#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
};

struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long stamp;
	struct dm_stat_percpu tmp;
};

struct dm_stat {
	struct list_head list_entry;
	int id;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[0];
};

struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
		return false;
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;

	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}

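/*
 * Allocate zeroed memory for the statistics counters. Small allocations are
 * satisfied with kzalloc_node, larger ones fall back to vzalloc_node; either
 * way the size is charged against the shared memory limit first, and the
 * charge is dropped again if the allocation fails.
 */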
static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	if (alloc_size <= KMALLOC_MAX_SIZE) {
		p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
		if (p)
			return p;
	}
	p = vzalloc_node(alloc_size, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}

static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu)
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
}

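/*
 * Create a new statistics region covering the sectors [start, end), divided
 * into areas of "step" sectors. The device is suspended and resumed around
 * the list insertion so that no I/O is in flight when counting starts.
 * Returns the new region id or a negative error code.
 */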
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}

static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

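/*
 * Unlink a region from the list and free it. vfree may not be called from an
 * RCU callback, so vmalloc-backed regions are freed synchronously after
 * synchronize_rcu_expedited(); everything else is freed from call_rcu.
 */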
static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);
	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from an RCU callback, so if any of the
	 * allocations came from vmalloc, free synchronously instead.
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu) ||
		    is_vmalloc_addr(s->stat_percpu[cpu]))
			goto do_sync_free;
	if (is_vmalloc_addr(s)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
		call_rcu(&s->rcu_head, dm_stat_free);
	}

	return 0;
}

static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
			       (unsigned long long)s->start,
			       (unsigned long long)len,
			       (unsigned long long)s->step,
			       s->program_id,
			       s->aux_data);
		}
	}
	mutex_unlock(&stats->mutex);

	return 1;
}

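/*
 * Close the current time interval: add the elapsed jiffies to the busy-time
 * and queue-time counters according to how many I/Os are in flight, then
 * restart the interval at the current time.
 */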
static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long now = jiffies;
	unsigned in_flight_read;
	unsigned in_flight_write;
	unsigned long difference = now - shared->stamp;

	if (!difference)
		return;

	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}

	shared->stamp = now;
}

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      unsigned long bi_rw, sector_t len, bool merged,
			      bool end, unsigned long duration)
{
	unsigned long idx = bi_rw & REQ_WRITE;
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_disable/enable
	 * instead of preempt_disable/enable.
	 *
	 * This is racy if the driver finishes bios from non-interrupt
	 * context as well as from interrupt context, or from two different
	 * interrupts.
	 *
	 * However, the race only results in not counting some events,
	 * so it is acceptable.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
	preempt_disable();
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		dm_stat_round(shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += merged;
		p->ticks[idx] += duration;
	}

	preempt_enable();
}

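/*
 * Account one bio against a single region: clip the bio to the region
 * boundaries and walk it in step-sized fragments, updating the counters of
 * every area it touches.
 */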
static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;

	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux->merged, end, duration);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}

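/*
 * Main accounting entry point, called once when a bio is submitted
 * (end == false) and once when it completes (end == true). On submission the
 * bio is compared with the last bio seen on this CPU to set the "merged"
 * flag, then the event is propagated to every registered region under RCU.
 */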
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long duration, struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = __this_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == ACCESS_ONCE(last->last_sector) &&
			 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
			  (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
		ACCESS_ONCE(last->last_sector) = end_sector;
		ACCESS_ONCE(last->last_rw) = bi_rw;
	}

	rcu_read_lock();

	list_for_each_entry_rcu(s, &stats->list, list_entry)
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);

	rcu_read_unlock();
}

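/*
 * Sum the per-CPU counters of one area into shared->tmp so they can be
 * reported (or subtracted when clearing) as a single consistent snapshot.
 */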
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(shared, p);
	local_irq_enable();

	memset(&shared->tmp, 0, sizeof(shared->tmp));
	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
	}
}

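/*
 * Clear the counters of the areas [idx_start, idx_end) by subtracting the
 * snapshot in shared->tmp from the local CPU's counters; summing over all
 * CPUs afterwards therefore yields zero.
 */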
static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);

		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msecs, but works for 64-bit values.
 */
static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
{
	unsigned long long result = 0;
	unsigned mult;

	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

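/*
 * Replace the auxiliary data string of a region with a freshly allocated copy.
 */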
static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}

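/*
 * @stats_create <range> <step> [<program_id> [<aux_data>]]
 *
 * <range> is either "-" (the whole device) or "<start>+<length>" in sectors;
 * <step> is either an area size in sectors or "/<number_of_areas>".
 * For example, "@stats_create - /100" divides the whole device into 100 areas.
 */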
static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;

	/*
	 * Input format:
	 *   <range> <step> [<program_id> [<aux_data>]]
	 */
	if (argc < 3 || argc > 5)
		return -EINVAL;

	if (!strcmp(argv[1], "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		return -EINVAL;
	end = start + len;

	if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		return -EINVAL;

	program_id = "-";
	aux_data = "-";
	if (argc > 3)
		program_id = argv[3];
	if (argc > 4)
		aux_data = argv[4];

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked). So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen))
		return 1;

	id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
			     dm_internal_suspend, dm_internal_resume, md);
	if (id < 0)
		return id;

	snprintf(result, maxlen, "%d", id);

	return 1;
}

static int message_stats_delete(struct mapped_device *md,
				unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
			       unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}

static int message_stats_list(struct mapped_device *md,
			      unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}

static int message_stats_print(struct mapped_device *md,
			       unsigned argc, char **argv, bool clear,
			       char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}

static int message_stats_set_aux(struct mapped_device *md,
				 unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

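/*
 * Top-level dispatcher for the "@stats_*" target messages: @stats_create,
 * @stats_delete, @stats_clear, @stats_list, @stats_print, @stats_print_clear
 * and @stats_set_aux. Returns 2 if the message is not a statistics message.
 */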
int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	if (dm_request_based(md)) {
		DMWARN("Statistics are only supported for bio-based devices");
		return -EOPNOTSUPP;
	}

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}

int __init dm_statistics_init(void)
{
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");