// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt
#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>
#define HASH_SIZE	16384ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES \
	(PAGE_SIZE / sizeof(struct dma_debug_entry))
enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};
#define DMA_DEBUG_STACKTRACE_ENTRIES 5
/**
 * struct dma_debug_entry - track a dma_map_* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @dev_addr: dma address
 * @size: length of the mapping
 * @type: single, page, sg, coherent
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of backtrace history, saved when a violation
 *                 needs to be reported
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	u64			dev_addr;
	u64			size;
	int			type;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	unsigned long		pfn;
	size_t			offset;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};
/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;
static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}
/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);
static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};
static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};
static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE] = "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
	[DMA_NONE] = "DMA_NONE",
};
/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}
static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}
#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);
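/*
 * Illustrative only (not part of the kernel sources): with the pr_fmt and
 * WARN format above, a triggered check is reported roughly as
 *
 *	DMA-API: <driver> <dev_name>: device driver frees DMA memory with
 *	different size [device address=...] [map size=...] [unmap size=...]
 *
 * followed by the usual WARN backtrace and, with CONFIG_STACKTRACE, the
 * "Mapped at:" trace saved when the entry was allocated.
 */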
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-26 of the dma address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
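/*
 * Worked example (illustrative only): with HASH_FN_SHIFT = 13 and the
 * 14-bit HASH_FN_MASK, dev_addr 0x12345000 hashes as
 *
 *	(0x12345000 >> 13) & 0x3fff == 0x91a2 & 0x3fff == 0x11a2
 *
 * so all entries whose dma address falls in the same 8 KiB-aligned window
 * land in bucket 0x11a2.
 */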
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}
/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return a->dev_addr == b->dev_addr && a->dev == b->dev;
}
static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}
/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}
static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	struct dma_debug_entry *entry, index = *ref;
	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);

	for (int i = 0; i < limit; i++) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, *flags);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}
static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}
/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 */
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
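/*
 * Example arithmetic (illustrative only): with PAGE_SHIFT = 12 and
 * L1_CACHE_SHIFT = 6, CACHELINE_PER_PAGE_SHIFT is 6 and each page holds
 * CACHELINES_PER_PAGE = 64 cachelines, so pfn 0x1000 with offset 0x80
 * maps to cacheline number (0x1000 << 6) + (0x80 >> 6) = 0x40002.
 */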
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;

	return overlap;
}
static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}
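/*
 * Sketch of the tag encoding (assuming the usual RADIX_TREE_MAX_TAGS of 3,
 * which gives ACTIVE_CACHELINE_MAX_OVERLAP = 7): an overlap count of 5
 * (0b101) is stored by setting tags 2 and 0 and clearing tag 1 on the
 * cacheline's radix tree slot; active_cacheline_read_overlap() then
 * reassembles 0b101 by testing each tag in turn.
 */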
static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}
static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}
static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}
static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}
/*
 * Dump mapping entries to the kernel log for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;
	phys_addr_t cln;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				cln = to_cacheline_number(entry);
				dev_info(entry->dev,
					 "%s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 &cln, dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
/*
 * Dump mapping entries to user space via debugfs
 */
static int dump_show(struct seq_file *seq, void *v)
{
	int idx;
	phys_addr_t cln;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			cln = to_cacheline_number(entry);
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
				   dev_driver_string(entry->dev),
				   dev_name(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   &cln, dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);
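/*
 * Usage sketch: with CONFIG_DMA_API_DEBUG enabled, the table above can be
 * read from user space, e.g.:
 *
 *	cat /sys/kernel/debug/dma-api/dump
 *
 * (assuming debugfs is mounted at /sys/kernel/debug).
 */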
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		err_printk(entry->dev, entry,
			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
	}
}
static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}
static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}
/*
 * This should be called outside of free_entries_lock scope to avoid potential
 * deadlocks with serial consoles that use DMA.
 */
static void __dma_entry_alloc_check_leak(u32 nr_entries)
{
	u32 tmp = nr_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_entries,
			(nr_entries / nr_prealloc_entries));
	}
}
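/*
 * Example (illustrative only): with the default nr_prealloc_entries of
 * 1 << 16, the first dynamic growth past 2 * 65536 entries logs
 * "dma_debug_entry pool grown to 131072 (200%)".
 */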
/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	bool alloc_check_leak = false;
	struct dma_debug_entry *entry;
	unsigned long flags;
	u32 nr_entries;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		alloc_check_leak = true;
		nr_entries = nr_total_entries;
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

	if (alloc_check_leak)
		__dma_entry_alloc_check_leak(nr_entries);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}
static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */
static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first.
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumeric then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}
static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};
static int __init dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);

	return 0;
}
core_initcall_sync(dma_debug_fs_init);
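/*
 * Usage sketch (assuming debugfs at /sys/kernel/debug; the driver name is
 * only an example): the knobs created above can be poked at runtime, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/dma-api/all_errors
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *
 * The first reports every error instead of only the first show_num_errors;
 * the second restricts reports to the named driver.
 */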
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}
static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}
void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}
static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);
static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 1;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 1;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
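/*
 * Example kernel command lines (illustrative only):
 *
 *	dma_debug=off			- disable checking at boot
 *	dma_debug_entries=65536		- request a larger preallocation
 */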
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, flags);
}
static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}
static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (memory_intersects(_stext, _etext, addr, len) ||
	    memory_intersects(__start_rodata, __end_rodata, addr, len))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, flags);
}
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
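/*
 * Worked example (illustrative only): for a segment boundary mask of
 * 0xffff (64 KiB), a segment with start = 0xf000 and sg_dma_len = 0x2000
 * gives end = 0x10fff, and (0xf000 ^ 0x10fff) & ~0xffff == 0x10000, so
 * the segment straddles a 64 KiB boundary and is reported.
 */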
void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_single;
	entry->pfn       = page_to_pfn(page);
	entry->offset    = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry, attrs);
}
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
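/*
 * Driver-side sketch (illustrative only) of the pattern this tracks: the
 * dma_mapping_error() call below is what flips the entry from
 * MAP_ERR_NOT_CHECKED to MAP_ERR_CHECKED before the buffer is used:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */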
void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_single,
		.dev       = dev,
		.dev_addr  = dma_addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction,
		      unsigned long attrs)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nents, i) {
		check_for_stack(dev, sg_page(s), s->offset);
		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), s->length);
	}

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn            = page_to_pfn(sg_page(s));
		entry->offset         = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_sg_segment(dev, s);

		add_dma_entry(entry, attrs);
	}
}
static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket      = get_hash_bucket(ref, &flags);
	entry       = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, flags);

	return mapped_ents;
}
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {
		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt,
			      unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->offset    = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry, attrs);
}
void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_coherent,
		.dev       = dev,
		.offset    = offset_in_page(virt),
		.dev_addr  = dma_addr,
		.size      = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
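/*
 * Driver-side sketch (illustrative only) of the alloc/free pair tracked
 * above; the same size, cpu address and dma address must show up at free
 * time or check_unmap() complains:
 *
 *	void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, cpu, handle);
 */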
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr,
			    unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type         = dma_debug_resource;
	entry->dev          = dev;
	entry->pfn          = PHYS_PFN(addr);
	entry->offset       = offset_in_page(addr);
	entry->size         = size;
	entry->dev_addr     = dma_addr;
	entry->direction    = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry, attrs);
}
void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_resource,
		.dev       = dev,
		.dev_addr  = dma_addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {
		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {
		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.pfn          = page_to_pfn(sg_page(s)),
			.offset       = s->offset,
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
1591 static int __init dma_debug_driver_setup(char *str)
1595 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1596 current_driver_name[i] = *str;
1601 if (current_driver_name[0])
1602 pr_info("enable driver filter for driver [%s]\n",
1603 current_driver_name);
1608 __setup("dma_debug_driver=", dma_debug_driver_setup);
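/*
 * Example (illustrative only; the driver name is hypothetical): the filter
 * can also be armed at boot, before debugfs is available:
 *
 *	dma_debug_driver=e1000e
 */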