// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>
#define HASH_SIZE       16384ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5
/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @size: length of the mapping
 * @type: single, sg, coherent or resource
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of the mapping caller, to support backtraces when
 *                 a violation is detected
 */
struct dma_debug_entry {
	struct list_head list;
	struct device	 *dev;
	int		 type;
	unsigned long	 pfn;
	size_t		 offset;
	u64		 dev_addr;
	u64		 size;
	int		 direction;
	int		 sg_call_ents;
	int		 sg_mapped_ents;
	enum map_err_types  map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int	stack_len;
	unsigned long	stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);
/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);
static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

/*
 * Use designated initializers so the strings stay in sync with the type
 * enum above (a flat initializer list would silently go stale if the
 * enum changes).
 */
static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };
/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}
static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}
#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)
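/*
 * Example use (illustrative, mirroring the real call sites below):
 *
 *	err_printk(ref->dev, entry, "device driver frees DMA memory "
 *		   "with different size [map size=%llu bytes] "
 *		   "[unmap size=%llu bytes]\n", entry->size, ref->size);
 *
 * Reporting honors the per-driver filter and the show_all_errors /
 * show_num_errors debugfs knobs, and dumps the stored mapping backtrace
 * via dump_entry_trace() when CONFIG_STACKTRACE is enabled.
 */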
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address. With
	 * HASH_FN_SHIFT == 13 and a 16384-bucket table, bits 13-26 of
	 * the address form the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
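/*
 * Worked example (illustrative only): for dev_addr == 0x12345678,
 * 0x12345678 >> 13 == 0x91A2, and 0x91A2 & 0x3FFF == 0x11A2, so the
 * entry lands in bucket 0x11A2. All mappings within the same 8 KiB
 * window of device address space share a bucket, which is what the
 * backwards walk in bucket_find_contain() below relies on.
 */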
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
	__releases(&bucket->lock)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return (a->dev_addr == b->dev_addr) && (a->dev == b->dev);
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}
/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the first match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}
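/*
 * Note on the backwards walk above: a mapping that contains 'ref' must
 * start at or below ref->dev_addr, so it can only live in the same hash
 * bucket or in one covering a lower address range (each bucket spans a
 * 1 << HASH_FN_SHIFT window of device addresses). The walk is bounded
 * by the device's maximum segment size, beyond which no single mapping
 * could still contain the reference.
 */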
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}
/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}
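/*
 * Worked example (illustrative only, assuming PAGE_SHIFT == 12 and
 * L1_CACHE_SHIFT == 6, i.e. 64 cachelines per 4 KiB page): a mapping
 * with pfn == 0x1000 and offset == 0x80 yields
 * (0x1000 << 6) + (0x80 >> 6) == 0x40002, i.e. the third cacheline of
 * that page.
 */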
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;

	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) {
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
	}

	return overlap;
}
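/*
 * Rather than storing a separate counter, the overlap count is encoded
 * in the radix tree's per-entry tags: with RADIX_TREE_MAX_TAGS == 3,
 * tags 0-2 form a 3-bit saturating counter, so at most
 * ACTIVE_CACHELINE_MAX_OVERLAP == 7 overlapping mappings of one
 * cacheline can be tracked. For example (illustrative reading of the
 * code above), an overlap count of 5 (0b101) is stored by setting tags
 * 2 and 0 and clearing tag 1.
 */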
static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.  Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * early.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}
static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}
/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (dma_debug_disabled())
		return;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}
static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}
static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}
static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}
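/*
 * A note on the "(%u00%%)" format above: the pool grows in
 * DMA_DEBUG_DYNAMIC_ENTRIES steps, so this message fires once per
 * multiple of the initial pool size and prints the growth factor as a
 * rough percentage, e.g. nr_total_entries == 2 * nr_prealloc_entries
 * is reported as "200%".
 */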
/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	/* skip this function's own frame when saving the backtrace */
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}
static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first.
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}
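/*
 * Usage example (illustrative only, assuming debugfs is mounted at
 * /sys/kernel/debug): limit reports to one driver, then clear the
 * filter again:
 *
 *	echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *	echo ""       > /sys/kernel/debug/dma-api/driver_filter
 *
 * "e1000e" is an arbitrary example name; the "dma-api" directory is
 * created in dma_debug_fs_init() below.
 */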
static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};
static int dump_show(struct seq_file *seq, void *v)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
				   dev_name(entry->dev),
				   dev_driver_string(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);
static void dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
}
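/*
 * Illustrative check from the shell (again assuming debugfs at
 * /sys/kernel/debug): the violation count and all currently active
 * mappings can be inspected with
 *
 *	cat /sys/kernel/debug/dma-api/error_count
 *	cat /sys/kernel/debug/dma-api/dump
 */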
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}
static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}
void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}
static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	dma_debug_fs_init();

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);
static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
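/*
 * Example kernel command lines (illustrative only):
 *
 *	dma_debug=off             - disable the checks entirely
 *	dma_debug_entries=65536   - size the preallocated entry pool
 *
 * A third parameter, dma_debug_driver=<name>, is registered at the end
 * of this file and seeds the same filter as the debugfs file above.
 */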
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/DMA-API.txt.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}
static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}
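/*
 * Illustrative note: this is the standard half-open interval test,
 * [a1, b1) intersects [a2, b2) unless one range ends before the other
 * begins. E.g. a 0x100-byte buffer at 0x1000 overlaps [0x1080, 0x2000)
 * but not [0x1100, 0x2000).
 */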
static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _stext, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, &flags);
}
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
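/*
 * Worked example for the boundary test (illustrative only): with a
 * 64 KiB segment boundary, dma_get_seg_boundary() returns 0xffff. A
 * segment with start == 0xfff0 and end == 0x10010 gives
 * (0xfff0 ^ 0x10010) == 0x1ffe0, and 0x1ffe0 & ~0xffff is non-zero,
 * so the segment straddles a 64 KiB boundary and is flagged.
 */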
void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_single;
	entry->pfn	 = page_to_pfn(page);
	entry->offset	 = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
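/*
 * These debug_dma_* hooks are not meant to be called by drivers
 * directly; the dma-mapping core wraps the real implementation with
 * them, roughly like this (simplified sketch of kernel/dma/mapping.c,
 * not the verbatim code):
 *
 *	addr = ops->map_page(dev, page, offset, size, dir, attrs);
 *	debug_dma_map_page(dev, page, offset, size, dir, addr);
 */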
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_single,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn	      = page_to_pfn(sg_page(s));
		entry->offset	      = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));

		check_sg_segment(dev, s);

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket       = get_hash_bucket(ref, &flags);
	entry        = bucket_find_exact(bucket, ref);
	mapped_ents  = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->offset	 = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry);
}
void debug_dma_free_coherent(struct device *dev, size_t size,
			 void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.offset		= offset_in_page(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type		= dma_debug_resource;
	entry->dev		= dev;
	entry->pfn		= PHYS_PFN(addr);
	entry->offset		= offset_in_page(addr);
	entry->size		= size;
	entry->dev_addr		= dma_addr;
	entry->direction	= direction;
	entry->map_err_type	= MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);
void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_resource,
		.dev            = dev,
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);