/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE     1024ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK  (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

enum map_err_types {
        MAP_ERR_CHECK_NOT_APPLICABLE,
        MAP_ERR_NOT_CHECKED,
        MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

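/*
 * struct dma_debug_entry - one entry per active DMA mapping
 * @list:           hash-bucket (or free-list) linkage
 * @dev:            device that created the mapping
 * @type:           dma_debug_single/page/sg/coherent
 * @paddr:          physical (CPU) address of the mapped memory
 * @dev_addr:       dma address as seen by the device
 * @size:           length of the mapping in bytes
 * @direction:      DMA_TO_DEVICE, DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @sg_call_ents:   nents passed to dma_map_sg()
 * @sg_mapped_ents: number of entries dma_map_sg() actually mapped
 * @map_err_type:   whether the driver checked for a mapping error
 * @stacktrace:     call trace of the mapping site (CONFIG_STACKTRACE only)
 */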
struct dma_debug_entry {
        struct list_head list;
        struct device *dev;
        int type;
        phys_addr_t paddr;
        u64 dev_addr;
        u64 size;
        int direction;
        int sg_call_ents;
        int sg_mapped_ents;
        enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

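/* callback type used by __hash_bucket_find() to compare two entries */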
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static u32 global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent __read_mostly;
static struct dentry *global_disable_dent __read_mostly;
static struct dentry *error_count_dent __read_mostly;
static struct dentry *show_all_errors_dent __read_mostly;
static struct dentry *show_num_errors_dent __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN 64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
        [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
        [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
        [MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                pr_warning("Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

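/*
 * Decide whether errors for @dev should be reported. Returns true when no
 * driver filter is set, or when @dev is bound to the driver named in
 * current_driver_name. The first positive name match caches the driver in
 * current_driver so later calls are a simple pointer compare.
 */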
static bool driver_filter(struct device *dev)
{
        struct device_driver *drv;
        unsigned long flags;
        bool ret;

        /* driver filter off */
        if (likely(!current_driver_name[0]))
                return true;

        /* driver filter on and initialized */
        if (current_driver && dev && dev->driver == current_driver)
                return true;

        /* driver filter on, but we can't filter on a NULL device... */
        if (!dev)
                return false;

        if (current_driver || !current_driver_name[0])
                return false;

        /* driver filter on but not yet initialized */
        drv = dev->driver;
        if (!drv)
                return false;

        /* lock to protect against change of current_driver_name */
        read_lock_irqsave(&driver_name_lock, flags);

        ret = false;
        if (drv->name &&
            strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
                current_driver = drv;
                ret = true;
        }

        read_unlock_irqrestore(&driver_name_lock, flags);

        return ret;
}

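/*
 * err_printk - report a DMA-API violation and dump the mapping stack trace.
 * Honors the per-driver filter and the all_errors/num_errors debugfs knobs;
 * error_count is bumped even when the message itself is suppressed.
 */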
#define err_printk(dev, entry, format, arg...) do {                     \
                error_count += 1;                                       \
                if (driver_filter(dev) &&                               \
                    (show_all_errors || show_num_errors > 0)) {         \
                        WARN(1, "%s %s: " format,                       \
                             dev ? dev_driver_string(dev) : "NULL",     \
                             dev ? dev_name(dev) : "NULL", ## arg);     \
                        dump_entry_trace(entry);                        \
                }                                                       \
                if (!show_all_errors && show_num_errors > 0)            \
                        show_num_errors -= 1;                           \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 here as the index into the hash
         * (HASH_FN_SHIFT with the 10-bit HASH_FN_MASK).
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
        return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev));
}

static bool containing_match(struct dma_debug_entry *a,
                             struct dma_debug_entry *b)
{
        if (a->dev != b->dev)
                return false;

        if ((b->dev_addr <= a->dev_addr) &&
            ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
                return true;

        return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
                                                  struct dma_debug_entry *ref,
                                                  match_fn match)
{
        struct dma_debug_entry *entry, *ret = NULL;
        int matches = 0, match_lvl, last_lvl = -1;

        list_for_each_entry(entry, &bucket->list, list) {
                if (!match(ref, entry))
                        continue;

                /*
                 * Some drivers map the same physical address multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which returns the entry from
                 * the hash which fits best to the reference value
                 * instead of the first-fit.
                 */
                matches += 1;
                match_lvl = 0;
                if (entry->size == ref->size)
                        ++match_lvl;
                if (entry->type == ref->type)
                        ++match_lvl;
                if (entry->direction == ref->direction)
                        ++match_lvl;
                if (entry->sg_call_ents == ref->sg_call_ents)
                        ++match_lvl;

                if (match_lvl == 4) {
                        /* perfect-fit - return the result */
                        return entry;
                } else if (match_lvl > last_lvl) {
                        /*
                         * We found an entry that fits better than the
                         * previous one or it is the first match.
                         */
                        last_lvl = match_lvl;
                        ret = entry;
                }
        }

        /*
         * If we have multiple matches but no perfect-fit, just return
         * NULL.
         */
        ret = (matches == 1) ? ret : NULL;

        return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
{
        return __hash_bucket_find(bucket, ref, exact_match);
}

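/*
 * Find an entry that *contains* the reference: a large mapping can hash to
 * a lower bucket than an address inside it, so walk backwards bucket by
 * bucket (up to the device's max segment size), dropping and retaking the
 * bucket lock as we go. On success the lock of the returned entry's bucket
 * is held via *bucket.
 */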
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
                                                   struct dma_debug_entry *ref,
                                                   unsigned long *flags)
{
        unsigned int max_range = dma_get_max_seg_size(ref->dev);
        struct dma_debug_entry *entry, index = *ref;
        unsigned int range = 0;

        while (range <= max_range) {
                entry = __hash_bucket_find(*bucket, &index, containing_match);

                if (entry)
                        return entry;

                /*
                 * Nothing found, go back a hash bucket
                 */
                put_hash_bucket(*bucket, flags);
                range          += (1 << HASH_FN_SHIFT);
                index.dev_addr -= (1 << HASH_FN_SHIFT);
                *bucket = get_hash_bucket(&index, flags);
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n",
                                         type2name[entry->type], idx,
                                         (unsigned long long)entry->paddr,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction],
                                         maperr2str[entry->map_err_type]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

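/* caller must hold free_entries_lock */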
static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                pr_err("DMA-API: debugging out of memory - disabling\n");
                global_disable = true;
                spin_unlock_irqrestore(&free_entries_lock, flags);
                return NULL;
        }

        entry = __dma_entry_alloc();

        spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

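/*
 * Grow or shrink the pool of preallocated dma_debug_entry structs to
 * num_entries. Returns 0 when the pool now holds exactly num_entries,
 * 1 if the target could not be reached (e.g. allocation failure).
 */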
int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN + 1];
        unsigned long flags;
        int len;

        if (!current_driver_name[0])
                return 0;

        /*
         * We can't copy to userspace directly because current_driver_name can
         * only be read under the driver_name_lock with irqs disabled. So
         * create a temporary copy first.
         */
        read_lock_irqsave(&driver_name_lock, flags);
        len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
        read_unlock_irqrestore(&driver_name_lock, flags);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
                            size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN];
        unsigned long flags;
        size_t len;
        int i;

        /*
         * We can't copy from userspace directly. Access to
         * current_driver_name is protected with a write_lock with irqs
         * disabled. Since copy_from_user can fault and may sleep we
         * need to copy to a temporary buffer first.
         */
        len = min(count, (size_t)(NAME_MAX_LEN - 1));
        if (copy_from_user(buf, userbuf, len))
                return -EFAULT;

        buf[len] = 0;

        write_lock_irqsave(&driver_name_lock, flags);

        /*
         * Now handle the string we got from userspace very carefully.
         * The rules are:
         *   - only use the first token we got
         *   - token delimiter is everything looking like a space
         *     character (' ', '\n', '\t' ...)
         */
        if (!isalnum(buf[0])) {
                /*
                 * If the first character userspace gave us is not
                 * alphanumerical then assume the filter should be
                 * switched off.
                 */
                if (current_driver_name[0])
                        pr_info("DMA-API: switching off dma-debug driver filter\n");
                current_driver_name[0] = 0;
                current_driver = NULL;
                goto out_unlock;
        }

        /*
         * Now parse out the first token and use it as the name for the
         * driver to filter for.
         */
        for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace(buf[i]) || buf[i] == 0)
                        break;
        }
        current_driver_name[i] = 0;
        current_driver = NULL;

        pr_info("DMA-API: enable driver filter for driver [%s]\n",
                current_driver_name);

out_unlock:
        write_unlock_irqrestore(&driver_name_lock, flags);

        return count;
}

static const struct file_operations filter_fops = {
        .read   = filter_read,
        .write  = filter_write,
        .llseek = default_llseek,
};

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                pr_err("DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  &global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        filter_dent = debugfs_create_file("driver_filter", 0644,
                                          dma_debug_dent, NULL, &filter_fops);
        if (!filter_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

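/*
 * Count the outstanding mappings for @dev across all hash buckets; the last
 * matching entry seen is returned through @out_entry so the caller can show
 * one example of a leaked mapping.
 */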
static int device_dma_allocations(struct device *dev,
                                  struct dma_debug_entry **out_entry)
{
        struct dma_debug_entry *entry;
        unsigned long flags;
        int count = 0, i;

        local_irq_save(flags);

        for (i = 0; i < HASH_SIZE; ++i) {
                spin_lock(&dma_entry_hash[i].lock);
                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
                        if (entry->dev == dev) {
                                count += 1;
                                *out_entry = entry;
                        }
                }
                spin_unlock(&dma_entry_hash[i].lock);
        }

        local_irq_restore(flags);

        return count;
}

static int dma_debug_device_change(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct device *dev = data;
        struct dma_debug_entry *uninitialized_var(entry);
        int count;

        if (global_disable)
                return 0;

        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
                count = device_dma_allocations(dev, &entry);
                if (count == 0)
                        break;
                err_printk(dev, entry, "DMA-API: device driver has pending "
                                "DMA allocations while released from device "
                                "[count=%d]\n"
                                "One of leaked entries details: "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [mapped as %s]\n",
                        count, entry->dev_addr, entry->size,
                        dir2name[entry->direction], type2name[entry->type]);
                break;
        default:
                break;
        }

        return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        struct notifier_block *nb;

        if (global_disable)
                return;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (nb == NULL) {
                pr_err("dma_debug_add_bus: out of memory\n");
                return;
        }

        nb->notifier_call = dma_debug_device_change;

        bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                pr_err("DMA-API: error creating debugfs entries - disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                pr_err("DMA-API: debugging out of memory error - disabled\n");
                global_disable = true;

                return;
        }

        nr_total_entries = num_free_entries;

        pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                pr_info("DMA-API: debugging disabled on kernel command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

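/*
 * Core unmap check: look up the exact entry for @ref and complain about
 * frees of unmapped addresses, size/type/direction mismatches, coherent
 * frees with a different CPU address, scatter-gather entry-count mismatches
 * and unchecked dma_mapping_error() results, then release the entry back to
 * the free list.
 */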
static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);
        entry = bucket_find_exact(bucket, ref);

        if (!entry) {
                /* must drop lock before calling dma_mapping_error */
                put_hash_bucket(bucket, &flags);

                if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free an "
                                   "invalid DMA memory address\n");
                } else {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free DMA "
                                   "memory it has not allocated [device "
                                   "address=0x%016llx] [size=%llu bytes]\n",
                                   ref->dev_addr, ref->size);
                }
                return;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=0x%016llx] "
                           "[cpu free address=0x%016llx]\n",
                           ref->dev_addr, ref->size,
                           (unsigned long long)entry->paddr,
                           (unsigned long long)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                err_printk(ref->dev, entry,
                           "DMA-API: device driver failed to check map error "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory "
                           "from stack [addr=%p]\n", addr);
}

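/* returns true if [addr, addr + len) intersects [start, end) */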
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
        unsigned long a1 = (unsigned long)addr;
        unsigned long b1 = a1 + len;
        unsigned long a2 = (unsigned long)start;
        unsigned long b2 = (unsigned long)end;

        return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr,
                                   unsigned long len)
{
        if (overlap(addr, len, _text, _etext) ||
            overlap(addr, len, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
                       struct dma_debug_entry *ref,
                       bool to_cpu)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);

        entry = bucket_find_contain(&bucket, ref, &flags);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                           " DMA memory outside allocated range "
                           "[device address=0x%016llx] "
                           "[allocation size=%llu bytes] "
                           "[sync offset+size=%llu]\n",
                           entry->dev_addr, entry->size,
                           ref->size);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (ref->direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
            !(ref->direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
            !(ref->direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);

out:
        put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (dma_mapping_error(dev, dma_addr))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;
        entry->map_err_type = MAP_ERR_NOT_CHECKED;

        if (map_single)
                entry->type = dma_debug_single;

        if (!PageHighMem(page)) {
                void *addr = page_address(page) + offset;

                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_debug_entry ref;
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        ref.dev = dev;
        ref.dev_addr = dma_addr;
        bucket = get_hash_bucket(&ref, &flags);

        list_for_each_entry(entry, &bucket->list, list) {
                if (!exact_match(&ref, entry))
                        continue;

                /*
                 * The same physical address can be mapped multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which updates the first entry
                 * from the hash which fits the reference value and is
                 * not currently listed as being checked.
                 */
                if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                        entry->map_err_type = MAP_ERR_CHECKED;
                        break;
                }
        }

        put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type      = dma_debug_page,
                .dev       = dev,
                .dev_addr  = addr,
                .size      = size,
                .direction = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = sg_dma_len(s);
                entry->dev_addr       = sg_dma_address(s);
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                if (!PageHighMem(sg_page(s))) {
                        check_for_stack(dev, sg_virt(s));
                        check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

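/*
 * Look up the entry matching @ref and return the number of scatterlist
 * elements that were actually mapped for it (sg_mapped_ents), or 0 if no
 * such mapping exists.
 */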
static int get_nr_mapped_entries(struct device *dev,
                                 struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;
        int mapped_ents;

        bucket      = get_hash_bucket(ref, &flags);
        entry       = bucket_find_exact(bucket, ref);
        mapped_ents = 0;

        if (entry)
                mapped_ents = entry->sg_mapped_ents;
        put_hash_bucket(bucket, &flags);

        return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type         = dma_debug_sg,
                        .dev          = dev,
                        .paddr        = sg_phys(s),
                        .dev_addr     = sg_dma_address(s),
                        .size         = sg_dma_len(s),
                        .direction    = dir,
                        .sg_call_ents = nelems,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type      = dma_debug_coherent,
                .dev       = dev,
                .paddr     = virt_to_phys(virt),
                .dev_addr  = addr,
                .size      = size,
                .direction = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(global_disable))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(global_disable))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(global_disable))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = offset + size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(global_disable))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = offset + size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type         = dma_debug_sg,
                        .dev          = dev,
                        .paddr        = sg_phys(s),
                        .dev_addr     = sg_dma_address(s),
                        .size         = sg_dma_len(s),
                        .direction    = direction,
                        .sg_call_ents = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type         = dma_debug_sg,
                        .dev          = dev,
                        .paddr        = sg_phys(s),
                        .dev_addr     = sg_dma_address(s),
                        .size         = sg_dma_len(s),
                        .direction    = direction,
                        .sg_call_ents = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
        int i;

        for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
                current_driver_name[i] = *str;
                if (*str == 0)
                        break;
        }

        if (current_driver_name[0])
                pr_info("DMA-API: enable driver filter for driver [%s]\n",
                        current_driver_name);

        return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);