/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
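
/*
 * Example (sketch): scanning the live TLB for a valid entry, the same
 * pattern the flush and dump paths below use; "obj" is an already
 * enabled iommu:
 *
 *	struct cr_regs cr;
 *	int i;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 *		if (iotlb_cr_valid(&cr))
 *			break;
 *	}
 */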

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

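/*
 * struct iotlb_lock - software image of the MMU_LOCK register
 * @base: first non-preserved TLB entry; entries below @base survive flushes
 * @vict: victim entry to be overwritten by the next TLB load
 *
 * (Field meanings follow the MMU_LOCK_BASE/MMU_LOCK_VICT encodings above.)
 */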
struct iotlb_lock {
	short base;
	short vict;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
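
/*
 * Example (sketch): an architecture backend would typically register its
 * callbacks from its own module init; "omap2_iommu_ops" here is a
 * hypothetical instance of struct iommu_functions:
 *
 *	static int __init omap2_iommu_init(void)
 *	{
 *		return omap_install_iommu_arch(&omap2_iommu_ops);
 *	}
 */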

/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
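
/*
 * Example (sketch): a client driver would bracket an off-mode transition
 * with these helpers; "client_dev" is a hypothetical device whose
 * archdata points at an omap iommu:
 *
 *	omap_iommu_save_ctx(client_dev);
 *	... the power domain may lose context here ...
 *	omap_iommu_restore_ctx(client_dev);
 */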

/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);

static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	if (pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = arch_iommu->enable(obj);

	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return;

	arch_iommu->disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}

/*
 * TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	pm_runtime_put_sync(obj->dev);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	pm_runtime_get_sync(obj->dev);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	pm_runtime_put_sync(obj->dev);

	return p - crs;
}

/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
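
/*
 * Example (sketch): a debugfs read handler could combine the two dump
 * helpers roughly like this; buffer sizing and copy_to_user handling
 * are elided:
 *
 *	ssize_t count;
 *
 *	count = omap_iommu_dump_ctx(obj, buf, bytes);
 *	count += omap_dump_tlb_entries(obj, buf + count, bytes - count);
 */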

int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG || CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 * H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a second-level table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced; free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

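	/* a 16MB supersection is entered 16 times in the first-level table */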
	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

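	/* a 64KB large page is entered 16 times in the second-level table */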
	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
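
/*
 * Example (sketch): mapping a single 4KB page, which is essentially what
 * omap_iommu_map() below does; "flags" stands for the attribute bits
 * (endianness, element size) and is elided here:
 *
 *	struct iotlb_entry e;
 *
 *	iotlb_init_entry(&e, da, pa, MMU_CAM_PGSZ_4K | flags);
 *	err = omap_iopgtable_store_entry(obj, &e);
 */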

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				 (void *)name,
				 device_match_by_alias);
	if (!dev)
		return NULL;

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner)) {
		err = -ENODEV;
		goto err_module;
	}

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	kfree(obj);
	return err;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
	},
};

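/*
 * Second-level tables are walked by the IOMMU straight from main memory,
 * so objects handed out by this cache must not linger dirty in the CPU
 * data cache; the constructor below cleans each object up front.
 */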
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
			    u32 flags)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= 1;
	/* FIXME: add OMAP1 support */
	e->pgsz		= flags & MMU_CAM_PGSZ_MASK;
	e->endian	= flags & MMU_RAM_ENDIAN_MASK;
	e->elsz		= flags & MMU_RAM_ELSZ_MASK;
	e->mixed	= flags & MMU_RAM_MIXED_MASK;

	return iopgsz_to_bytes(e->pgsz);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;
	u32 flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = (1ULL << 32) - 1;
	domain->geometry.force_aperture = true;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
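
/*
 * Example (sketch): with these ops registered on the platform bus, a
 * client maps memory through the generic IOMMU API; "dev" is a
 * hypothetical client device with omap iommu archdata:
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	err = iommu_attach_device(domain, dev);
 *	err = iommu_map(domain, da, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 */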

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");