/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <mach/sysmmu.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
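
/*
 * Illustrative note (not in the original source): a 32-bit device virtual
 * address splits into a level-1 index (bits 31:20), a level-2 index
 * (bits 19:12) and a page offset (bits 11:0). For example, with
 * iova = 0x12345678:
 *
 *      lv1ent_offset(iova) = 0x12345678 >> 20             = 0x123
 *      lv2ent_offset(iova) = (0x12345678 & 0xFF000) >> 12 = 0x45
 *      spage_offs(iova)    = 0x12345678 & 0xFFF           = 0x678
 */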

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
        SYSMMU_PAGEFAULT,
        SYSMMU_AR_MULTIHIT,
        SYSMMU_AW_MULTIHIT,
        SYSMMU_BUSERROR,
        SYSMMU_AR_SECURITY,
        SYSMMU_AR_ACCESS,
        SYSMMU_AW_SECURITY,
        SYSMMU_AW_PROTECTION, /* 7 */
        SYSMMU_FAULT_UNKNOWN,
        SYSMMU_FAULTS_NUM
};

/*
 * @itype: type of fault.
 * @pgtable_base: the physical address of the page table base. This is 0 if
 *      @itype is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *      translate. This is 0 if @itype is SYSMMU_BUSERROR.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
                        unsigned long pgtable_base, unsigned long fault_addr);
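
/*
 * Illustrative sketch (not part of the original driver): a client driver may
 * install its own handler through exynos_sysmmu_set_fault_handler() below.
 * Returning 0 tells the IRQ handler that the fault was handled, so the
 * interrupt status is cleared. 'my_dev' and the logging policy here are
 * hypothetical:
 *
 *      static int my_fault_handler(enum exynos_sysmmu_inttype itype,
 *                      unsigned long pgtable_base, unsigned long fault_addr)
 *      {
 *              pr_warn("sysmmu fault %d at %#lx (pgtable %#lx)\n",
 *                      itype, fault_addr, pgtable_base);
 *              return 0;
 *      }
 *
 *      exynos_sysmmu_set_fault_handler(my_dev, my_fault_handler);
 */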

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
        REG_PAGE_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_DEFAULT_SLAVE_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "AR MULTI-HIT FAULT",
        "AW MULTI-HIT FAULT",
        "BUS ERROR",
        "AR SECURITY PROTECTION FAULT",
        "AR ACCESS PROTECTION FAULT",
        "AW SECURITY PROTECTION FAULT",
        "AW ACCESS PROTECTION FAULT",
        "UNKNOWN FAULT"
};

struct exynos_iommu_domain {
        struct list_head clients;       /* list of sysmmu_drvdata.node */
        unsigned long *pgtable;         /* lv1 page table, 16KB */
        short *lv2entcnt;       /* free lv2 entry counter for each section */
        spinlock_t lock;        /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
        struct list_head node;  /* entry of exynos_iommu_domain.clients */
        struct device *sysmmu;  /* System MMU's device descriptor */
        struct device *dev;     /* Owner of system MMU */
        char *dbgname;
        int nsfrs;
        void __iomem **sfrbases;
        struct clk *clk[2];
        int activations;
        rwlock_t lock;
        struct iommu_domain *domain;
        sysmmu_fault_handler_t fault_handler;
        unsigned long pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
        __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
        int i = 120;

        __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(sfrbase);
                return false;
        }

        return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
        __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
                                                unsigned long iova)
{
        __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
                                unsigned long pgd)
{
        __raw_writel(0x1, sfrbase + REG_MMU_CFG);       /* 16KB LV1, LRU */
        __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

        __sysmmu_tlb_invalidate(sfrbase);
}

static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
                                                unsigned long size, int idx)
{
        __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
        __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
}

void exynos_sysmmu_set_prefbuf(struct device *dev,
                                unsigned long base0, unsigned long size0,
                                unsigned long base1, unsigned long size1)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        unsigned long flags;
        int i;

        BUG_ON((base0 + size0) <= base0);
        BUG_ON((size1 > 0) && ((base1 + size1) <= base1));

        read_lock_irqsave(&data->lock, flags);
        if (!is_sysmmu_active(data))
                goto finish;

        for (i = 0; i < data->nsfrs; i++) {
                if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
                        if (!sysmmu_block(data->sfrbases[i]))
                                continue;

                        if (size1 == 0) {
                                if (size0 <= SZ_128K) {
                                        base1 = base0;
                                        size1 = size0;
                                } else {
                                        size1 = size0 -
                                                ALIGN(size0 / 2, SZ_64K);
                                        size0 = size0 - size1;
                                        base1 = base0 + size0;
                                }
                        }

                        __sysmmu_set_prefbuf(
                                        data->sfrbases[i], base0, size0, 0);
                        __sysmmu_set_prefbuf(
                                        data->sfrbases[i], base1, size1, 1);

                        sysmmu_unblock(data->sfrbases[i]);
                }
        }
finish:
        read_unlock_irqrestore(&data->lock, flags);
}
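
/*
 * Illustrative note (not in the original source): when size1 == 0 and the
 * region is larger than 128KB, the code above splits it between the two
 * prefetch buffers. For example, base0 = 0x20000000, size0 = 1MB:
 *
 *      size1 = 0x100000 - ALIGN(0x80000, SZ_64K) = 0x80000
 *      size0 = 0x100000 - 0x80000                = 0x80000
 *      base1 = 0x20000000 + 0x80000              = 0x20080000
 *
 * i.e. each prefetch buffer covers one 512KB half of the region.
 */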

static void __set_fault_handler(struct sysmmu_drvdata *data,
                                        sysmmu_fault_handler_t handler)
{
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_set_fault_handler(struct device *dev,
                                        sysmmu_fault_handler_t handler)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        __set_fault_handler(data, handler);
}

static int default_fault_handler(enum exynos_sysmmu_inttype itype,
                        unsigned long pgtable_base, unsigned long fault_addr)
{
        unsigned long *ent;

        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;

        pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
                        sysmmu_fault_name[itype], fault_addr, pgtable_base);

        ent = section_entry(__va(pgtable_base), fault_addr);
        pr_err("\tLv1 entry: 0x%lx\n", *ent);

        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                pr_err("\t Lv2 entry: 0x%lx\n", *ent);
        }

        pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

        BUG();

        return 0;
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* The System MMU is in a blocked state when an interrupt occurs. */
        struct sysmmu_drvdata *data = dev_id;
        struct resource *irqres;
        struct platform_device *pdev;
        enum exynos_sysmmu_inttype itype;
        unsigned long addr = -1;

        int i, ret = -ENOSYS;

        read_lock(&data->lock);

        WARN_ON(!is_sysmmu_active(data));

        pdev = to_platform_device(data->sysmmu);
        for (i = 0; i < (pdev->num_resources / 2); i++) {
                irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (irqres && ((int)irqres->start == irq))
                        break;
        }

        if (i == (pdev->num_resources / 2)) {
                itype = SYSMMU_FAULT_UNKNOWN;
        } else {
                itype = (enum exynos_sysmmu_inttype)
                        __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
                if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
                        itype = SYSMMU_FAULT_UNKNOWN;
                else
                        addr = __raw_readl(
                                data->sfrbases[i] + fault_reg_offset[itype]);
        }

        if (data->domain)
                ret = report_iommu_fault(data->domain, data->dev,
                                addr, itype);

        if ((ret == -ENOSYS) && data->fault_handler) {
                unsigned long base = data->pgtable;
                if (itype != SYSMMU_FAULT_UNKNOWN)
                        base = __raw_readl(
                                        data->sfrbases[i] + REG_PT_BASE_ADDR);
                ret = data->fault_handler(itype, base, addr);
        }

        if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
                __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
        else
                dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
                                data->dbgname, sysmmu_fault_name[itype]);

        if (itype != SYSMMU_FAULT_UNKNOWN)
                sysmmu_unblock(data->sfrbases[i]);

        read_unlock(&data->lock);

        return IRQ_HANDLED;
}

static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
        unsigned long flags;
        bool disabled = false;
        int i;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_inactive(data))
                goto finish;

        for (i = 0; i < data->nsfrs; i++)
                __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);

        if (data->clk[1])
                clk_disable(data->clk[1]);
        if (data->clk[0])
                clk_disable(data->clk[0]);

        disabled = true;
        data->pgtable = 0;
        data->domain = NULL;
finish:
        write_unlock_irqrestore(&data->lock, flags);

        if (disabled)
                dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
        else
                dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
                                        data->dbgname, data->activations);

        return disabled;
}
/* __exynos_sysmmu_enable: Enables the System MMU
 *
 * Returns a negative error code if an error occurred and the System MMU is
 * not enabled, 0 if the System MMU has just been enabled, and 1 if the
 * System MMU was already enabled before the call.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
                        unsigned long pgtable, struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_active(data)) {
                if (WARN_ON(pgtable != data->pgtable)) {
                        ret = -EBUSY;
                        set_sysmmu_inactive(data);
                } else {
                        ret = 1;
                }

                dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
                goto finish;
        }

        if (data->clk[0])
                clk_enable(data->clk[0]);
        if (data->clk[1])
                clk_enable(data->clk[1]);

        data->pgtable = pgtable;

        for (i = 0; i < data->nsfrs; i++) {
                __sysmmu_set_ptbase(data->sfrbases[i], pgtable);

                if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
                        /* System MMU version is 3.x */
                        __raw_writel((1 << 12) | (2 << 28),
                                        data->sfrbases[i] + REG_MMU_CFG);
                        __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
                        __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
                }

                __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
        }

        data->domain = domain;

        dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        int ret;

        BUG_ON(!memblock_is_memory(pgtable));

        ret = pm_runtime_get_sync(data->sysmmu);
        if (ret < 0) {
                dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
                return ret;
        }

        ret = __exynos_sysmmu_enable(data, pgtable, NULL);
        if (WARN_ON(ret < 0)) {
                pm_runtime_put(data->sysmmu);
                dev_err(data->sysmmu,
                        "(%s) Already enabled with page table %#lx\n",
                        data->dbgname, data->pgtable);
        } else {
                data->dev = dev;
        }

        return ret;
}
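
/*
 * Illustrative sketch (not part of the original driver): a client driver
 * brackets its DMA activity with exynos_sysmmu_enable() and
 * exynos_sysmmu_disable(). 'client_dev' and 'pgd_phys' are hypothetical;
 * pgd_phys must be the physical address of a 16KB lv1 page table that lives
 * in system memory:
 *
 *      ret = exynos_sysmmu_enable(client_dev, pgd_phys);
 *      if (ret < 0)
 *              return ret;
 *      ... issue DMA using device virtual addresses ...
 *      exynos_sysmmu_disable(client_dev);
 */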

bool exynos_sysmmu_disable(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        bool disabled;

        disabled = __exynos_sysmmu_disable(data);
        pm_runtime_put(data->sysmmu);

        return disabled;
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (is_sysmmu_active(data)) {
                int i;
                for (i = 0; i < data->nsfrs; i++) {
                        if (sysmmu_block(data->sfrbases[i])) {
                                __sysmmu_tlb_invalidate_entry(
                                                data->sfrbases[i], iova);
                                sysmmu_unblock(data->sfrbases[i]);
                        }
                }
        } else {
                dev_dbg(data->sysmmu,
                        "(%s) Disabled. Skipping invalidating TLB.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (is_sysmmu_active(data)) {
                int i;
                for (i = 0; i < data->nsfrs; i++) {
                        if (sysmmu_block(data->sfrbases[i])) {
                                __sysmmu_tlb_invalidate(data->sfrbases[i]);
                                sysmmu_unblock(data->sfrbases[i]);
                        }
                }
        } else {
                dev_dbg(data->sysmmu,
                        "(%s) Disabled. Skipping invalidating TLB.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
        int i, ret;
        struct device *dev;
        struct sysmmu_drvdata *data;

        dev = &pdev->dev;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = dev_set_drvdata(dev, data);
        if (ret) {
591 | dev_dbg(dev, "Unabled to initialize driver data\n"); |
                goto err_init;
        }

        data->nsfrs = pdev->num_resources / 2;
        data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
                                                                GFP_KERNEL);
        if (data->sfrbases == NULL) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_init;
        }

        for (i = 0; i < data->nsfrs; i++) {
                struct resource *res;
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res) {
                        dev_dbg(dev, "Unable to find IOMEM region\n");
                        ret = -ENOENT;
                        goto err_res;
                }

                data->sfrbases[i] = ioremap(res->start, resource_size(res));
                if (!data->sfrbases[i]) {
                        dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
                                                        res->start);
                        ret = -ENOENT;
                        goto err_res;
                }
        }

        for (i = 0; i < data->nsfrs; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
                        if (!ret)
                                ret = -ENOENT;
                        dev_dbg(dev, "Unable to find IRQ resource\n");
                        goto err_irq;
                }

                ret = request_irq(ret, exynos_sysmmu_irq, 0,
                                        dev_name(dev), data);
                if (ret) {
                        dev_dbg(dev, "Unable to register interrupt handler\n");
                        goto err_irq;
                }
        }

        if (dev_get_platdata(dev)) {
                char *deli, *beg;
                struct sysmmu_platform_data *platdata = dev_get_platdata(dev);

                beg = platdata->clockname;

                for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
                        /* NOTHING */;

                if (*deli == '\0')
                        deli = NULL;
                else
                        *deli = '\0';

                data->clk[0] = clk_get(dev, beg);
                if (IS_ERR(data->clk[0])) {
                        data->clk[0] = NULL;
                        dev_dbg(dev, "No clock descriptor registered\n");
                }

                if (data->clk[0] && deli) {
                        *deli = ',';
                        data->clk[1] = clk_get(dev, deli + 1);
                        if (IS_ERR(data->clk[1]))
                                data->clk[1] = NULL;
                }

                data->dbgname = platdata->dbgname;
        }

        data->sysmmu = dev;
        rwlock_init(&data->lock);
        INIT_LIST_HEAD(&data->node);

        __set_fault_handler(data, &default_fault_handler);

        if (dev->parent)
                pm_runtime_enable(dev);

        dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
        return 0;
err_irq:
        while (i-- > 0) {
                int irq;

                irq = platform_get_irq(pdev, i);
                free_irq(irq, data);
        }
err_res:
        while (data->nsfrs-- > 0)
                iounmap(data->sfrbases[data->nsfrs]);
        kfree(data->sfrbases);
err_init:
        kfree(data);
err_alloc:
        dev_err(dev, "Failed to initialize\n");
        return ret;
}

static struct platform_driver exynos_sysmmu_driver = {
        .probe = exynos_sysmmu_probe,
        .driver = {
                .owner          = THIS_MODULE,
                .name           = "exynos-sysmmu",
        }
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),
                                virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->pgtable = (unsigned long *)__get_free_pages(
                                                GFP_KERNEL | __GFP_ZERO, 2);
        if (!priv->pgtable)
                goto err_pgtable;

        priv->lv2entcnt = (short *)__get_free_pages(
                                                GFP_KERNEL | __GFP_ZERO, 1);
        if (!priv->lv2entcnt)
                goto err_counter;

        pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->pgtablelock);
        INIT_LIST_HEAD(&priv->clients);

        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end   = ~0UL;
        domain->geometry.force_aperture = true;

        domain->priv = priv;
        return 0;

err_counter:
        free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
        kfree(priv);
        return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) {
                while (!exynos_sysmmu_disable(data->dev))
                        ; /* until System MMU is actually disabled */
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kfree(__va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 2);
        free_pages((unsigned long)priv->lv2entcnt, 1);
        kfree(domain->priv);
        domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
                                        struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long flags;
        int ret;

        ret = pm_runtime_get_sync(data->sysmmu);
        if (ret < 0)
                return ret;

        ret = 0;

        spin_lock_irqsave(&priv->lock, flags);

        ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);

        if (ret == 0) {
                /* 'data->node' must not already appear in priv->clients */
                BUG_ON(!list_empty(&data->node));
                data->dev = dev;
                list_add_tail(&data->node, &priv->clients);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
                                __func__, __pa(priv->pgtable));
                pm_runtime_put(data->sysmmu);
        } else if (ret > 0) {
                dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
                                        __func__, __pa(priv->pgtable));
        } else {
                dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
                                        __func__, __pa(priv->pgtable));
        }

        return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
                                        struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct exynos_iommu_domain *priv = domain->priv;
        struct list_head *pos;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each(pos, &priv->clients) {
                if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
                        found = true;
                        break;
                }
        }

        if (!found)
                goto finish;

        if (__exynos_sysmmu_disable(data)) {
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
                                        __func__, __pa(priv->pgtable));
                list_del(&data->node);
                INIT_LIST_HEAD(&data->node);

        } else {
                dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
                                        __func__, __pa(priv->pgtable));
        }

finish:
        spin_unlock_irqrestore(&priv->lock, flags);

        if (found)
                pm_runtime_put(data->sysmmu);
}

static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
                                        short *pgcounter)
{
        if (lv1ent_fault(sent)) {
                unsigned long *pent;

                pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
                if (!pent)
                        return NULL;
                /* the lv2 page table must be aligned to its size (1KB) */
                BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));

                *sent = mk_lv1ent_page(__pa(pent));
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);
        }

        return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
{
        if (lv1ent_section(sent))
                return -EADDRINUSE;

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES)
                        return -EADDRINUSE;

                kfree(page_entry(sent, 0));

                *pgcnt = 0;
        }

        *sent = mk_lv1ent_sect(paddr);

        pgtable_flush(sent, sent + 1);

        return 0;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
                                                                short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (!lv2ent_fault(pent))
                        return -EADDRINUSE;

                *pent = mk_lv2ent_spage(paddr);
                pgtable_flush(pent, pent + 1);
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (!lv2ent_fault(pent)) {
                                /* roll back the entries already written */
                                memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr);
                }
                pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}

static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
                                phys_addr_t paddr, size_t size, int prot)
{
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        int ret = -ENOMEM;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (size == SECT_SIZE) {
                ret = lv1set_section(entry, paddr,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                unsigned long *pent;

                pent = alloc_lv2entry(entry, iova,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);

                if (!pent)
                        ret = -ENOMEM;
                else
                        ret = lv2set_page(pent, paddr, size,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        }

        if (ret) {
                pr_debug("%s: Failed to map iova %#lx/%#zx bytes\n",
                                __func__, iova, size);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return ret;
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
                                        unsigned long iova, size_t size)
{
        struct exynos_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        unsigned long *ent;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = section_entry(priv->pgtable, iova);

        if (lv1ent_section(ent)) {
                BUG_ON(size < SECT_SIZE);

                *ent = 0;
                pgtable_flush(ent, ent + 1);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                *ent = 0;
                size = SPAGE_SIZE;
                priv->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        BUG_ON(size < LPAGE_SIZE);

        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);

        size = LPAGE_SIZE;
        priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        spin_lock_irqsave(&priv->lock, flags);
        list_for_each_entry(data, &priv->clients, node)
                sysmmu_tlb_invalidate_entry(data->dev, iova);
        spin_unlock_irqrestore(&priv->lock, flags);

        return size;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
                                        unsigned long iova)
{
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return phys;
}

static struct iommu_ops exynos_iommu_ops = {
        .domain_init = exynos_iommu_domain_init,
        .domain_destroy = exynos_iommu_domain_destroy,
        .attach_dev = exynos_iommu_attach_device,
        .detach_dev = exynos_iommu_detach_device,
        .map = exynos_iommu_map,
        .unmap = exynos_iommu_unmap,
        .iova_to_phys = exynos_iommu_iova_to_phys,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
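
/*
 * Illustrative sketch (not part of the original driver): once the ops are
 * registered on the platform bus via bus_set_iommu() below, client devices
 * use the generic IOMMU API. 'dev', 'iova' and 'phys' are hypothetical;
 * SZ_4K maps a small page per .pgsize_bitmap above:
 *
 *      struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *      iommu_attach_device(dom, dev);
 *      iommu_map(dom, iova, phys, SZ_4K, 0);
 *      ...
 *      iommu_unmap(dom, iova, SZ_4K);
 *      iommu_detach_device(dom, dev);
 *      iommu_domain_free(dom);
 */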

static int __init exynos_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&exynos_sysmmu_driver);

        if (ret == 0)
                bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);

        return ret;
}
subsys_initcall(exynos_iommu_init);