/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA remapping and interrupt remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that a scan of the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

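/*
 * Resolve one ACPI device-scope entry to a pci_dev.  The entry gives a
 * starting bus number followed by a path of (device, function) hops;
 * each hop is looked up on the current bus, then the walk descends into
 * that device's subordinate bus until the path is exhausted.
 */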
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

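/*
 * Parse a whole device-scope list: first count the ENDPOINT/BRIDGE
 * entries, then allocate the pci_dev array and resolve each entry with
 * dmar_parse_one_dev_scope().
 */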
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

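/*
 * Return non-zero if @dev is covered by an ATS reporting structure for
 * its PCI segment: either the root port leading to the device is listed
 * in the ATSR device scope, or the ATSR covers all ports (include_all).
 */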
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/*
	 * The list cursor is no longer valid once the loop completes, so
	 * report the address the RHSA itself claimed to refer to.
	 */
	WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	     (unsigned long long)rhsa->base_address,
	     dmi_get_system_info(DMI_BIOS_VENDOR),
	     dmi_get_system_info(DMI_BIOS_VERSION),
	     dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have been made
	 * with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

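	/*
	 * "width" is the host address width (HAW) minus one; reject
	 * tables that claim less than a page worth of DMA-addressable
	 * space.
	 */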
	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

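/*
 * Return non-zero if @dev, or any PCI bridge on the path from @dev up
 * to the host bridge, appears in the given device-scope array.
 */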
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static int bios_warned;

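/*
 * Sanity-check every DRHD in the raw table: a register base of zero, or
 * capability registers that read back as all ones, indicate a broken
 * BIOS.  Returns 1 if all units look usable, 0 otherwise (disabling
 * DMAR in the failure case when CONFIG_DMAR is set).
 */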
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     drhd->address,
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *)dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
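/*
 * Map one DRHD's register set, read its capability registers, and
 * compute the supported address widths.  On success the new struct
 * intel_iommu is hung off drhd->iommu.
 */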
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		if (!bios_warned) {
			WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		if (!bios_warned) {
			/* Promote an attitude of violence to a BIOS engineer today */
			WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     drhd->reg_base_addr,
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
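		/*
		 * Submissions always occupy an even/odd pair of slots
		 * (see qi_submit_sync()), so rounding up to the odd
		 * index targets the wait descriptor of the pair.
		 */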
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

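	/*
	 * Pair the caller's descriptor with a wait descriptor whose
	 * completion makes hardware write QI_DONE into our status slot,
	 * so we can poll plain memory instead of a register.
	 */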
	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
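		/*
		 * Encode the span in the low-order address bits: per the
		 * VT-d device-IOTLB invalidate format, the position of
		 * the first zero bit selects the 2^mask-page range.
		 */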
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable the Queued Invalidation interface. This is a must to support
 * interrupt remapping. It is also used by DMA remapping, which replaces
 * register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	/* zeroed so that every slot starts out QI_FREE */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long)qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

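/*
 * Each primary fault record is 128 bits of MMIO space; its high dword
 * (offset 12) carries the Fault (F) bit checked in the loop below.
 */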
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}