1 | /* |
2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation. |
9 | * |
10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ |
19 | |
20 | #include <linux/pci.h> |
21 | #include <linux/acpi.h> |
22 | #include <linux/list.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/syscore_ops.h> |
25 | #include <linux/interrupt.h> |
26 | #include <linux/msi.h> |
27 | #include <linux/amd-iommu.h> |
28 | #include <linux/export.h> |
29 | #include <linux/acpi.h> |
30 | #include <acpi/acpi.h> |
31 | #include <asm/pci-direct.h> |
32 | #include <asm/iommu.h> |
33 | #include <asm/gart.h> |
34 | #include <asm/x86_init.h> |
35 | #include <asm/iommu_table.h> |
36 | |
37 | #include "amd_iommu_proto.h" |
38 | #include "amd_iommu_types.h" |
39 | |
40 | /* |
41 | * definitions for the ACPI scanning code |
42 | */ |
43 | #define IVRS_HEADER_LENGTH 48 |
44 | |
45 | #define ACPI_IVHD_TYPE 0x10 |
46 | #define ACPI_IVMD_TYPE_ALL 0x20 |
47 | #define ACPI_IVMD_TYPE 0x21 |
48 | #define ACPI_IVMD_TYPE_RANGE 0x22 |
49 | |
50 | #define IVHD_DEV_ALL 0x01 |
51 | #define IVHD_DEV_SELECT 0x02 |
52 | #define IVHD_DEV_SELECT_RANGE_START 0x03 |
53 | #define IVHD_DEV_RANGE_END 0x04 |
54 | #define IVHD_DEV_ALIAS 0x42 |
55 | #define IVHD_DEV_ALIAS_RANGE 0x43 |
56 | #define IVHD_DEV_EXT_SELECT 0x46 |
57 | #define IVHD_DEV_EXT_SELECT_RANGE 0x47 |
58 | |
59 | #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 |
60 | #define IVHD_FLAG_PASSPW_EN_MASK 0x02 |
61 | #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 |
62 | #define IVHD_FLAG_ISOC_EN_MASK 0x08 |
63 | |
64 | #define IVMD_FLAG_EXCL_RANGE 0x08 |
65 | #define IVMD_FLAG_UNITY_MAP 0x01 |
66 | |
67 | #define ACPI_DEVFLAG_INITPASS 0x01 |
68 | #define ACPI_DEVFLAG_EXTINT 0x02 |
69 | #define ACPI_DEVFLAG_NMI 0x04 |
70 | #define ACPI_DEVFLAG_SYSMGT1 0x10 |
71 | #define ACPI_DEVFLAG_SYSMGT2 0x20 |
72 | #define ACPI_DEVFLAG_LINT0 0x40 |
73 | #define ACPI_DEVFLAG_LINT1 0x80 |
74 | #define ACPI_DEVFLAG_ATSDIS 0x10000000 |
75 | |
76 | /* |
77 | * ACPI table definitions |
78 | * |
79 | * These data structures are laid over the table to parse the important values |
80 | * out of it. |
81 | */ |
82 | |
83 | /* |
84 | * structure describing one IOMMU in the ACPI table. Typically followed by one |
85 | * or more ivhd_entry structures. |
86 | */ |
87 | struct ivhd_header { |
88 | u8 type; |
89 | u8 flags; |
90 | u16 length; |
91 | u16 devid; |
92 | u16 cap_ptr; |
93 | u64 mmio_phys; |
94 | u16 pci_seg; |
95 | u16 info; |
96 | u32 reserved; |
97 | } __attribute__((packed)); |
98 | |
99 | /* |
100 | * A device entry describing which devices a specific IOMMU translates and |
101 | * which requestor ids they use. |
102 | */ |
103 | struct ivhd_entry { |
104 | u8 type; |
105 | u16 devid; |
106 | u8 flags; |
107 | u32 ext; |
108 | } __attribute__((packed)); |
109 | |
110 | /* |
111 | * An AMD IOMMU memory definition structure. It defines things like exclusion |
112 | * ranges for devices and regions that should be unity mapped. |
113 | */ |
114 | struct ivmd_header { |
115 | u8 type; |
116 | u8 flags; |
117 | u16 length; |
118 | u16 devid; |
119 | u16 aux; |
120 | u64 resv; |
121 | u64 range_start; |
122 | u64 range_length; |
123 | } __attribute__((packed)); |
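/*
 * The parsing code below interprets these fields as follows: for
 * ACPI_IVMD_TYPE the devid field names a single device, for
 * ACPI_IVMD_TYPE_RANGE devid and aux bound a device id range, and
 * range_start/range_length describe the affected memory region.
 */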
124 | |
125 | bool amd_iommu_dump; |
126 | |
127 | static bool amd_iommu_detected; |
128 | static bool __initdata amd_iommu_disabled; |
129 | |
130 | u16 amd_iommu_last_bdf; /* largest PCI device id we have |
131 | to handle */ |
132 | LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings |
133 | we find in ACPI */ |
134 | u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */ |
135 | |
136 | LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the |
137 | system */ |
138 | |
139 | /* Array to assign indices to IOMMUs */ |
140 | struct amd_iommu *amd_iommus[MAX_IOMMUS]; |
141 | int amd_iommus_present; |
142 | |
143 | /* IOMMUs have a non-present cache? */ |
144 | bool amd_iommu_np_cache __read_mostly; |
145 | bool amd_iommu_iotlb_sup __read_mostly = true; |
146 | |
147 | u32 amd_iommu_max_pasids __read_mostly = ~0; |
148 | |
149 | bool amd_iommu_v2_present __read_mostly; |
150 | |
151 | bool amd_iommu_force_isolation __read_mostly; |
152 | |
153 | /* |
154 | * List of protection domains - used during resume |
155 | */ |
156 | LIST_HEAD(amd_iommu_pd_list); |
157 | spinlock_t amd_iommu_pd_lock; |
158 | |
159 | /* |
160 | * Pointer to the device table which is shared by all AMD IOMMUs. |
161 | * It is indexed by the PCI device id or the HT unit id and contains |
162 | * information about the domain the device belongs to as well as the |
163 | * page table root pointer. |
164 | */ |
165 | struct dev_table_entry *amd_iommu_dev_table; |
166 | |
167 | /* |
168 | * The alias table is a driver specific data structure which contains the |
169 | * mappings of the PCI device ids to the actual requestor ids on the IOMMU. |
170 | * More than one device can share the same requestor id. |
171 | */ |
172 | u16 *amd_iommu_alias_table; |
173 | |
174 | /* |
175 | * The rlookup table is used to find the IOMMU which is responsible |
176 | * for a specific device. It is also indexed by the PCI device id. |
177 | */ |
178 | struct amd_iommu **amd_iommu_rlookup_table; |
179 | |
180 | /* |
181 | * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap |
182 | * to know which ones are already in use. |
183 | */ |
184 | unsigned long *amd_iommu_pd_alloc_bitmap; |
185 | |
186 | static u32 dev_table_size; /* size of the device table */ |
187 | static u32 alias_table_size; /* size of the alias table */ |
188 | static u32 rlookup_table_size; /* size of the rlookup table */ |
189 | |
190 | enum iommu_init_state { |
191 | IOMMU_START_STATE, |
192 | IOMMU_IVRS_DETECTED, |
193 | IOMMU_ACPI_FINISHED, |
194 | IOMMU_ENABLED, |
195 | IOMMU_PCI_INIT, |
196 | IOMMU_INTERRUPTS_EN, |
197 | IOMMU_DMA_OPS, |
198 | IOMMU_INITIALIZED, |
199 | IOMMU_NOT_FOUND, |
200 | IOMMU_INIT_ERROR, |
201 | }; |
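/*
 * The states above are ordered to match the order of the initialization
 * steps; iommu_go_to_state() below simply calls state_next() until the
 * requested state (or one of the two error states) is reached.
 */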
202 | |
203 | static enum iommu_init_state init_state = IOMMU_START_STATE; |
204 | |
205 | static int amd_iommu_enable_interrupts(void); |
206 | static int __init iommu_go_to_state(enum iommu_init_state state); |
207 | |
208 | static inline void update_last_devid(u16 devid) |
209 | { |
210 | if (devid > amd_iommu_last_bdf) |
211 | amd_iommu_last_bdf = devid; |
212 | } |
213 | |
214 | static inline unsigned long tbl_size(int entry_size) |
215 | { |
216 | unsigned shift = PAGE_SHIFT + |
217 | get_order(((int)amd_iommu_last_bdf + 1) * entry_size); |
218 | |
219 | return 1UL << shift; |
220 | } |
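/*
 * The size is rounded up to a whole power-of-two number of pages by
 * get_order(). For example, a 32-byte entry size and
 * amd_iommu_last_bdf == 0xffff yield a 2MB table with 4KB pages.
 */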
221 | |
222 | /* Access to l1 and l2 indexed register spaces */ |
223 | |
224 | static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) |
225 | { |
226 | u32 val; |
227 | |
228 | pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); |
229 | pci_read_config_dword(iommu->dev, 0xfc, &val); |
230 | return val; |
231 | } |
232 | |
233 | static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) |
234 | { |
235 | pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); |
236 | pci_write_config_dword(iommu->dev, 0xfc, val); |
237 | pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); |
238 | } |
239 | |
240 | static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) |
241 | { |
242 | u32 val; |
243 | |
244 | pci_write_config_dword(iommu->dev, 0xf0, address); |
245 | pci_read_config_dword(iommu->dev, 0xf4, &val); |
246 | return val; |
247 | } |
248 | |
249 | static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) |
250 | { |
251 | pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); |
252 | pci_write_config_dword(iommu->dev, 0xf4, val); |
253 | } |
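/*
 * The four helpers above implement the indirect L1/L2 register access
 * protocol visible in the code: the index is written to the address port
 * (0xf8 for L1, 0xf0 for L2) and the data is transferred through the data
 * port (0xfc respectively 0xf4). Setting the extra bit in the address
 * (bit 31 for L1, bit 8 for L2) marks the access as a write.
 */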
254 | |
255 | /**************************************************************************** |
256 | * |
257 | * AMD IOMMU MMIO register space handling functions |
258 | * |
259 | * These functions are used to program the IOMMU device registers in |
260 | * MMIO space required by this driver. |
261 | * |
262 | ****************************************************************************/ |
263 | |
264 | /* |
265 | * This function sets the exclusion range in the IOMMU. DMA accesses to the |
266 | * exclusion range are passed through untranslated. |
267 | */ |
268 | static void iommu_set_exclusion_range(struct amd_iommu *iommu) |
269 | { |
270 | u64 start = iommu->exclusion_start & PAGE_MASK; |
271 | u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; |
272 | u64 entry; |
273 | |
274 | if (!iommu->exclusion_start) |
275 | return; |
276 | |
277 | entry = start | MMIO_EXCL_ENABLE_MASK; |
278 | memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, |
279 | &entry, sizeof(entry)); |
280 | |
281 | entry = limit; |
282 | memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, |
283 | &entry, sizeof(entry)); |
284 | } |
285 | |
286 | /* Programs the physical address of the device table into the IOMMU hardware */ |
287 | static void iommu_set_device_table(struct amd_iommu *iommu) |
288 | { |
289 | u64 entry; |
290 | |
291 | BUG_ON(iommu->mmio_base == NULL); |
292 | |
293 | entry = virt_to_phys(amd_iommu_dev_table); |
294 | entry |= (dev_table_size >> 12) - 1; |
295 | memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, |
296 | &entry, sizeof(entry)); |
297 | } |
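/*
 * The value written above packs the physical base address of the device
 * table together with its size: the low bits hold the size in 4KB units
 * minus one, which is what (dev_table_size >> 12) - 1 computes.
 */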
298 | |
299 | /* Generic functions to enable/disable certain features of the IOMMU. */ |
300 | static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) |
301 | { |
302 | u32 ctrl; |
303 | |
304 | ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); |
305 | ctrl |= (1 << bit); |
306 | writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); |
307 | } |
308 | |
309 | static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) |
310 | { |
311 | u32 ctrl; |
312 | |
313 | ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); |
314 | ctrl &= ~(1 << bit); |
315 | writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); |
316 | } |
317 | |
318 | static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) |
319 | { |
320 | u32 ctrl; |
321 | |
322 | ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); |
323 | ctrl &= ~CTRL_INV_TO_MASK; |
324 | ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; |
325 | writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); |
326 | } |
327 | |
328 | /* Function to enable the hardware */ |
329 | static void iommu_enable(struct amd_iommu *iommu) |
330 | { |
331 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); |
332 | } |
333 | |
334 | static void iommu_disable(struct amd_iommu *iommu) |
335 | { |
336 | /* Disable command buffer */ |
337 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); |
338 | |
339 | /* Disable event logging and event interrupts */ |
340 | iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); |
341 | iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); |
342 | |
343 | /* Disable IOMMU hardware itself */ |
344 | iommu_feature_disable(iommu, CONTROL_IOMMU_EN); |
345 | } |
346 | |
347 | /* |
348 | * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in |
349 | * the system has one. |
350 | */ |
351 | static u8 __iomem * __init iommu_map_mmio_space(u64 address) |
352 | { |
353 | if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) { |
354 | pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n", |
355 | address); |
356 | pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); |
357 | return NULL; |
358 | } |
359 | |
360 | return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH); |
361 | } |
362 | |
363 | static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) |
364 | { |
365 | if (iommu->mmio_base) |
366 | iounmap(iommu->mmio_base); |
367 | release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH); |
368 | } |
369 | |
370 | /**************************************************************************** |
371 | * |
372 | * The functions below belong to the first pass of AMD IOMMU ACPI table |
373 | * parsing. In this pass we try to find out the highest device id this |
374 | * code has to handle. Based on this information the size of the shared data |
375 | * structures is determined later. |
376 | * |
377 | ****************************************************************************/ |
378 | |
379 | /* |
380 | * This function calculates the length of a given IVHD entry |
381 | */ |
382 | static inline int ivhd_entry_length(u8 *ivhd) |
383 | { |
384 | return 0x04 << (*ivhd >> 6); |
385 | } |
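/*
 * The length is encoded in the two most significant bits of the entry
 * type byte: 0x04 << 0 = 4, 0x04 << 1 = 8, 0x04 << 2 = 16 and
 * 0x04 << 3 = 32 bytes.
 */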
386 | |
387 | /* |
388 | * This function reads the last device id the IOMMU has to handle from the PCI |
389 | * capability header for this IOMMU |
390 | */ |
391 | static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) |
392 | { |
393 | u32 cap; |
394 | |
395 | cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); |
396 | update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); |
397 | |
398 | return 0; |
399 | } |
400 | |
401 | /* |
402 | * After reading the highest device id from the IOMMU PCI capability header |
403 | * this function checks whether a higher device id is defined in the ACPI table |
404 | */ |
405 | static int __init find_last_devid_from_ivhd(struct ivhd_header *h) |
406 | { |
407 | u8 *p = (void *)h, *end = (void *)h; |
408 | struct ivhd_entry *dev; |
409 | |
410 | p += sizeof(*h); |
411 | end += h->length; |
412 | |
413 | find_last_devid_on_pci(PCI_BUS(h->devid), |
414 | PCI_SLOT(h->devid), |
415 | PCI_FUNC(h->devid), |
416 | h->cap_ptr); |
417 | |
418 | while (p < end) { |
419 | dev = (struct ivhd_entry *)p; |
420 | switch (dev->type) { |
421 | case IVHD_DEV_SELECT: |
422 | case IVHD_DEV_RANGE_END: |
423 | case IVHD_DEV_ALIAS: |
424 | case IVHD_DEV_EXT_SELECT: |
425 | /* all the above subfield types refer to device ids */ |
426 | update_last_devid(dev->devid); |
427 | break; |
428 | default: |
429 | break; |
430 | } |
431 | p += ivhd_entry_length(p); |
432 | } |
433 | |
434 | WARN_ON(p != end); |
435 | |
436 | return 0; |
437 | } |
438 | |
439 | /* |
440 | * Iterate over all IVHD entries in the ACPI table and find the highest device |
441 | * id which we need to handle. This is the first of three functions which parse |
442 | * the ACPI table. So we check the checksum here. |
443 | */ |
444 | static int __init find_last_devid_acpi(struct acpi_table_header *table) |
445 | { |
446 | int i; |
447 | u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; |
448 | struct ivhd_header *h; |
449 | |
450 | /* |
451 | * Validate checksum here so we don't need to do it when |
452 | * we actually parse the table |
453 | */ |
454 | for (i = 0; i < table->length; ++i) |
455 | checksum += p[i]; |
456 | if (checksum != 0) |
457 | /* ACPI table corrupt */ |
458 | return -ENODEV; |
459 | |
460 | p += IVRS_HEADER_LENGTH; |
461 | |
462 | end += table->length; |
463 | while (p < end) { |
464 | h = (struct ivhd_header *)p; |
465 | switch (h->type) { |
466 | case ACPI_IVHD_TYPE: |
467 | find_last_devid_from_ivhd(h); |
468 | break; |
469 | default: |
470 | break; |
471 | } |
472 | p += h->length; |
473 | } |
474 | WARN_ON(p != end); |
475 | |
476 | return 0; |
477 | } |
478 | |
479 | /**************************************************************************** |
480 | * |
481 | * The following functions belong to the code path which parses the ACPI table |
482 | * the second time. In this ACPI parsing iteration we allocate IOMMU specific |
483 | * data structures, initialize the device/alias/rlookup table and also |
484 | * basically initialize the hardware. |
485 | * |
486 | ****************************************************************************/ |
487 | |
488 | /* |
489 | * Allocates the command buffer. This buffer is per AMD IOMMU. We can |
490 | * write commands to that buffer later and the IOMMU will execute them |
491 | * asynchronously |
492 | */ |
493 | static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) |
494 | { |
495 | u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
496 | get_order(CMD_BUFFER_SIZE)); |
497 | |
498 | if (cmd_buf == NULL) |
499 | return NULL; |
500 | |
501 | iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; |
502 | |
503 | return cmd_buf; |
504 | } |
505 | |
506 | /* |
507 | * This function resets the command buffer if the IOMMU stopped fetching |
508 | * commands from it. |
509 | */ |
510 | void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) |
511 | { |
512 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); |
513 | |
514 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); |
515 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
516 | |
517 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); |
518 | } |
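/*
 * The command buffer is a ring: the driver writes commands at the tail
 * and the IOMMU fetches them from the head. Resetting both pointers to
 * zero while CMDBUF_EN is cleared therefore leaves an empty, consistent
 * ring before fetching is re-enabled.
 */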
519 | |
520 | /* |
521 | * This function writes the command buffer address to the hardware and |
522 | * enables it. |
523 | */ |
524 | static void iommu_enable_command_buffer(struct amd_iommu *iommu) |
525 | { |
526 | u64 entry; |
527 | |
528 | BUG_ON(iommu->cmd_buf == NULL); |
529 | |
530 | entry = (u64)virt_to_phys(iommu->cmd_buf); |
531 | entry |= MMIO_CMD_SIZE_512; |
532 | |
533 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, |
534 | &entry, sizeof(entry)); |
535 | |
536 | amd_iommu_reset_cmd_buffer(iommu); |
537 | iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); |
538 | } |
539 | |
540 | static void __init free_command_buffer(struct amd_iommu *iommu) |
541 | { |
542 | free_pages((unsigned long)iommu->cmd_buf, |
543 | get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); |
544 | } |
545 | |
546 | /* allocates the memory where the IOMMU will log its events to */ |
547 | static u8 * __init alloc_event_buffer(struct amd_iommu *iommu) |
548 | { |
549 | iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
550 | get_order(EVT_BUFFER_SIZE)); |
551 | |
552 | if (iommu->evt_buf == NULL) |
553 | return NULL; |
554 | |
555 | iommu->evt_buf_size = EVT_BUFFER_SIZE; |
556 | |
557 | return iommu->evt_buf; |
558 | } |
559 | |
560 | static void iommu_enable_event_buffer(struct amd_iommu *iommu) |
561 | { |
562 | u64 entry; |
563 | |
564 | BUG_ON(iommu->evt_buf == NULL); |
565 | |
566 | entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; |
567 | |
568 | memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, |
569 | &entry, sizeof(entry)); |
570 | |
571 | /* set head and tail to zero manually */ |
572 | writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); |
573 | writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); |
574 | |
575 | iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); |
576 | } |
577 | |
578 | static void __init free_event_buffer(struct amd_iommu *iommu) |
579 | { |
580 | free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); |
581 | } |
582 | |
583 | /* allocates the memory where the IOMMU will write the PPR log entries to */ |
584 | static u8 * __init alloc_ppr_log(struct amd_iommu *iommu) |
585 | { |
586 | iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
587 | get_order(PPR_LOG_SIZE)); |
588 | |
589 | if (iommu->ppr_log == NULL) |
590 | return NULL; |
591 | |
592 | return iommu->ppr_log; |
593 | } |
594 | |
595 | static void iommu_enable_ppr_log(struct amd_iommu *iommu) |
596 | { |
597 | u64 entry; |
598 | |
599 | if (iommu->ppr_log == NULL) |
600 | return; |
601 | |
602 | entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; |
603 | |
604 | memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, |
605 | &entry, sizeof(entry)); |
606 | |
607 | /* set head and tail to zero manually */ |
608 | writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
609 | writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
610 | |
611 | iommu_feature_enable(iommu, CONTROL_PPFLOG_EN); |
612 | iommu_feature_enable(iommu, CONTROL_PPR_EN); |
613 | } |
614 | |
615 | static void __init free_ppr_log(struct amd_iommu *iommu) |
616 | { |
617 | if (iommu->ppr_log == NULL) |
618 | return; |
619 | |
620 | free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); |
621 | } |
622 | |
623 | static void iommu_enable_gt(struct amd_iommu *iommu) |
624 | { |
625 | if (!iommu_feature(iommu, FEATURE_GT)) |
626 | return; |
627 | |
628 | iommu_feature_enable(iommu, CONTROL_GT_EN); |
629 | } |
630 | |
631 | /* sets a specific bit in the device table entry. */ |
632 | static void set_dev_entry_bit(u16 devid, u8 bit) |
633 | { |
634 | int i = (bit >> 6) & 0x03; |
635 | int _bit = bit & 0x3f; |
636 | |
637 | amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); |
638 | } |
639 | |
640 | static int get_dev_entry_bit(u16 devid, u8 bit) |
641 | { |
642 | int i = (bit >> 6) & 0x03; |
643 | int _bit = bit & 0x3f; |
644 | |
645 | return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; |
646 | } |
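/*
 * Device table entries are stored as an array of 64-bit words, so a flat
 * bit number is split into a word index (bit >> 6) and a bit position
 * within that word (bit & 0x3f) by the two helpers above.
 */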
647 | |
648 | |
649 | void amd_iommu_apply_erratum_63(u16 devid) |
650 | { |
651 | int sysmgt; |
652 | |
653 | sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | |
654 | (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); |
655 | |
656 | if (sysmgt == 0x01) |
657 | set_dev_entry_bit(devid, DEV_ENTRY_IW); |
658 | } |
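/*
 * Workaround for erratum 63: when the SysMgt field of a device table
 * entry is exactly 01b the IW bit has to be set as well. The erratum
 * itself is documented in AMD's revision guides.
 */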
659 | |
660 | /* Writes the specific IOMMU for a device into the rlookup table */ |
661 | static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) |
662 | { |
663 | amd_iommu_rlookup_table[devid] = iommu; |
664 | } |
665 | |
666 | /* |
667 | * This function takes the device specific flags read from the ACPI |
668 | * table and sets up the device table entry with that information |
669 | */ |
670 | static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, |
671 | u16 devid, u32 flags, u32 ext_flags) |
672 | { |
673 | if (flags & ACPI_DEVFLAG_INITPASS) |
674 | set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); |
675 | if (flags & ACPI_DEVFLAG_EXTINT) |
676 | set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); |
677 | if (flags & ACPI_DEVFLAG_NMI) |
678 | set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); |
679 | if (flags & ACPI_DEVFLAG_SYSMGT1) |
680 | set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); |
681 | if (flags & ACPI_DEVFLAG_SYSMGT2) |
682 | set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); |
683 | if (flags & ACPI_DEVFLAG_LINT0) |
684 | set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); |
685 | if (flags & ACPI_DEVFLAG_LINT1) |
686 | set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); |
687 | |
688 | amd_iommu_apply_erratum_63(devid); |
689 | |
690 | set_iommu_for_device(iommu, devid); |
691 | } |
692 | |
693 | /* |
694 | * Reads the device exclusion range from ACPI and initializes the IOMMU with |
695 | * it. |
696 | */ |
697 | static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) |
698 | { |
699 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; |
700 | |
701 | if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) |
702 | return; |
703 | |
704 | if (iommu) { |
705 | /* |
706 | * We can only configure exclusion ranges per IOMMU, not |
707 | * per device. But we can enable the exclusion range per |
708 | * device. This is done here. |
709 | */ |
710 | set_dev_entry_bit(m->devid, DEV_ENTRY_EX); |
711 | iommu->exclusion_start = m->range_start; |
712 | iommu->exclusion_length = m->range_length; |
713 | } |
714 | } |
715 | |
716 | /* |
717 | * Takes a pointer to an AMD IOMMU entry in the ACPI table and |
718 | * initializes the hardware and our data structures with it. |
719 | */ |
720 | static void __init init_iommu_from_acpi(struct amd_iommu *iommu, |
721 | struct ivhd_header *h) |
722 | { |
723 | u8 *p = (u8 *)h; |
724 | u8 *end = p, flags = 0; |
725 | u16 devid = 0, devid_start = 0, devid_to = 0; |
726 | u32 dev_i, ext_flags = 0; |
727 | bool alias = false; |
728 | struct ivhd_entry *e; |
729 | |
730 | /* |
731 | * First save the recommended feature enable bits from ACPI |
732 | */ |
733 | iommu->acpi_flags = h->flags; |
734 | |
735 | /* |
736 | * Done. Now parse the device entries |
737 | */ |
738 | p += sizeof(struct ivhd_header); |
739 | end += h->length; |
740 | |
741 | |
742 | while (p < end) { |
743 | e = (struct ivhd_entry *)p; |
744 | switch (e->type) { |
745 | case IVHD_DEV_ALL: |
746 | |
747 | DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x" |
748 | " last device %02x:%02x.%x flags: %02x\n", |
749 | PCI_BUS(iommu->first_device), |
750 | PCI_SLOT(iommu->first_device), |
751 | PCI_FUNC(iommu->first_device), |
752 | PCI_BUS(iommu->last_device), |
753 | PCI_SLOT(iommu->last_device), |
754 | PCI_FUNC(iommu->last_device), |
755 | e->flags); |
756 | |
757 | for (dev_i = iommu->first_device; |
758 | dev_i <= iommu->last_device; ++dev_i) |
759 | set_dev_entry_from_acpi(iommu, dev_i, |
760 | e->flags, 0); |
761 | break; |
762 | case IVHD_DEV_SELECT: |
763 | |
764 | DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " |
765 | "flags: %02x\n", |
766 | PCI_BUS(e->devid), |
767 | PCI_SLOT(e->devid), |
768 | PCI_FUNC(e->devid), |
769 | e->flags); |
770 | |
771 | devid = e->devid; |
772 | set_dev_entry_from_acpi(iommu, devid, e->flags, 0); |
773 | break; |
774 | case IVHD_DEV_SELECT_RANGE_START: |
775 | |
776 | DUMP_printk(" DEV_SELECT_RANGE_START\t " |
777 | "devid: %02x:%02x.%x flags: %02x\n", |
778 | PCI_BUS(e->devid), |
779 | PCI_SLOT(e->devid), |
780 | PCI_FUNC(e->devid), |
781 | e->flags); |
782 | |
783 | devid_start = e->devid; |
784 | flags = e->flags; |
785 | ext_flags = 0; |
786 | alias = false; |
787 | break; |
788 | case IVHD_DEV_ALIAS: |
789 | |
790 | DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " |
791 | "flags: %02x devid_to: %02x:%02x.%x\n", |
792 | PCI_BUS(e->devid), |
793 | PCI_SLOT(e->devid), |
794 | PCI_FUNC(e->devid), |
795 | e->flags, |
796 | PCI_BUS(e->ext >> 8), |
797 | PCI_SLOT(e->ext >> 8), |
798 | PCI_FUNC(e->ext >> 8)); |
799 | |
800 | devid = e->devid; |
801 | devid_to = e->ext >> 8; |
802 | set_dev_entry_from_acpi(iommu, devid , e->flags, 0); |
803 | set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); |
804 | amd_iommu_alias_table[devid] = devid_to; |
805 | break; |
806 | case IVHD_DEV_ALIAS_RANGE: |
807 | |
808 | DUMP_printk(" DEV_ALIAS_RANGE\t\t " |
809 | "devid: %02x:%02x.%x flags: %02x " |
810 | "devid_to: %02x:%02x.%x\n", |
811 | PCI_BUS(e->devid), |
812 | PCI_SLOT(e->devid), |
813 | PCI_FUNC(e->devid), |
814 | e->flags, |
815 | PCI_BUS(e->ext >> 8), |
816 | PCI_SLOT(e->ext >> 8), |
817 | PCI_FUNC(e->ext >> 8)); |
818 | |
819 | devid_start = e->devid; |
820 | flags = e->flags; |
821 | devid_to = e->ext >> 8; |
822 | ext_flags = 0; |
823 | alias = true; |
824 | break; |
825 | case IVHD_DEV_EXT_SELECT: |
826 | |
827 | DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " |
828 | "flags: %02x ext: %08x\n", |
829 | PCI_BUS(e->devid), |
830 | PCI_SLOT(e->devid), |
831 | PCI_FUNC(e->devid), |
832 | e->flags, e->ext); |
833 | |
834 | devid = e->devid; |
835 | set_dev_entry_from_acpi(iommu, devid, e->flags, |
836 | e->ext); |
837 | break; |
838 | case IVHD_DEV_EXT_SELECT_RANGE: |
839 | |
840 | DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " |
841 | "%02x:%02x.%x flags: %02x ext: %08x\n", |
842 | PCI_BUS(e->devid), |
843 | PCI_SLOT(e->devid), |
844 | PCI_FUNC(e->devid), |
845 | e->flags, e->ext); |
846 | |
847 | devid_start = e->devid; |
848 | flags = e->flags; |
849 | ext_flags = e->ext; |
850 | alias = false; |
851 | break; |
852 | case IVHD_DEV_RANGE_END: |
853 | |
854 | DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", |
855 | PCI_BUS(e->devid), |
856 | PCI_SLOT(e->devid), |
857 | PCI_FUNC(e->devid)); |
858 | |
859 | devid = e->devid; |
860 | for (dev_i = devid_start; dev_i <= devid; ++dev_i) { |
861 | if (alias) { |
862 | amd_iommu_alias_table[dev_i] = devid_to; |
863 | set_dev_entry_from_acpi(iommu, |
864 | devid_to, flags, ext_flags); |
865 | } |
866 | set_dev_entry_from_acpi(iommu, dev_i, |
867 | flags, ext_flags); |
868 | } |
869 | break; |
870 | default: |
871 | break; |
872 | } |
873 | |
874 | p += ivhd_entry_length(p); |
875 | } |
876 | } |
877 | |
878 | /* Initializes the device->iommu mapping for the driver */ |
879 | static int __init init_iommu_devices(struct amd_iommu *iommu) |
880 | { |
881 | u32 i; |
882 | |
883 | for (i = iommu->first_device; i <= iommu->last_device; ++i) |
884 | set_iommu_for_device(iommu, i); |
885 | |
886 | return 0; |
887 | } |
888 | |
889 | static void __init free_iommu_one(struct amd_iommu *iommu) |
890 | { |
891 | free_command_buffer(iommu); |
892 | free_event_buffer(iommu); |
893 | free_ppr_log(iommu); |
894 | iommu_unmap_mmio_space(iommu); |
895 | } |
896 | |
897 | static void __init free_iommu_all(void) |
898 | { |
899 | struct amd_iommu *iommu, *next; |
900 | |
901 | for_each_iommu_safe(iommu, next) { |
902 | list_del(&iommu->list); |
903 | free_iommu_one(iommu); |
904 | kfree(iommu); |
905 | } |
906 | } |
907 | |
908 | /* |
909 | * This function glues the initialization functions for one IOMMU |
910 | * together and also allocates the command buffer and programs the |
911 | * hardware. It does NOT enable the IOMMU. This is done afterwards. |
912 | */ |
913 | static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) |
914 | { |
915 | spin_lock_init(&iommu->lock); |
916 | |
917 | /* Add IOMMU to internal data structures */ |
918 | list_add_tail(&iommu->list, &amd_iommu_list); |
919 | iommu->index = amd_iommus_present++; |
920 | |
921 | if (unlikely(iommu->index >= MAX_IOMMUS)) { |
922 | WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); |
923 | return -ENOSYS; |
924 | } |
925 | |
926 | /* Index is fine - add IOMMU to the array */ |
927 | amd_iommus[iommu->index] = iommu; |
928 | |
929 | /* |
930 | * Copy data from ACPI table entry to the iommu struct |
931 | */ |
932 | iommu->devid = h->devid; |
933 | iommu->cap_ptr = h->cap_ptr; |
934 | iommu->pci_seg = h->pci_seg; |
935 | iommu->mmio_phys = h->mmio_phys; |
936 | iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys); |
937 | if (!iommu->mmio_base) |
938 | return -ENOMEM; |
939 | |
940 | iommu->cmd_buf = alloc_command_buffer(iommu); |
941 | if (!iommu->cmd_buf) |
942 | return -ENOMEM; |
943 | |
944 | iommu->evt_buf = alloc_event_buffer(iommu); |
945 | if (!iommu->evt_buf) |
946 | return -ENOMEM; |
947 | |
948 | iommu->int_enabled = false; |
949 | |
950 | init_iommu_from_acpi(iommu, h); |
951 | init_iommu_devices(iommu); |
952 | |
953 | return 0; |
954 | } |
955 | |
956 | /* |
957 | * Iterates over all IOMMU entries in the ACPI table, allocates the |
958 | * IOMMU structure and initializes it with init_iommu_one() |
959 | */ |
960 | static int __init init_iommu_all(struct acpi_table_header *table) |
961 | { |
962 | u8 *p = (u8 *)table, *end = (u8 *)table; |
963 | struct ivhd_header *h; |
964 | struct amd_iommu *iommu; |
965 | int ret; |
966 | |
967 | end += table->length; |
968 | p += IVRS_HEADER_LENGTH; |
969 | |
970 | while (p < end) { |
971 | h = (struct ivhd_header *)p; |
972 | switch (*p) { |
973 | case ACPI_IVHD_TYPE: |
974 | |
975 | DUMP_printk("device: %02x:%02x.%01x cap: %04x " |
976 | "seg: %d flags: %01x info %04x\n", |
977 | PCI_BUS(h->devid), PCI_SLOT(h->devid), |
978 | PCI_FUNC(h->devid), h->cap_ptr, |
979 | h->pci_seg, h->flags, h->info); |
980 | DUMP_printk(" mmio-addr: %016llx\n", |
981 | h->mmio_phys); |
982 | |
983 | iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); |
984 | if (iommu == NULL) |
985 | return -ENOMEM; |
986 | |
987 | ret = init_iommu_one(iommu, h); |
988 | if (ret) |
989 | return ret; |
990 | break; |
991 | default: |
992 | break; |
993 | } |
994 | p += h->length; |
995 | |
996 | } |
997 | WARN_ON(p != end); |
998 | |
999 | return 0; |
1000 | } |
1001 | |
1002 | static int iommu_init_pci(struct amd_iommu *iommu) |
1003 | { |
1004 | int cap_ptr = iommu->cap_ptr; |
1005 | u32 range, misc, low, high; |
1006 | |
1007 | iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid), |
1008 | iommu->devid & 0xff); |
1009 | if (!iommu->dev) |
1010 | return -ENODEV; |
1011 | |
1012 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, |
1013 | &iommu->cap); |
1014 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, |
1015 | &range); |
1016 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, |
1017 | &misc); |
1018 | |
1019 | iommu->first_device = calc_devid(MMIO_GET_BUS(range), |
1020 | MMIO_GET_FD(range)); |
1021 | iommu->last_device = calc_devid(MMIO_GET_BUS(range), |
1022 | MMIO_GET_LD(range)); |
1023 | |
1024 | if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) |
1025 | amd_iommu_iotlb_sup = false; |
1026 | |
1027 | /* read extended feature bits */ |
1028 | low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); |
1029 | high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); |
1030 | |
1031 | iommu->features = ((u64)high << 32) | low; |
1032 | |
1033 | if (iommu_feature(iommu, FEATURE_GT)) { |
1034 | int glxval; |
1035 | u32 pasids; |
1036 | u64 shift; |
1037 | |
1038 | shift = iommu->features & FEATURE_PASID_MASK; |
1039 | shift >>= FEATURE_PASID_SHIFT; |
1040 | pasids = (1 << shift); |
1041 | |
1042 | amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids); |
1043 | |
1044 | glxval = iommu->features & FEATURE_GLXVAL_MASK; |
1045 | glxval >>= FEATURE_GLXVAL_SHIFT; |
1046 | |
1047 | if (amd_iommu_max_glx_val == -1) |
1048 | amd_iommu_max_glx_val = glxval; |
1049 | else |
1050 | amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); |
1051 | } |
1052 | |
1053 | if (iommu_feature(iommu, FEATURE_GT) && |
1054 | iommu_feature(iommu, FEATURE_PPR)) { |
1055 | iommu->is_iommu_v2 = true; |
1056 | amd_iommu_v2_present = true; |
1057 | } |
1058 | |
1059 | if (iommu_feature(iommu, FEATURE_PPR)) { |
1060 | iommu->ppr_log = alloc_ppr_log(iommu); |
1061 | if (!iommu->ppr_log) |
1062 | return -ENOMEM; |
1063 | } |
1064 | |
1065 | if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) |
1066 | amd_iommu_np_cache = true; |
1067 | |
1068 | if (is_rd890_iommu(iommu->dev)) { |
1069 | int i, j; |
1070 | |
1071 | iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, |
1072 | PCI_DEVFN(0, 0)); |
1073 | |
1074 | /* |
1075 | * Some rd890 systems may not be fully reconfigured by the |
1076 | * BIOS, so it's necessary for us to store this information so |
1077 | * it can be reprogrammed on resume |
1078 | */ |
1079 | pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, |
1080 | &iommu->stored_addr_lo); |
1081 | pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, |
1082 | &iommu->stored_addr_hi); |
1083 | |
1084 | /* Low bit locks writes to configuration space */ |
1085 | iommu->stored_addr_lo &= ~1; |
1086 | |
1087 | for (i = 0; i < 6; i++) |
1088 | for (j = 0; j < 0x12; j++) |
1089 | iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); |
1090 | |
1091 | for (i = 0; i < 0x83; i++) |
1092 | iommu->stored_l2[i] = iommu_read_l2(iommu, i); |
1093 | } |
1094 | |
1095 | return pci_enable_device(iommu->dev); |
1096 | } |
1097 | |
1098 | static void print_iommu_info(void) |
1099 | { |
1100 | static const char * const feat_str[] = { |
1101 | "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", |
1102 | "IA", "GA", "HE", "PC" |
1103 | }; |
1104 | struct amd_iommu *iommu; |
1105 | |
1106 | for_each_iommu(iommu) { |
1107 | int i; |
1108 | |
1109 | pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n", |
1110 | dev_name(&iommu->dev->dev), iommu->cap_ptr); |
1111 | |
1112 | if (iommu->cap & (1 << IOMMU_CAP_EFR)) { |
1113 | pr_info("AMD-Vi: Extended features: "); |
1114 | for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { |
1115 | if (iommu_feature(iommu, (1ULL << i))) |
1116 | pr_cont(" %s", feat_str[i]); |
1117 | } |
1118 | } |
1119 | pr_cont("\n"); |
1120 | } |
1121 | } |
1122 | |
1123 | static int __init amd_iommu_init_pci(void) |
1124 | { |
1125 | struct amd_iommu *iommu; |
1126 | int ret = 0; |
1127 | |
1128 | for_each_iommu(iommu) { |
1129 | ret = iommu_init_pci(iommu); |
1130 | if (ret) |
1131 | break; |
1132 | } |
1133 | |
1134 | ret = amd_iommu_init_devices(); |
1135 | |
1136 | print_iommu_info(); |
1137 | |
1138 | return ret; |
1139 | } |
1140 | |
1141 | /**************************************************************************** |
1142 | * |
1143 | * The following functions initialize the MSI interrupts for all IOMMUs |
1144 | * in the system. It's a bit challenging because there could be multiple |
1145 | * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per |
1146 | * pci_dev. |
1147 | * |
1148 | ****************************************************************************/ |
1149 | |
1150 | static int iommu_setup_msi(struct amd_iommu *iommu) |
1151 | { |
1152 | int r; |
1153 | |
1154 | r = pci_enable_msi(iommu->dev); |
1155 | if (r) |
1156 | return r; |
1157 | |
1158 | r = request_threaded_irq(iommu->dev->irq, |
1159 | amd_iommu_int_handler, |
1160 | amd_iommu_int_thread, |
1161 | 0, "AMD-Vi", |
1162 | iommu->dev); |
1163 | |
1164 | if (r) { |
1165 | pci_disable_msi(iommu->dev); |
1166 | return r; |
1167 | } |
1168 | |
1169 | iommu->int_enabled = true; |
1170 | |
1171 | return 0; |
1172 | } |
1173 | |
1174 | static int iommu_init_msi(struct amd_iommu *iommu) |
1175 | { |
1176 | int ret; |
1177 | |
1178 | if (iommu->int_enabled) |
1179 | goto enable_faults; |
1180 | |
1181 | if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI)) |
1182 | ret = iommu_setup_msi(iommu); |
1183 | else |
1184 | ret = -ENODEV; |
1185 | |
1186 | if (ret) |
1187 | return ret; |
1188 | |
1189 | enable_faults: |
1190 | iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); |
1191 | |
1192 | if (iommu->ppr_log != NULL) |
1193 | iommu_feature_enable(iommu, CONTROL_PPFINT_EN); |
1194 | |
1195 | return 0; |
1196 | } |
1197 | |
1198 | /**************************************************************************** |
1199 | * |
1200 | * The next functions belong to the third pass of parsing the ACPI |
1201 | * table. In this last pass the memory mapping requirements are |
1202 | * gathered (like exclusion and unity mapping ranges). |
1203 | * |
1204 | ****************************************************************************/ |
1205 | |
1206 | static void __init free_unity_maps(void) |
1207 | { |
1208 | struct unity_map_entry *entry, *next; |
1209 | |
1210 | list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { |
1211 | list_del(&entry->list); |
1212 | kfree(entry); |
1213 | } |
1214 | } |
1215 | |
1216 | /* called when we find an exclusion range definition in ACPI */ |
1217 | static int __init init_exclusion_range(struct ivmd_header *m) |
1218 | { |
1219 | int i; |
1220 | |
1221 | switch (m->type) { |
1222 | case ACPI_IVMD_TYPE: |
1223 | set_device_exclusion_range(m->devid, m); |
1224 | break; |
1225 | case ACPI_IVMD_TYPE_ALL: |
1226 | for (i = 0; i <= amd_iommu_last_bdf; ++i) |
1227 | set_device_exclusion_range(i, m); |
1228 | break; |
1229 | case ACPI_IVMD_TYPE_RANGE: |
1230 | for (i = m->devid; i <= m->aux; ++i) |
1231 | set_device_exclusion_range(i, m); |
1232 | break; |
1233 | default: |
1234 | break; |
1235 | } |
1236 | |
1237 | return 0; |
1238 | } |
1239 | |
1240 | /* called for unity map ACPI definition */ |
1241 | static int __init init_unity_map_range(struct ivmd_header *m) |
1242 | { |
1243 | struct unity_map_entry *e = NULL; |
1244 | char *s; |
1245 | |
1246 | e = kzalloc(sizeof(*e), GFP_KERNEL); |
1247 | if (e == NULL) |
1248 | return -ENOMEM; |
1249 | |
1250 | switch (m->type) { |
1251 | default: |
1252 | kfree(e); |
1253 | return 0; |
1254 | case ACPI_IVMD_TYPE: |
1255 | s = "IVMD_TYPEi\t\t\t"; |
1256 | e->devid_start = e->devid_end = m->devid; |
1257 | break; |
1258 | case ACPI_IVMD_TYPE_ALL: |
1259 | s = "IVMD_TYPE_ALL\t\t"; |
1260 | e->devid_start = 0; |
1261 | e->devid_end = amd_iommu_last_bdf; |
1262 | break; |
1263 | case ACPI_IVMD_TYPE_RANGE: |
1264 | s = "IVMD_TYPE_RANGE\t\t"; |
1265 | e->devid_start = m->devid; |
1266 | e->devid_end = m->aux; |
1267 | break; |
1268 | } |
1269 | e->address_start = PAGE_ALIGN(m->range_start); |
1270 | e->address_end = e->address_start + PAGE_ALIGN(m->range_length); |
1271 | e->prot = m->flags >> 1; |
1272 | |
1273 | DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" |
1274 | " range_start: %016llx range_end: %016llx flags: %x\n", s, |
1275 | PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start), |
1276 | PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end), |
1277 | PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), |
1278 | e->address_start, e->address_end, m->flags); |
1279 | |
1280 | list_add_tail(&e->list, &amd_iommu_unity_map); |
1281 | |
1282 | return 0; |
1283 | } |
1284 | |
1285 | /* iterates over all memory definitions we find in the ACPI table */ |
1286 | static int __init init_memory_definitions(struct acpi_table_header *table) |
1287 | { |
1288 | u8 *p = (u8 *)table, *end = (u8 *)table; |
1289 | struct ivmd_header *m; |
1290 | |
1291 | end += table->length; |
1292 | p += IVRS_HEADER_LENGTH; |
1293 | |
1294 | while (p < end) { |
1295 | m = (struct ivmd_header *)p; |
1296 | if (m->flags & IVMD_FLAG_EXCL_RANGE) |
1297 | init_exclusion_range(m); |
1298 | else if (m->flags & IVMD_FLAG_UNITY_MAP) |
1299 | init_unity_map_range(m); |
1300 | |
1301 | p += m->length; |
1302 | } |
1303 | |
1304 | return 0; |
1305 | } |
1306 | |
1307 | /* |
1308 | * Init the device table to not allow DMA access for devices and |
1309 | * suppress all page faults |
1310 | */ |
1311 | static void init_device_table(void) |
1312 | { |
1313 | u32 devid; |
1314 | |
1315 | for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { |
1316 | set_dev_entry_bit(devid, DEV_ENTRY_VALID); |
1317 | set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); |
1318 | } |
1319 | } |
1320 | |
1321 | static void iommu_init_flags(struct amd_iommu *iommu) |
1322 | { |
1323 | iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? |
1324 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : |
1325 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); |
1326 | |
1327 | iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? |
1328 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : |
1329 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); |
1330 | |
1331 | iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? |
1332 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : |
1333 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); |
1334 | |
1335 | iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? |
1336 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : |
1337 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); |
1338 | |
1339 | /* |
1340 | * make IOMMU memory accesses cache coherent |
1341 | */ |
1342 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); |
1343 | |
1344 | /* Set IOTLB invalidation timeout to 1s */ |
1345 | iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); |
1346 | } |
1347 | |
1348 | static void iommu_apply_resume_quirks(struct amd_iommu *iommu) |
1349 | { |
1350 | int i, j; |
1351 | u32 ioc_feature_control; |
1352 | struct pci_dev *pdev = iommu->root_pdev; |
1353 | |
1354 | /* RD890 BIOSes may not have completely reconfigured the iommu */ |
1355 | if (!is_rd890_iommu(iommu->dev) || !pdev) |
1356 | return; |
1357 | |
1358 | /* |
1359 | * First, we need to ensure that the iommu is enabled. This is |
1360 | * controlled by a register in the northbridge |
1361 | */ |
1362 | |
1363 | /* Select Northbridge indirect register 0x75 and enable writing */ |
1364 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); |
1365 | pci_read_config_dword(pdev, 0x64, &ioc_feature_control); |
1366 | |
1367 | /* Enable the iommu */ |
1368 | if (!(ioc_feature_control & 0x1)) |
1369 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); |
1370 | |
1371 | /* Restore the iommu BAR */ |
1372 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, |
1373 | iommu->stored_addr_lo); |
1374 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, |
1375 | iommu->stored_addr_hi); |
1376 | |
1377 | /* Restore the l1 indirect regs for each of the 6 l1s */ |
1378 | for (i = 0; i < 6; i++) |
1379 | for (j = 0; j < 0x12; j++) |
1380 | iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); |
1381 | |
1382 | /* Restore the l2 indirect regs */ |
1383 | for (i = 0; i < 0x83; i++) |
1384 | iommu_write_l2(iommu, i, iommu->stored_l2[i]); |
1385 | |
1386 | /* Lock PCI setup registers */ |
1387 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, |
1388 | iommu->stored_addr_lo | 1); |
1389 | } |
1390 | |
1391 | /* |
1392 | * This function finally enables all IOMMUs found in the system after |
1393 | * they have been initialized |
1394 | */ |
1395 | static void early_enable_iommus(void) |
1396 | { |
1397 | struct amd_iommu *iommu; |
1398 | |
1399 | for_each_iommu(iommu) { |
1400 | iommu_disable(iommu); |
1401 | iommu_init_flags(iommu); |
1402 | iommu_set_device_table(iommu); |
1403 | iommu_enable_command_buffer(iommu); |
1404 | iommu_enable_event_buffer(iommu); |
1405 | iommu_set_exclusion_range(iommu); |
1406 | iommu_enable(iommu); |
1407 | iommu_flush_all_caches(iommu); |
1408 | } |
1409 | } |
1410 | |
1411 | static void enable_iommus_v2(void) |
1412 | { |
1413 | struct amd_iommu *iommu; |
1414 | |
1415 | for_each_iommu(iommu) { |
1416 | iommu_enable_ppr_log(iommu); |
1417 | iommu_enable_gt(iommu); |
1418 | } |
1419 | } |
1420 | |
1421 | static void enable_iommus(void) |
1422 | { |
1423 | early_enable_iommus(); |
1424 | |
1425 | enable_iommus_v2(); |
1426 | } |
1427 | |
1428 | static void disable_iommus(void) |
1429 | { |
1430 | struct amd_iommu *iommu; |
1431 | |
1432 | for_each_iommu(iommu) |
1433 | iommu_disable(iommu); |
1434 | } |
1435 | |
1436 | /* |
1437 | * Suspend/Resume support |
1438 | * The IOMMUs are disabled on suspend and fully re-enabled on resume. |
1439 | */ |
1440 | |
1441 | static void amd_iommu_resume(void) |
1442 | { |
1443 | struct amd_iommu *iommu; |
1444 | |
1445 | for_each_iommu(iommu) |
1446 | iommu_apply_resume_quirks(iommu); |
1447 | |
1448 | /* re-load the hardware */ |
1449 | enable_iommus(); |
1450 | |
1451 | amd_iommu_enable_interrupts(); |
1452 | } |
1453 | |
1454 | static int amd_iommu_suspend(void) |
1455 | { |
1456 | /* disable IOMMUs to go out of the way for BIOS */ |
1457 | disable_iommus(); |
1458 | |
1459 | return 0; |
1460 | } |
1461 | |
1462 | static struct syscore_ops amd_iommu_syscore_ops = { |
1463 | .suspend = amd_iommu_suspend, |
1464 | .resume = amd_iommu_resume, |
1465 | }; |
1466 | |
1467 | static void __init free_on_init_error(void) |
1468 | { |
1469 | amd_iommu_uninit_devices(); |
1470 | |
1471 | free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, |
1472 | get_order(MAX_DOMAIN_ID/8)); |
1473 | |
1474 | free_pages((unsigned long)amd_iommu_rlookup_table, |
1475 | get_order(rlookup_table_size)); |
1476 | |
1477 | free_pages((unsigned long)amd_iommu_alias_table, |
1478 | get_order(alias_table_size)); |
1479 | |
1480 | free_pages((unsigned long)amd_iommu_dev_table, |
1481 | get_order(dev_table_size)); |
1482 | |
1483 | free_iommu_all(); |
1484 | |
1485 | free_unity_maps(); |
1486 | |
1487 | #ifdef CONFIG_GART_IOMMU |
1488 | /* |
1489 | * We failed to initialize the AMD IOMMU - try fallback to GART |
1490 | * if possible. |
1491 | */ |
1492 | gart_iommu_init(); |
1493 | |
1494 | #endif |
1495 | } |
1496 | |
1497 | /* |
1498 | * This is the hardware init function for AMD IOMMU in the system. |
1499 | * This function is called either from amd_iommu_init or from the interrupt |
1500 | * remapping setup code. |
1501 | * |
1502 | * This function basically parses the ACPI table for AMD IOMMU (IVRS) |
1503 | * three times: |
1504 | * |
1505 | * 1 pass) Find the highest PCI device id the driver has to handle. |
1506 | * Upon this information the size of the data structures is |
1507 | * determined that needs to be allocated. |
1508 | * |
1509 | * 2 pass) Initialize the data structures just allocated with the |
1510 | * information in the ACPI table about available AMD IOMMUs |
1511 | * in the system. It also maps the PCI devices in the |
1512 | * system to specific IOMMUs |
1513 | * |
1514 | * 3 pass) After the basic data structures are allocated and |
1515 | * initialized we update them with information about memory |
1516 | * remapping requirements parsed out of the ACPI table in |
1517 | * this last pass. |
1518 | * |
1519 | * After everything is set up the IOMMUs are enabled and the necessary |
1520 | * hotplug and suspend notifiers are registered. |
1521 | */ |
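/*
 * In terms of the functions below, pass 1 corresponds to
 * find_last_devid_acpi(), pass 2 to init_iommu_all() and pass 3 to
 * init_memory_definitions().
 */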
1522 | static int __init early_amd_iommu_init(void) |
1523 | { |
1524 | struct acpi_table_header *ivrs_base; |
1525 | acpi_size ivrs_size; |
1526 | acpi_status status; |
1527 | int i, ret = 0; |
1528 | |
1529 | if (!amd_iommu_detected) |
1530 | return -ENODEV; |
1531 | |
1532 | status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size); |
1533 | if (status == AE_NOT_FOUND) |
1534 | return -ENODEV; |
1535 | else if (ACPI_FAILURE(status)) { |
1536 | const char *err = acpi_format_exception(status); |
1537 | pr_err("AMD-Vi: IVRS table error: %s\n", err); |
1538 | return -EINVAL; |
1539 | } |
1540 | |
1541 | /* |
1542 | * First parse ACPI tables to find the largest Bus/Dev/Func |
1543 | * we need to handle. Based on this information the shared data |
1544 | * structures for the IOMMUs in the system will be allocated |
1545 | */ |
1546 | ret = find_last_devid_acpi(ivrs_base); |
1547 | if (ret) |
1548 | goto out; |
1549 | |
1550 | dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); |
1551 | alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); |
1552 | rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); |
1553 | |
1554 | /* Device table - directly used by all IOMMUs */ |
1555 | ret = -ENOMEM; |
1556 | amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
1557 | get_order(dev_table_size)); |
1558 | if (amd_iommu_dev_table == NULL) |
1559 | goto out; |
1560 | |
1561 | /* |
1562 | * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the |
1563 | * IOMMUs see for that device. |
1564 | */ |
1565 | amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, |
1566 | get_order(alias_table_size)); |
1567 | if (amd_iommu_alias_table == NULL) |
1568 | goto out; |
1569 | |
1570 | /* IOMMU rlookup table - find the IOMMU for a specific device */ |
1571 | amd_iommu_rlookup_table = (void *)__get_free_pages( |
1572 | GFP_KERNEL | __GFP_ZERO, |
1573 | get_order(rlookup_table_size)); |
1574 | if (amd_iommu_rlookup_table == NULL) |
1575 | goto out; |
1576 | |
1577 | amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( |
1578 | GFP_KERNEL | __GFP_ZERO, |
1579 | get_order(MAX_DOMAIN_ID/8)); |
1580 | if (amd_iommu_pd_alloc_bitmap == NULL) |
1581 | goto out; |
1582 | |
1583 | /* init the device table */ |
1584 | init_device_table(); |
1585 | |
1586 | /* |
1587 | * let all alias entries point to themselves |
1588 | */ |
1589 | for (i = 0; i <= amd_iommu_last_bdf; ++i) |
1590 | amd_iommu_alias_table[i] = i; |
1591 | |
1592 | /* |
1593 | * never allocate domain 0 because it's used as the non-allocated and |
1594 | * error value placeholder |
1595 | */ |
1596 | amd_iommu_pd_alloc_bitmap[0] = 1; |
1597 | |
1598 | spin_lock_init(&amd_iommu_pd_lock); |
1599 | |
1600 | /* |
1601 | * now the data structures are allocated and basically initialized |
1602 | * start the real acpi table scan |
1603 | */ |
1604 | ret = init_iommu_all(ivrs_base); |
1605 | if (ret) |
1606 | goto out; |
1607 | |
1608 | ret = init_memory_definitions(ivrs_base); |
1609 | if (ret) |
1610 | goto out; |
1611 | |
1612 | out: |
1613 | /* Don't leak any ACPI memory */ |
1614 | early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); |
1615 | ivrs_base = NULL; |
1616 | |
1617 | return ret; |
1618 | } |
1619 | |
1620 | static int amd_iommu_enable_interrupts(void) |
1621 | { |
1622 | struct amd_iommu *iommu; |
1623 | int ret = 0; |
1624 | |
1625 | for_each_iommu(iommu) { |
1626 | ret = iommu_init_msi(iommu); |
1627 | if (ret) |
1628 | goto out; |
1629 | } |
1630 | |
1631 | out: |
1632 | return ret; |
1633 | } |
1634 | |
1635 | static bool detect_ivrs(void) |
1636 | { |
1637 | struct acpi_table_header *ivrs_base; |
1638 | acpi_size ivrs_size; |
1639 | acpi_status status; |
1640 | |
1641 | status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size); |
1642 | if (status == AE_NOT_FOUND) |
1643 | return false; |
1644 | else if (ACPI_FAILURE(status)) { |
1645 | const char *err = acpi_format_exception(status); |
1646 | pr_err("AMD-Vi: IVRS table error: %s\n", err); |
1647 | return false; |
1648 | } |
1649 | |
1650 | early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); |
1651 | |
1652 | /* Make sure ACS will be enabled during PCI probe */ |
1653 | pci_request_acs(); |
1654 | |
1655 | return true; |
1656 | } |
1657 | |
1658 | static int amd_iommu_init_dma(void) |
1659 | { |
1660 | int ret; |
1661 | |
1662 | if (iommu_pass_through) |
1663 | ret = amd_iommu_init_passthrough(); |
1664 | else |
1665 | ret = amd_iommu_init_dma_ops(); |
1666 | |
1667 | if (ret) |
1668 | return ret; |
1669 | |
1670 | amd_iommu_init_api(); |
1671 | |
1672 | amd_iommu_init_notifier(); |
1673 | |
1674 | return 0; |
1675 | } |
1676 | |
1677 | /**************************************************************************** |
1678 | * |
1679 | * AMD IOMMU Initialization State Machine |
1680 | * |
1681 | ****************************************************************************/ |
1682 | |
1683 | static int __init state_next(void) |
1684 | { |
1685 | int ret = 0; |
1686 | |
1687 | switch (init_state) { |
1688 | case IOMMU_START_STATE: |
1689 | if (!detect_ivrs()) { |
1690 | init_state = IOMMU_NOT_FOUND; |
1691 | ret = -ENODEV; |
1692 | } else { |
1693 | init_state = IOMMU_IVRS_DETECTED; |
1694 | } |
1695 | break; |
1696 | case IOMMU_IVRS_DETECTED: |
1697 | ret = early_amd_iommu_init(); |
1698 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; |
1699 | break; |
1700 | case IOMMU_ACPI_FINISHED: |
1701 | early_enable_iommus(); |
1702 | register_syscore_ops(&amd_iommu_syscore_ops); |
1703 | x86_platform.iommu_shutdown = disable_iommus; |
1704 | init_state = IOMMU_ENABLED; |
1705 | break; |
1706 | case IOMMU_ENABLED: |
1707 | ret = amd_iommu_init_pci(); |
1708 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; |
1709 | enable_iommus_v2(); |
1710 | break; |
1711 | case IOMMU_PCI_INIT: |
1712 | ret = amd_iommu_enable_interrupts(); |
1713 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; |
1714 | break; |
1715 | case IOMMU_INTERRUPTS_EN: |
1716 | ret = amd_iommu_init_dma(); |
1717 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; |
1718 | break; |
1719 | case IOMMU_DMA_OPS: |
1720 | init_state = IOMMU_INITIALIZED; |
1721 | break; |
1722 | case IOMMU_INITIALIZED: |
1723 | /* Nothing to do */ |
1724 | break; |
1725 | case IOMMU_NOT_FOUND: |
1726 | case IOMMU_INIT_ERROR: |
1727 | /* Error states => do nothing */ |
1728 | ret = -EINVAL; |
1729 | break; |
1730 | default: |
1731 | /* Unknown state */ |
1732 | BUG(); |
1733 | } |
1734 | |
1735 | return ret; |
1736 | } |
1737 | |
1738 | static int __init iommu_go_to_state(enum iommu_init_state state) |
1739 | { |
1740 | int ret = 0; |
1741 | |
1742 | while (init_state != state) { |
1743 | ret = state_next(); |
1744 | if (init_state == IOMMU_NOT_FOUND || |
1745 | init_state == IOMMU_INIT_ERROR) |
1746 | break; |
1747 | } |
1748 | |
1749 | return ret; |
1750 | } |
1751 | |
1752 | |
1753 | |
1754 | /* |
1755 | * This is the core init function for AMD IOMMU hardware in the system. |
1756 | * This function is called from the generic x86 DMA layer initialization |
1757 | * code. |
1758 | */ |
1759 | static int __init amd_iommu_init(void) |
1760 | { |
1761 | int ret; |
1762 | |
1763 | ret = iommu_go_to_state(IOMMU_INITIALIZED); |
1764 | if (ret) { |
1765 | disable_iommus(); |
1766 | free_on_init_error(); |
1767 | } |
1768 | |
1769 | return ret; |
1770 | } |
1771 | |
1772 | /**************************************************************************** |
1773 | * |
1774 | * Early detect code. This code runs at IOMMU detection time in the DMA |
1775 | * layer. It just checks whether an IVRS ACPI table is present to detect AMD |
1776 | * IOMMUs |
1777 | * |
1778 | ****************************************************************************/ |
1779 | int __init amd_iommu_detect(void) |
1780 | { |
1781 | int ret; |
1782 | |
1783 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) |
1784 | return -ENODEV; |
1785 | |
1786 | if (amd_iommu_disabled) |
1787 | return -ENODEV; |
1788 | |
1789 | ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); |
1790 | if (ret) |
1791 | return ret; |
1792 | |
1793 | amd_iommu_detected = true; |
1794 | iommu_detected = 1; |
1795 | x86_init.iommu.iommu_init = amd_iommu_init; |
1796 | |
1797 | return 0; |
1798 | } |
1799 | |
1800 | /**************************************************************************** |
1801 | * |
1802 | * Parsing functions for the AMD IOMMU specific kernel command line |
1803 | * options. |
1804 | * |
1805 | ****************************************************************************/ |
1806 | |
1807 | static int __init parse_amd_iommu_dump(char *str) |
1808 | { |
1809 | amd_iommu_dump = true; |
1810 | |
1811 | return 1; |
1812 | } |
1813 | |
1814 | static int __init parse_amd_iommu_options(char *str) |
1815 | { |
1816 | for (; *str; ++str) { |
1817 | if (strncmp(str, "fullflush", 9) == 0) |
1818 | amd_iommu_unmap_flush = true; |
1819 | if (strncmp(str, "off", 3) == 0) |
1820 | amd_iommu_disabled = true; |
1821 | if (strncmp(str, "force_isolation", 15) == 0) |
1822 | amd_iommu_force_isolation = true; |
1823 | } |
1824 | |
1825 | return 1; |
1826 | } |
1827 | |
1828 | __setup("amd_iommu_dump", parse_amd_iommu_dump); |
1829 | __setup("amd_iommu=", parse_amd_iommu_options); |
1830 | |
1831 | IOMMU_INIT_FINISH(amd_iommu_detect, |
1832 | gart_iommu_hole_init, |
1833 | NULL, |
1834 | NULL); |
1835 | |
1836 | bool amd_iommu_v2_supported(void) |
1837 | { |
1838 | return amd_iommu_v2_present; |
1839 | } |
1840 | EXPORT_SYMBOL(amd_iommu_v2_supported); |
1841 |