/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS 32

/*
 * Some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE 32
#define ALIAS_TABLE_ENTRY_SIZE 2
#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))

/* Length of the MMIO region for the AMD IOMMU */
#define MMIO_REGION_LENGTH 0x4000

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
#define MMIO_RANGE_OFFSET 0x0c
#define MMIO_MISC_OFFSET 0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK 0xff000000
#define MMIO_RANGE_FD_MASK 0x00ff0000
#define MMIO_RANGE_BUS_MASK 0x0000ff00
#define MMIO_RANGE_LD_SHIFT 24
#define MMIO_RANGE_FD_SHIFT 16
#define MMIO_RANGE_BUS_SHIFT 8
#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x) ((x) & 0x1f)
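/*
 * Illustrative sketch (not from the original file): how the range
 * capability macros above are typically used after reading the raw
 * register from PCI config space; "dev", "cap_ptr" and the devid
 * variables are hypothetical, and calc_devid() is defined below.
 *
 *	u32 range;
 *	pci_read_config_dword(dev, cap_ptr + MMIO_RANGE_OFFSET, &range);
 *	first_devid = calc_devid(MMIO_GET_BUS(range), MMIO_GET_FD(range));
 *	last_devid  = calc_devid(MMIO_GET_BUS(range), MMIO_GET_LD(range));
 */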

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK 0x01ULL
#define MMIO_EXCL_ALLOW_MASK 0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET 0x0000
#define MMIO_CMD_BUF_OFFSET 0x0008
#define MMIO_EVT_BUF_OFFSET 0x0010
#define MMIO_CONTROL_OFFSET 0x0018
#define MMIO_EXCL_BASE_OFFSET 0x0020
#define MMIO_EXCL_LIMIT_OFFSET 0x0028
#define MMIO_EXT_FEATURES 0x0030
#define MMIO_PPR_LOG_OFFSET 0x0038
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
#define MMIO_EVT_TAIL_OFFSET 0x2018
#define MMIO_STATUS_OFFSET 0x2020
#define MMIO_PPR_HEAD_OFFSET 0x2030
#define MMIO_PPR_TAIL_OFFSET 0x2038
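/*
 * Usage sketch (illustrative only): all of these offsets are relative to
 * the remapped MMIO base kept in struct amd_iommu (defined below), e.g.:
 *
 *	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 *	writel(0, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 */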

/* Extended Feature Bits */
#define FEATURE_PREFETCH (1ULL << 0)
#define FEATURE_PPR (1ULL << 1)
#define FEATURE_X2APIC (1ULL << 2)
#define FEATURE_NX (1ULL << 3)
#define FEATURE_GT (1ULL << 4)
#define FEATURE_IA (1ULL << 6)
#define FEATURE_GA (1ULL << 7)
#define FEATURE_HE (1ULL << 8)
#define FEATURE_PC (1ULL << 9)

#define FEATURE_PASID_SHIFT 32
#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT 14
#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
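/*
 * Sketch (assumes the extended-feature register layout above): testing a
 * feature bit and extracting the PASID field from a cached EFR value:
 *
 *	if (iommu->features & FEATURE_GT) {
 *		u64 pasmax = (iommu->features & FEATURE_PASID_MASK)
 *				>> FEATURE_PASID_SHIFT;
 *		...
 *	}
 */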

#define PASID_MASK 0x000fffff

/* MMIO status bits */
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)

/* Event logging constants */
#define EVENT_ENTRY_SIZE 0x10
#define EVENT_TYPE_SHIFT 28
#define EVENT_TYPE_MASK 0xf
#define EVENT_TYPE_ILL_DEV 0x1
#define EVENT_TYPE_IO_FAULT 0x2
#define EVENT_TYPE_DEV_TAB_ERR 0x3
#define EVENT_TYPE_PAGE_TAB_ERR 0x4
#define EVENT_TYPE_ILL_CMD 0x5
#define EVENT_TYPE_CMD_HARD_ERR 0x6
#define EVENT_TYPE_IOTLB_INV_TO 0x7
#define EVENT_TYPE_INV_DEV_REQ 0x8
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK 0xffff
#define EVENT_DOMID_SHIFT 0
#define EVENT_FLAGS_MASK 0xfff
#define EVENT_FLAGS_SHIFT 0x10
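/*
 * Decoding sketch (illustrative): an event log entry is 16 bytes; viewed
 * as four u32 words, the fields fall out of the masks/shifts above
 * ("event" is a hypothetical pointer into the event buffer):
 *
 *	int type  = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
 *	u16 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 *	u16 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 */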

/* Feature control bits */
#define CONTROL_IOMMU_EN 0x00ULL
#define CONTROL_HT_TUN_EN 0x01ULL
#define CONTROL_EVT_LOG_EN 0x02ULL
#define CONTROL_EVT_INT_EN 0x03ULL
#define CONTROL_COMWAIT_EN 0x04ULL
#define CONTROL_INV_TIMEOUT 0x05ULL
#define CONTROL_PASSPW_EN 0x08ULL
#define CONTROL_RESPASSPW_EN 0x09ULL
#define CONTROL_COHERENT_EN 0x0aULL
#define CONTROL_ISOC_EN 0x0bULL
#define CONTROL_CMDBUF_EN 0x0cULL
#define CONTROL_PPFLOG_EN 0x0dULL
#define CONTROL_PPFINT_EN 0x0eULL
#define CONTROL_PPR_EN 0x0fULL
#define CONTROL_GT_EN 0x10ULL

#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
#define CTRL_INV_TO_1MS 1
#define CTRL_INV_TO_10MS 2
#define CTRL_INV_TO_100MS 3
#define CTRL_INV_TO_1S 4
#define CTRL_INV_TO_10S 5
#define CTRL_INV_TO_100S 6
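/*
 * Sketch (illustrative): the CONTROL_* values are bit positions, not
 * masks, so enabling a feature is a read-modify-write on the control
 * register:
 *
 *	u32 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 *	ctrl |= 1 << CONTROL_EVT_LOG_EN;
 *	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 */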

/* Command specific defines */
#define CMD_COMPL_WAIT 0x01
#define CMD_INV_DEV_ENTRY 0x02
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_INV_IOTLB_PAGES 0x04
#define CMD_COMPLETE_PPR 0x07
#define CMD_INV_ALL 0x08

#define CMD_COMPL_WAIT_STORE_MASK 0x01
#define CMD_COMPL_WAIT_INT_MASK 0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK 0x04

#define PPR_STATUS_MASK 0xf
#define PPR_STATUS_SHIFT 12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL

/* Macros and definitions for device table entries */
#define DEV_ENTRY_VALID 0x00
#define DEV_ENTRY_TRANSLATION 0x01
#define DEV_ENTRY_IR 0x3d
#define DEV_ENTRY_IW 0x3e
#define DEV_ENTRY_NO_PAGE_FAULT 0x62
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
#define DEV_ENTRY_INIT_PASS 0xb8
#define DEV_ENTRY_EINT_PASS 0xb9
#define DEV_ENTRY_NMI_PASS 0xba
#define DEV_ENTRY_LINT0_PASS 0xbe
#define DEV_ENTRY_LINT1_PASS 0xbf
#define DEV_ENTRY_MODE_MASK 0x07
#define DEV_ENTRY_MODE_SHIFT 0x09
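/*
 * Sketch (illustrative): the DEV_ENTRY_* values are bit positions inside
 * the 256-bit device table entry (struct dev_table_entry, defined further
 * down), so setting one looks like:
 *
 *	struct dev_table_entry *e = &amd_iommu_dev_table[devid];
 *	e->data[DEV_ENTRY_VALID / 64] |= 1ULL << (DEV_ENTRY_VALID % 64);
 */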

/* Constants to configure the command buffer */
#define CMD_BUFFER_SIZE 8192
#define CMD_BUFFER_UNINITIALIZED 1
#define CMD_BUFFER_ENTRIES 512
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* Constants for event buffer handling */
#define EVT_BUFFER_SIZE 8192 /* 512 entries */
#define EVT_LEN_MASK (0x9ULL << 56)

/* Constants for PPR log handling */
#define PPR_LOG_ENTRIES 512
#define PPR_LOG_SIZE_SHIFT 56
#define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE 16
#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x) ((x) & 0xffffULL)
#define PPR_TAG(x) (((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x) (((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x) (((x) >> 42) & 0xfULL)
#define PPR_PASID(x) ((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT 0x01
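/*
 * Sketch (illustrative): a PPR log entry is two u64 words; the macros
 * above decode the first word, with PPR_PASID() stitching the split
 * PASID halves back together ("raw" is a hypothetical pointer):
 *
 *	if (PPR_REQ_TYPE(raw[0]) == PPR_REQ_FAULT) {
 *		u16 devid = PPR_DEVID(raw[0]);
 *		u32 pasid = PPR_PASID(raw[0]);
 *	}
 */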

#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define PAGE_MODE_4_LEVEL 0x04
#define PAGE_MODE_5_LEVEL 0x05
#define PAGE_MODE_6_LEVEL 0x06

#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
		((1ULL << PM_LEVEL_SHIFT((x))) - 1) : \
		(0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
		IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k 0
#define PM_ADDR_MASK 0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
		(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
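/*
 * Worked example: PM_LEVEL_SHIFT(x) is 12 + 9*x, so each level indexes
 * nine address bits. For the IO virtual address 0x12345000:
 *
 *	PM_LEVEL_INDEX(0, 0x12345000ULL) == 0x145	(bits 12-20)
 *	PM_LEVEL_INDEX(1, 0x12345000ULL) == 0x091	(bits 21-29)
 */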
237 | |
238 | /* |
239 | * Returns the page table level to use for a given page size |
240 | * Pagesize is expected to be a power-of-two |
241 | */ |
242 | #define PAGE_SIZE_LEVEL(pagesize) \ |
243 | ((__ffs(pagesize) - 12) / 9) |
244 | /* |
245 | * Returns the number of ptes to use for a given page size |
246 | * Pagesize is expected to be a power-of-two |
247 | */ |
248 | #define PAGE_SIZE_PTE_COUNT(pagesize) \ |
249 | (1ULL << ((__ffs(pagesize) - 12) % 9)) |
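/*
 * Worked examples: a 2 MiB page has __ffs(pagesize) == 21, giving
 * PAGE_SIZE_LEVEL(0x200000) == (21 - 12) / 9 == 1 and a single PTE;
 * a 32 KiB page stays at level 0 but needs
 * PAGE_SIZE_PTE_COUNT(0x8000) == 1 << ((15 - 12) % 9) == 8 PTEs.
 */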

/*
 * Aligns a given io-virtual address to a given page size.
 * The page size is expected to be a power of two.
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given page size.
 * The PTE has no permission bits set.
 * The page size is expected to be a power of two larger than 4096.
 */
#define PAGE_SIZE_PTE(address, pagesize) \
		(((address) | ((pagesize) - 1)) & \
		 (~((pagesize) >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps.
 */
#define PTE_PAGE_SIZE(pte) \
		(1ULL << (1 + ffz(((pte) | 0xfffULL))))
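/*
 * Worked round trip: for a 2 MiB mapping at IOVA 0x400000,
 * PAGE_SIZE_PTE(0x400000ULL, 0x200000ULL) == 0x4ff000 (bits 12-19 set,
 * bit 20 clear), and PTE_PAGE_SIZE(0x4ff000ULL) == 1ULL << (1 + 20)
 * == 0x200000 again.
 */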

#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
#define IOMMU_PTE_FC (1ULL << 60)
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)

#define DTE_FLAG_IOTLB (0x01ULL << 32)
#define DTE_FLAG_GV (0x01ULL << 55)
#define DTE_GLX_SHIFT (56)
#define DTE_GLX_MASK (3)

#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)

#define DTE_GCR3_INDEX_A 0
#define DTE_GCR3_INDEX_B 1
#define DTE_GCR3_INDEX_C 1

#define DTE_GCR3_SHIFT_A 58
#define DTE_GCR3_SHIFT_B 16
#define DTE_GCR3_SHIFT_C 43

#define GCR3_VALID 0x01ULL
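/*
 * Sketch (illustrative): a guest CR3 table pointer is scattered across
 * the device table entry in three pieces; writing it boils down to
 * ("dte" is a hypothetical u64[4] view of the entry):
 *
 *	dte[DTE_GCR3_INDEX_A] |= DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
 *	dte[DTE_GCR3_INDEX_B] |= DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
 *	dte[DTE_GCR3_INDEX_C] |= DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
 */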

#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB 24
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27

#define MAX_DOMAIN_ID 65536

/* FIXME: move this macro to <linux/pci.h> */
#define PCI_BUS(x) (((x) >> 8) & 0xff)

/* Protection domain flags */
#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
				      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
					  translation */
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
	do { \
		if (amd_iommu_dump) \
			printk(KERN_INFO "AMD-Vi: " format, ## arg); \
	} while (0)
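/*
 * Example: DUMP_printk("device id: %#x\n", devid) prints with an
 * "AMD-Vi: " prefix, and only when the amd_iommu_dump option is set.
 */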

/* Global flag set if any IOMMU caches non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
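/*
 * Typical use (illustrative):
 *
 *	struct amd_iommu *iommu;
 *
 *	for_each_iommu(iommu)
 *		iommu_flush_all_caches(iommu);
 */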

#define APERTURE_RANGE_SHIFT 27 /* 128 MB */
#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
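/*
 * Worked example: DMA address 0x08200000 falls into aperture range
 * APERTURE_RANGE_INDEX(0x08200000) == 1 (ranges are 128 MB each) and
 * uses PTE page APERTURE_PAGE_INDEX(0x08200000) == 1, i.e. one of the
 * 64 2 MB-granular pte_pages slots per range (see struct aperture_range).
 */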

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;    /* IO virtual address of the fault */
	u32 pasid;      /* Address space identifier */
	u16 device_id;  /* Originating PCI device id */
	u16 tag;        /* PPR tag */
	u16 flags;      /* Fault flags */
};

#define PPR_FAULT_EXEC (1 << 1)
#define PPR_FAULT_READ (1 << 2)
#define PPR_FAULT_WRITE (1 << 5)
#define PPR_FAULT_USER (1 << 6)
#define PPR_FAULT_RSVD (1 << 7)
#define PPR_FAULT_GN (1 << 8)

struct iommu_domain;

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head list;      /* for list of all protection domains */
	struct list_head dev_list;  /* List of all devices in this domain */
	spinlock_t lock;            /* mostly used to lock the page table */
	struct mutex api_lock;      /* protect page tables in the iommu-api path */
	u16 id;                     /* the domain id written to the device table */
	int mode;                   /* paging mode (0-6 levels) */
	u64 *pt_root;               /* page table root pointer */
	int glx;                    /* Number of levels for GCR3 table */
	u64 *gcr3_tbl;              /* Guest CR3 table */
	unsigned long flags;        /* flags to find out type of domain */
	bool updated;               /* complete domain flush required */
	unsigned dev_cnt;           /* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
	void *priv;                 /* private data */
	struct iommu_domain *iommu_domain; /* Pointer to generic
					      domain structure */
};

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	struct list_head list;             /* For domain->dev_list */
	struct list_head dev_data_list;    /* For global dev_data_list */
	struct iommu_dev_data *alias_data; /* The alias dev_data */
	struct protection_domain *domain;  /* Domain the device is bound to */
	atomic_t bind;                     /* Domain attach reference count */
	u16 devid;                         /* PCI Device ID */
	bool iommu_v2;                     /* Device can make use of IOMMUv2 */
	bool passthrough;                  /* Default for device is pt_domain */
	struct {
		bool enabled;
		int qdep;
	} ats;                             /* ATS state */
	bool pri_tlp;                      /* PASID TLB required for
					      PPR completions */
	u32 errata;                        /* Bitmap for errata to apply */
};

/*
 * For dynamic growth the aperture size is split into ranges of 128MB of
 * DMA address space each. This struct represents one such range.
 */
struct aperture_range {

	/* address allocation bitmap */
	unsigned long *bitmap;

	/*
	 * Array of PTE pages for the aperture. In this array we save all the
	 * leaf pages of the domain page table used for the aperture. This way
	 * we don't need to walk the page table to find a specific PTE. We can
	 * just calculate its address in constant time.
	 */
	u64 *pte_pages[64];

	unsigned long offset;
};

/*
 * Data container for a dma_ops specific protection domain
 */
struct dma_ops_domain {
	struct list_head list;

	/* generic protection domain information */
	struct protection_domain domain;

	/* size of the aperture for the mappings */
	unsigned long aperture_size;

	/* address we start to search for free addresses */
	unsigned long next_address;

	/* address space relevant data */
	struct aperture_range *aperture[APERTURE_MAX_RANGES];

	/* This will be set to true when TLB needs to be flushed */
	bool need_flush;

	/*
	 * if this is a preallocated domain, keep the device for which it was
	 * preallocated in this variable
	 */
	u16 target_dev;
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;
	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* first device this IOMMU handles. read from PCI */
	u16 first_device;
	/* last device this IOMMU handles. read from PCI */
	u16 last_device;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	/* size of command buffer */
	u32 cmd_buf_size;

	/* size of event buffer */
	u32 evt_buf_size;
	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if true, we need to send a completion wait command */
	bool need_sync;

	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];
};

/*
 * List with all IOMMUs in the system. This list is not locked because it
 * is only written and read at driver initialization or suspend time.
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
extern int amd_iommus_present;

/*
 * Declarations for the global list of all protection domains
 */
extern spinlock_t amd_iommu_pd_lock;
extern struct list_head amd_iommu_pd_list;

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (inclusive) */
	u16 devid_start;
	/* end device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is
 * only read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it
 * is only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific
 * device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern u32 amd_iommu_unmap_flush;

/* Smallest number of PASIDs supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasids;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

/*
 * Takes bus and device/function and returns the device id.
 * FIXME: should this be in generic PCI code?
 */
static inline u16 calc_devid(u8 bus, u8 devfn)
{
	return (((u16)bus) << 8) | devfn;
}
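/*
 * Example: a device at bus 0x3a, slot 2, function 1 gets
 * calc_devid(0x3a, PCI_DEVFN(2, 1)) == 0x3a11, and PCI_BUS(0x3a11)
 * (defined above) recovers 0x3a.
 */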

#ifdef CONFIG_AMD_IOMMU_STATS

struct __iommu_counter {
	char *name;
	struct dentry *dent;
	u64 value;
};

#define DECLARE_STATS_COUNTER(nm) \
	static struct __iommu_counter nm = { \
		.name = #nm, \
	}

#define INC_STATS_COUNTER(name) name.value += 1
#define ADD_STATS_COUNTER(name, x) name.value += (x)
#define SUB_STATS_COUNTER(name, x) name.value -= (x)
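/*
 * Usage sketch (hypothetical counter name):
 *
 *	DECLARE_STATS_COUNTER(cnt_map_single);
 *	...
 *	INC_STATS_COUNTER(cnt_map_single);
 *
 * With CONFIG_AMD_IOMMU_STATS disabled, the variants below compile to
 * nothing.
 */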

#else /* CONFIG_AMD_IOMMU_STATS */

#define DECLARE_STATS_COUNTER(name)
#define INC_STATS_COUNTER(name)
#define ADD_STATS_COUNTER(name, x)
#define SUB_STATS_COUNTER(name, x)

#endif /* CONFIG_AMD_IOMMU_STATS */

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */