/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);

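/*
 * Pin the host pages backing the guest frame range
 * [gfn, gfn + size / PAGE_SIZE) and return the pfn of the first page.
 * If the first gfn cannot be translated, an error pfn is returned and
 * nothing is pinned.  The references taken here are dropped again, page
 * by page, by kvm_unpin_pages() when the range is unmapped.
 */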
static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
                           gfn_t gfn, unsigned long size)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
        end_gfn = gfn + (size >> PAGE_SHIFT);
        gfn    += 1;

        if (is_error_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(kvm, slot, gfn++);

        return pfn;
}

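/*
 * Map a memslot's guest physical range into the VM's IOMMU domain.
 * Each chunk is pinned first and then mapped with the largest page size
 * that the host backing, the slot boundary and the gfn alignment allow;
 * on failure, everything mapped so far is unmapped and unpinned again.
 */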
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* Check that an IOMMU domain exists and is in use */
        if (!domain)
                return 0;

        gfn     = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ | IOMMU_WRITE;
        if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
                if (is_error_pfn(pfn)) {
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              get_order(page_size), flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_pages: "
                               "iommu failed to map pfn=%llx\n", pfn);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
        return r;
}

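/* Map every memslot into the IOMMU domain, stopping at the first failure. */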
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int i, idx, r = 0;
        struct kvm_memslots *slots;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++) {
                r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
                if (r)
                        break;
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return r;
}

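/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If the domain
 * turns out to support cache coherency, rebuild all existing mappings so
 * that they pick up the IOMMU_CACHE flag.
 */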
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct pci_dev *pdev = NULL;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r, last_flags;

        /* Check that an IOMMU domain exists and is in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                printk(KERN_ERR "assign device %x:%x:%x.%x failed\n",
                        pci_domain_nr(pdev->bus),
                        pdev->bus->number,
                        PCI_SLOT(pdev->devfn),
                        PCI_FUNC(pdev->devfn));
                return r;
        }

        last_flags = kvm->arch.iommu_flags;
        if (iommu_domain_has_cap(kvm->arch.iommu_domain,
                                 IOMMU_CAP_CACHE_COHERENCY))
                kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

        /* Check whether the IOMMU page tables for guest memory need updating */
        if ((last_flags ^ kvm->arch.iommu_flags) ==
            KVM_IOMMU_CACHE_COHERENCY) {
                kvm_iommu_unmap_memslots(kvm);
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

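/* Detach an assigned PCI device from the VM's IOMMU domain. */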
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        struct pci_dev *pdev = NULL;

        /* Check that an IOMMU domain exists and is in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}

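/*
 * Allocate the VM's IOMMU domain and map all current memslots into it.
 * Returns -ENODEV when no hardware IOMMU is available.
 */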
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_found()) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        kvm->arch.iommu_domain = iommu_domain_alloc();
        if (!kvm->arch.iommu_domain)
                return -ENOMEM;

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                goto out_unmap;

        return 0;

out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

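/* Release the page references taken by kvm_pin_pages(). */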
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}

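/*
 * Unmap a guest physical range from the IOMMU domain and unpin the host
 * pages behind it.  iommu_unmap() may tear down more than the single 4k
 * page requested; its return value is the order of what was actually
 * unmapped, and therefore tells us how many pages to release.
 */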
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* Check that an IOMMU domain exists and is in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                int order;

                /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
                pfn  = phys >> PAGE_SHIFT;

                /* Unmap address from IO address space */
                order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
                unmap_pages = 1ULL << order;

                /* Unpin all pages we just unmapped so we do not leak memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;
        }
}

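/* Tear down the IOMMU mappings of every memslot under SRCU protection. */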
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int i, idx;
        struct kvm_memslots *slots;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++) {
                kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
                                    slots->memslots[i].npages);
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return 0;
}

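/* Unmap all guest memory from the IOMMU domain and free the domain itself. */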
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* Check that an IOMMU domain exists and is in use */
        if (!domain)
                return 0;

        kvm_iommu_unmap_memslots(kvm);
        iommu_domain_free(domain);
        return 0;
}