#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by
 * kvm; the remaining bits are visible to userspace and are defined
 * in include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID (1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 62 ~ 52 to indicate an error pfn,
 * and mask bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host
 * failed to translate it to a pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be
 * translated to a pfn: either it is not in a slot or
 * the host failed to translate it.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in a slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}
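
/*
 * Usage sketch (illustrative, not part of this header): callers of the
 * gfn_to_pfn() family are expected to check the result with the helpers
 * above before treating it as a real host page frame number:
 *
 *      pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *      if (is_error_noslot_pfn(pfn))
 *              return -EFAULT;
 *      ... use pfn, then release it with kvm_release_pfn_clean() ...
 */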

#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22

#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
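
/*
 * Usage sketch (illustrative; assumes a caller-owned struct
 * kvm_io_device whose ops are already wired up): a device is
 * registered on one of the buses above (typically with
 * kvm->slots_lock held), after which accesses to its range are
 * dispatched to it:
 *
 *      mutex_lock(&kvm->slots_lock);
 *      ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
 *      mutex_unlock(&kvm->slots_lock);
 *
 *      ret = kvm_io_bus_write(kvm, KVM_MMIO_BUS, addr, len, &val);
 */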

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * CPU relax intercept or pause loop exit optimization.
         * in_spin_loop: set when a vcpu does a pause loop exit
         *  or has its cpu relax intercepted.
         * dy_eligible: indicates whether the vcpu is eligible for
         *  directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so that those limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
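
/*
 * Worked example: for a slot with npages = 1000 on a 64-bit host,
 * ALIGN(1000, 64) = 1024 bits, so the dirty bitmap occupies
 * 1024 / 8 = 128 bytes.
 */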

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains a list of the irq
         * chips the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
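
/*
 * Usage sketch (illustrative): requesting a TLB flush on every online
 * vcpu and kicking each one out of guest mode:
 *
 *      int i;
 *      struct kvm_vcpu *vcpu;
 *
 *      kvm_for_each_vcpu(i, vcpu, kvm) {
 *              kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *              kvm_vcpu_kick(vcpu);
 *      }
 */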

#define kvm_for_each_memslot(memslot, slots) \
        for (memslot = &slots->memslots[0]; \
             memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
             memslot++)
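
/*
 * Usage sketch (illustrative): summing the guest pages currently
 * backed by memslots; the iteration stops at the first slot with
 * npages == 0, since the array is kept sorted with used slots first:
 *
 *      struct kvm_memory_slot *memslot;
 *      unsigned long pages = 0;
 *
 *      kvm_for_each_memslot(memslot, slots)
 *              pages += memslot->npages;
 */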

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
                     u64 last_generation);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}
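
/*
 * Usage sketch (illustrative): readers access the memslot array inside
 * an SRCU read-side critical section; kvm_memslots() verifies this (or
 * that slots_lock is held) via rcu_dereference_check():
 *
 *      int idx = srcu_read_lock(&kvm->srcu);
 *      struct kvm_memslots *slots = kvm_memslots(kvm);
 *      ... look up slots ...
 *      srcu_read_unlock(&kvm->srcu, idx);
 */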

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY 0x1

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}
#endif

static inline void kvm_guest_enter(void)
{
        unsigned long flags;

        BUG_ON(preemptible());

        local_irq_save(flags);
        guest_enter();
        local_irq_restore(flags);

        /* KVM does not hold any references to rcu-protected data when it
         * switches the CPU into guest mode. In fact, switching to guest
         * mode is very similar to exiting to userspace from rcu's point of
         * view. In addition, the CPU may stay in guest mode for quite a
         * long time (up to one time slice). Let's treat guest mode as a
         * quiescent state, just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit();
        local_irq_restore(flags);
}
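
/*
 * Usage sketch (illustrative): an arch's vcpu run loop brackets the
 * low-level guest entry with these helpers; kvm_guest_enter() must be
 * called with preemption disabled (see the BUG_ON above):
 *
 *      preempt_disable();
 *      kvm_guest_enter();
 *      ... enter the guest and run until the next vmexit ...
 *      kvm_guest_exit();
 *      preempt_enable();
 */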

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
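
/*
 * Usage sketch (illustrative): translating a gfn to a host virtual
 * address with the inline helpers above:
 *
 *      struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 *      unsigned long hva;
 *
 *      if (slot)
 *              hva = __gfn_to_hva_memslot(slot, gfn);
 */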

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}
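
/*
 * Worked example (PAGE_SHIFT == 12): gfn_to_gpa(0x1234) == 0x1234000,
 * and gpa_to_gfn(0x1234567) == 0x1234.
 */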

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq. This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock
         * rather than under kvm->mmu_lock, for scalability, so
         * can't rely on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
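
/*
 * Usage sketch (illustrative): page fault handlers sample
 * mmu_notifier_seq before a (possibly sleeping) pfn lookup and
 * re-check it under mmu_lock before installing the mapping:
 *
 *      mmu_seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(kvm, gfn);
 *      spin_lock(&kvm->mmu_lock);
 *      if (mmu_notifier_retry(kvm, mmu_seq))
 *              goto out_unlock;
 *      ... install the mapping ...
 *      spin_unlock(&kvm->mmu_lock);
 */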

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}
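
/*
 * Usage sketch (illustrative): a request is raised and the target vcpu
 * kicked; the vcpu consumes the request at the top of its run loop:
 *
 *      kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *      kvm_vcpu_kick(vcpu);
 *
 *      ... in the vcpu run loop ...
 *      if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *              ... update the clock before entering the guest ...
 */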

extern bool kvm_rebooting;

struct kvm_device_ops;

struct kvm_device {
        struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
};
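
/*
 * Implementation sketch (illustrative; "foo" is a hypothetical device
 * type, not one defined by KVM): a backend fills in kvm_device_ops,
 * where create, destroy and name are mandatory and the attr/ioctl
 * hooks are optional:
 *
 *      static int foo_create(struct kvm_device *dev, u32 type)
 *      {
 *              dev->private = kzalloc(sizeof(struct foo), GFP_KERNEL);
 *              return dev->private ? 0 : -ENOMEM;
 *      }
 *
 *      static void foo_destroy(struct kvm_device *dev)
 *      {
 *              kfree(dev->private);
 *              kfree(dev);
 *      }
 *
 *      struct kvm_device_ops kvm_foo_ops = {
 *              .name = "foo",
 *              .create = foo_create,
 *              .destroy = foo_destroy,
 *      };
 */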

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
        return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif
1085 |