/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is the access in a batchable area, i.e. is (addr, len)
	 * fully contained in (zone->addr, zone->size)?
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

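/*
 * Illustrative check of the arithmetic above (hypothetical zone): for
 * a zone at 0xd0000000 with size 0x1000, a 4-byte write to 0xd0000ffc
 * is in range (addr + len lands exactly on the zone end), while a
 * 4-byte write to 0xd0000ffd is rejected because it crosses the zone
 * end.  The "addr + len < addr" test catches gpa_t wraparound, e.g.
 * len == 4 at addr == ~(gpa_t)0 - 1.
 */
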
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Can we batch the access?
	 *
	 * last is the first free entry; check that we do not run into
	 * first, the oldest used entry.  One entry is always left
	 * unused, so that first == last unambiguously means empty.
	 * KVM_COALESCED_MMIO_MAX is added before the modulo because it
	 * is not a power of two, so plain unsigned wraparound in the
	 * subtraction would not reduce correctly.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first + KVM_COALESCED_MMIO_MAX - ring->last - 1) %
		KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

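/*
 * Worked example of the occupancy check (illustrative values): with
 * KVM_COALESCED_MMIO_MAX == 170 (its value with 4 KiB pages), an empty
 * ring has first == last, e.g. both 0, giving
 * avail == (0 + 170 - 0 - 1) % 170 == 169: one slot is sacrificed to
 * distinguish empty from full.  A full ring has last just behind
 * first, e.g. first == 3 and last == 2, giving
 * avail == (3 + 170 - 2 - 1) % 170 == 0.
 */
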
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* Copy the data into the first free entry of the ring. */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();	/* publish the entry before advancing last */
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

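/*
 * The smp_wmb() above orders the entry contents before the update of
 * ring->last: a consumer that observes the new ring->last, and issues
 * a matching read barrier before dereferencing the entry, can never
 * see a half-written slot.  The consumer side lives in userspace; see
 * the note after kvm_coalesced_mmio_init() below.
 */
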
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

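/*
 * For context, a hedged sketch of the userspace side (not part of this
 * file): the ring page is mapped with mmap() on the vcpu fd at offset
 * KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE, and the VMM typically
 * drains it on each vcpu exit, along these lines:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *		handle_mmio_write(ent->phys_addr, ent->data, ent->len);
 *		smp_mb();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 *
 * The barrier keeps the entry read before the slot is recycled;
 * handle_mmio_write() is a hypothetical VMM helper, not a KVM API.
 */
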
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);
	return ret;
}

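/*
 * Userspace reaches this through the KVM_REGISTER_COALESCED_MMIO vm
 * ioctl (availability is signalled by KVM_CAP_COALESCED_MMIO).  A
 * minimal sketch, assuming a vm fd and a device at the hypothetical
 * address 0xd0000000 whose writes may be posted lazily:
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xd0000000,
 *		.size = 0x1000,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
 *		...;
 *
 * Writes inside the zone are then queued in the ring instead of
 * triggering a KVM_EXIT_MMIO on every access, until the zone is
 * removed again with KVM_UNREGISTER_COALESCED_MMIO.
 */
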
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}