/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while (0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while (0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while (0)
#else
/* We must force memory ordering even if the guest is UP since the host could
 * be running on another CPU, but SMP barriers are defined to barrier() in
 * that configuration.  So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side.  FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out_num: the number of sg readable by other side
 * @in_num: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns remaining capacity of queue or a negative error
 * (i.e. -ENOSPC).  Note that it only really makes sense to treat all
 * positive return values as "available": indirect buffers mean that
 * we can put an entire sg[] array inside a single queue entry.
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with 0.1 seconds between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect.  FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
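
/*
 * Example (illustrative sketch, not part of this file): a driver queueing a
 * request where the device reads a header and writes a response.  'hdr',
 * 'resp', 'resp_len' and 'token' are hypothetical driver-side objects.
 *
 *	struct scatterlist sg[2];
 *	int err;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], hdr, sizeof(*hdr));	// device-readable (out)
 *	sg_set_buf(&sg[1], resp, resp_len);	// device-writable (in)
 *	err = virtqueue_add_buf(vq, sg, 1, 1, token, GFP_ATOMIC);
 *	if (err < 0)
 *		return err;	// ring full; retry after virtqueue_get_buf()
 *	virtqueue_kick(vq);
 */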

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
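
/*
 * Example (sketch): the split kick lets a driver drop its own lock before
 * the potentially slow notification, as the kernel-doc above suggests.
 * 'dev->lock' and 'flags' are hypothetical driver state.
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	... one or more virtqueue_add_buf() calls ...
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&dev->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */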

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
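
/*
 * Example (sketch): the canonical interrupt-handler loop that uses the
 * return value to close the race with the device adding more used buffers
 * while callbacks were off.  'process()' is a hypothetical per-driver
 * completion routine.
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 */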

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
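
/*
 * Example (sketch): a transmit-completion path that only wants an interrupt
 * once most in-flight buffers are done, polling directly when the hint
 * loses the race.  'reclaim_done_bufs()' is hypothetical.
 *
 *	reclaim_done_bufs(vq);
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		reclaim_done_bufs(vq);	// more completed while re-arming
 */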

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
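
/*
 * Example (sketch): how a transport might allocate ring memory and create a
 * queue.  vring_size() comes from <linux/virtio_ring.h>; the 4096-byte
 * alignment mirrors virtio-pci, and 'my_notify'/'my_callback' are
 * hypothetical transport functions.
 *
 *	void *pages = alloc_pages_exact(vring_size(num, 4096),
 *					GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq = vring_new_virtqueue(num, 4096, vdev,
 *						   true, pages, my_notify,
 *						   my_callback, "requests");
 */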

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
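
/*
 * Example (sketch): a transport typically calls this from its
 * finalize_features hook so the ring feature bits it understands survive
 * and any other transport-range bit is masked off.  'my_finalize_features'
 * is a hypothetical transport function.
 *
 *	static void my_finalize_features(struct virtio_device *vdev)
 *	{
 *		// Give the ring code a chance to accept or decline its bits.
 *		vring_transport_features(vdev);
 *	}
 */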

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");