/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	smp_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	smp_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}
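
/*
 * Usage sketch (hypothetical consumer; process_packets() is an assumed
 * helper, not part of this file): the mask/unmask pair above is meant to
 * bracket a read loop, so that data that races in between the final
 * "ring is empty" check and the unmask is never left unprocessed:
 *
 *	hv_begin_read(rbi);
 * again:
 *	process_packets(rbi);
 *	if (hv_end_read(rbi) != 0) {
 *		hv_begin_read(rbi);
 *		goto again;
 *	}
 */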

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/*
	 * We need to signal only in the case where the ring
	 * transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}
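
/*
 * For example (hypothetical values): if read_index was 50 and our write
 * began at write_index == 50 (saved as old_write), the ring was empty
 * when the write started, so the host may have exited its read loop and
 * must be signaled to resume draining.
 */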

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer
 * of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate
 *    whether the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 old_rd,
				      struct hv_ring_buffer_info *rbi)
{
	u32 prev_write_sz;
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
			old_rd - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}
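
/*
 * Worked example (hypothetical numbers): with r_size == 4096,
 * write_loc == 1000, old_rd == 1100 and read_loc == 2000, the writable
 * space before this read was old_rd - write_loc == 100 bytes and after
 * it is read_loc - write_loc == 1000 bytes. If the blocked producer has
 * pending_send_sz == 512, this read just crossed the threshold
 * (100 < 512 <= 1000), so the producer is signaled exactly once.
 */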

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed
 * (for example, a packet descriptor) before copying out the rest.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}
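
/*
 * The modulo above handles wrap-around. For instance (hypothetical
 * numbers), with ring_datasize == 4096, read_index == 4000 and
 * offset == 200, the result is (4000 + 200) % 4096 == 104, i.e. the
 * location wraps past the end of the data area back to the start.
 */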

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices of the specified ring buffer, packed
 * into a single u64: the write index occupies the upper 32 bits and
 * the lower 32 bits (the read index slot) are left as zero.
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
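
/*
 * For example, a write_index of 0x20 yields 0x0000002000000000. This
 * packed value is what hv_ringbuffer_write() below appends after each
 * packet as the "previous indices" trailer.
 */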

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around on the
 * source (ring buffer) side only!!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
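
/*
 * Wrap-around example (hypothetical numbers): with
 * ring_buffer_size == 4096, start_read_offset == 4000 and destlen == 200,
 * the copy splits into a 96-byte fragment from offset 4000 up to the end
 * of the ring followed by 104 bytes from offset 0, and the returned
 * offset is 104. hv_copyto_ringbuffer() below is the mirror image for
 * the write side.
 */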

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
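
/*
 * Usage sketch (hypothetical caller; "channel" and "order" are
 * assumptions, not part of this file): callers are expected to hand in
 * a page-aligned buffer whose first PAGE_SIZE bytes hold the
 * struct hv_ring_buffer header, with the data area following it:
 *
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, order);
 *	int ret;
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	ret = hv_ringbuffer_init(&channel->outbound, ring,
 *				 PAGE_SIZE << order);
 */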

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct scatterlist *sglist, u32 sgcount, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i) {
		totalbytes_towrite += sg->length;
	}

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	smp_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
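
/*
 * Usage sketch (hypothetical caller; "desc", "payload" and the
 * vmbus_signal_host() helper are assumptions, not part of this file):
 * a sender typically gathers a packet descriptor and its payload into a
 * scatterlist and signals the host only when this routine asks for it:
 *
 *	struct scatterlist sg[2];
 *	bool signal = false;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], desc, sizeof(*desc));
 *	sg_set_buf(&sg[1], payload, payload_len);
 *	if (hv_ringbuffer_write(&channel->outbound, sg, 2, &signal) == 0 &&
 *	    signal)
 *		vmbus_signal_host(channel);
 */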

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;
	u32 old_read;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	old_read = bytes_avail_toread;

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	smp_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(old_read, inring_info);

	return 0;
}
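
/*
 * Usage sketch (hypothetical consumer; struct vmpacket_descriptor comes
 * from the Hyper-V headers, and "buf"/"channel" are assumptions): a
 * reader typically peeks at the fixed-size descriptor first to learn the
 * packet length, then reads the payload with an offset that skips the
 * part it has already consumed:
 *
 *	struct vmpacket_descriptor desc;
 *	bool signal = false;
 *	int ret;
 *
 *	ret = hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *	if (ret == 0) {
 *		u32 packetlen = desc.len8 << 3;
 *		u32 offset = desc.offset8 << 3;
 *
 *		ret = hv_ringbuffer_read(&channel->inbound, buf,
 *					 packetlen - offset, offset,
 *					 &signal);
 *	}
 */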