/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

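/*
 * hv_begin_read() / hv_end_read()
 *
 * Bracket a read pass over the ring buffer. hv_begin_read() sets
 * interrupt_mask so the host does not signal us while we drain the ring;
 * hv_end_read() clears the mask and returns the number of bytes still
 * available to read, so the caller can notice data that raced in while
 * the mask was set (mirroring the host-side protocol described below).
 */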
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
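	/*
	 * Make sure the updated write_index is visible before we sample
	 * interrupt_mask (presumably pairing with the host's ordering on
	 * the read side); otherwise we could see a stale mask and skip a
	 * needed signal.
	 */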
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, potentially the
 * consumer of the ring buffer can signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate whether
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 old_rd,
				      struct hv_ring_buffer_info *rbi)
{
	u32 prev_write_sz;
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
			old_rd - write_loc;
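
	/*
	 * Signal only on the transition from "not enough room for the
	 * pending send" to "enough room": prev_write_sz is the writable
	 * space before this read, cur_write_sz the space after it.
	 */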
	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over 'offset' bytes before the copy
 * starts.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices as u64 of the specified ring buffer
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
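	/*
	 * Note: only the write index is recorded here, in the upper
	 * 32 bits; the lower 32 bits are left as zero.
	 */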
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around on the
 * source (ring) side only.
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
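		/* Copy the tail of the data area first, then wrap to the start. */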
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from a source buffer into the ring buffer.
 * Assumes there is enough room; handles wrap-around on the destination
 * (ring) side only.
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
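	/*
	 * The layout assumed here is one page of control data
	 * (struct hv_ring_buffer: indices, interrupt mask, ...) followed
	 * immediately by the data area, so the usable size is the total
	 * length minus that header page.
	 */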
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct scatterlist *sglist, u32 sgcount, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
	{
		totalbytes_towrite += sg->length;
	}

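	/*
	 * Reserve space for the u64 of ring indices that gets appended
	 * after the payload (written via hv_get_ring_bufferindices()
	 * below).
	 */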
	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for_each_sg(sglist, sg, sgcount, i)
	{
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

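	/*
	 * Decide whether to signal the host outside the lock, based on
	 * the write position captured before this packet was copied in
	 * (a signal is needed only if the ring was empty at that point).
	 */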
	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;
	u32 old_read;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

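	/*
	 * Snapshot taken before the read; used at the end to decide
	 * whether the host needs to be signaled now that space has been
	 * freed for a pending send.
	 */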
	old_read = bytes_avail_toread;

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(old_read, inring_info);

	return 0;
}