drivers/hv/ring_buffer.c

/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

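/*
 * A ring buffer as managed here is one control page (struct
 * hv_ring_buffer, which carries read_index, write_index and
 * interrupt_mask) followed by the data area.  hv_ringbuffer_init()
 * below depends on this layout when it computes ring_datasize as
 * buflen - sizeof(struct hv_ring_buffer).
 */
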
/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer.
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->write_index;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer.
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer.
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->read_index;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead over data it has already
 * examined (for example, a packet descriptor) before copying.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

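/*
 * Worked example for the arithmetic above: with ring_datasize = 4096,
 * read_index = 4000 and offset = 200, the result is
 * (4000 + 200) % 4096 = 104, i.e. the location wraps back past the end
 * of the data area to its start.
 */
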
/*
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer.
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer's data area.
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer's data area.
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices of the specified ring buffer,
 * packed into a single u64.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

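/*
 * Note: only the write index is actually packed (upper 32 bits); the
 * read-index slot in the low 32 bits is left as zero.
 * hv_ringbuffer_write() appends this value after each packet's payload,
 * and since write_index has not yet been advanced at that point, the
 * trailer records the offset at which the packet started.
 */
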
/*
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into a destination
 * buffer.  Assumes there is enough room; handles wrap-around on the
 * source (ring) side only.
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

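/*
 * Worked example: with ring_buffer_size = 4096, start_read_offset =
 * 4000 and destlen = 200, the copy wraps: frag_len = 96 bytes come
 * from the tail of the ring and the remaining 104 bytes from its
 * head; the returned offset is (4000 + 200) % 4096 = 104.
 */
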
/*
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from a source buffer into the ring
 * buffer.  Assumes there is enough room; handles wrap-around on the
 * destination (ring) side only.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer.
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer.
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->interrupt_mask;
}

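/*
 * Note on interrupt_mask: it lives in the shared control page.  When
 * the reading endpoint sets it, the writing endpoint is expected to
 * skip signaling for newly written data; callers of this helper use
 * the value to decide whether a signal is needed.
 */
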
/*
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer.
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

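/*
 * Illustrative usage (hypothetical caller, not taken from this file):
 *
 *	struct hv_ring_buffer_info rbi;
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, 2);
 *
 *	if (ring && !hv_ringbuffer_init(&rbi, ring, 4 * PAGE_SIZE))
 *		... ring is ready: one control page + three data pages ...
 *
 * The PAGE_SIZE check above guarantees that the data area begins
 * exactly one page into the caller-supplied buffer.
 */
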
/*
 * hv_ringbuffer_cleanup()
 *
 * Clean up the ring buffer.
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

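/*
 * Intentionally empty: hv_ringbuffer_init() allocates nothing, so the
 * underlying buffer stays owned, and is eventually freed, by the
 * caller.
 */
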
/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer.
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct scatterlist *sglist, u32 sgcount)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
		totalbytes_towrite += sg->length;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, treat the ring buffer as
	 * full: writing anyway would leave read_index == write_index,
	 * which on the next pass would be indistinguishable from an
	 * empty ring.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Make sure we flush all writes before updating the write index */
	smp_wmb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
	return 0;
}

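/*
 * Layout of a packet as written above:
 *
 *	| sg payload ... | u64 prev_indices |
 *
 * write_index is published only after both parts are copied; the
 * smp_wmb() guarantees the payload and trailer are visible to the
 * reading endpoint before it can observe the new write_index.
 */
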
/*
 * hv_ringbuffer_peek()
 *
 * Read from the ring buffer without advancing the read index.
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Copy from the current read location without updating it */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

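/*
 * Typical usage (as in the VMBus channel code): a consumer first peeks
 * at the fixed-size packet descriptor with hv_ringbuffer_peek(),
 * inspects it, and then consumes the whole packet with
 * hv_ringbuffer_read(), passing the descriptor length as 'offset' to
 * skip the bytes it has already seen.
 */
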
/*
 * hv_ringbuffer_read()
 *
 * Read from the ring buffer and advance the read index.
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	/* Consume the u64 index trailer that follows every packet */
	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	smp_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

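/*
 * The smp_mb() above is the read-side counterpart of the write path:
 * once read_index advances, the writer is free to reuse the bytes just
 * consumed, so every read of packet data must complete before the new
 * read_index becomes visible.
 */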
