/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
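/* Up to this many bytes of a received packet are copied into the skb
 * linear area (see page_to_skb()); the rest stays in the receive pages,
 * which are attached to the skb as fragments. */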
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2
#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* fragments + linear part + virtio header */
	struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
	struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * The virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page as this
	 * header sg. This padding makes the next sg 16-byte aligned after
	 * the virtio_net_hdr.
	 */
	char padding[6];
};

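/* The virtio header for each packet is kept in the skb control buffer
 * (skb->cb), which is ours to use while we own the skb, so no extra
 * per-packet allocation is needed. */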
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * page->private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}

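/* Build an skb from a chain of receive pages: at most GOOD_COPY_LEN
 * bytes are copied into the linear area so small packets release their
 * pages immediately; any remaining data stays in the pages, which are
 * attached to the skb as fragments. */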
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		if (net_ratelimit())
			pr_debug("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}

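/* A merged receive packet can span several buffers: num_buffers in the
 * first buffer's header says how many the host used in total, so pull
 * the remaining pages off the receive queue and attach them as frags. */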
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i;
	unsigned int len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(vi->rvq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--vi->num;
	}
	return 0;
}

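/* Receive completion handler: the layout of @buf depends on the mode
 * negotiated with the host (plain skb, chained big-packet pages, or
 * mergeable buffers), so dispatch accordingly before handing the skb
 * up to the stack. */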
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

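/* Post a receive buffer for the default (small packet) case: one sg
 * entry for the virtio header plus one for the skb's linear data. */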
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

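/* Post a receive buffer for big packets: a chain of pages covering
 * MAX_SKB_FRAGS + 2 sg entries, with the virtio header in an sg entry
 * of its own (see struct padded_vnet_hdr for why). */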
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* vi->rx_sg[0] and vi->rx_sg[1] share the same page */
	/* a separate vi->rx_sg[0] for the virtio_net_hdr only, due to a QEMU bug */
	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

	/* vi->rx_sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(vi, first);

	return err;
}

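/* Post a receive buffer for mergeable mode: a single page per buffer;
 * the host splits large packets across as many buffers as it needs. */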
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(vi, page);

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++vi->num;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	virtqueue_kick(vi->rvq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
	napi_enable(&vi->napi);

	/* If all buffers were filled by the other side before we enabled
	 * napi, we won't get another interrupt, so process any outstanding
	 * packets now. virtnet_poll wants to re-enable the queue, so we
	 * disable it here. We synchronize against interrupts via
	 * NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(vi->rvq);
		local_bh_disable();
		__napi_schedule(&vi->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	virtnet_napi_enable(vi);

	/* In theory, this can happen: if we don't get any buffers in, we
	 * will *never* try to fill again. */
	if (still_empty)
		queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}

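/* NAPI poll: drain up to @budget received buffers, refill the ring when
 * it runs below half full, and re-enable receive interrupts only once
 * the queue is empty, rechecking for buffers that raced with the
 * re-enable. */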
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			queue_delayed_work(system_nrt_wq, &vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

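/* Reclaim completed transmit skbs; returns the number of sg entries
 * freed, which start_xmit() uses to track remaining ring capacity. */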
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

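/* Fill in the virtio header from the skb metadata (checksum offsets and
 * GSO parameters) and add header + data to the send queue. Returns the
 * remaining queue capacity, or a negative errno on failure. */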
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
				 0, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		if (likely(capacity == -ENOMEM)) {
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "TX queue failure: out of memory\n");
		} else {
			dev->stats.tx_fifo_errors++;
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "Unexpected TX queue failure: %d\n",
					 capacity);
		}
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				virtqueue_disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

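/* Fold the per-cpu counters into one rtnl_link_stats64; the seqcount
 * retry loops give a consistent 64-bit snapshot even on 32-bit SMP. */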
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure we have some buffers: if oom use wq. */
	if (!try_fill_recv(vi, GFP_KERNEL))
		queue_delayed_work(system_nrt_wq, &vi->refill, 0);

	virtnet_napi_enable(vi);
	return 0;
}

/*
 * Send a command via the control virtqueue and check its status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
				  0, 0))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);
	napi_disable(&vi->napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

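/* Handle a config-space change interrupt: ack a pending link announce
 * request if the host raised one, then propagate the new link state to
 * the net core. */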
static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
			      offsetof(struct virtio_net_config, status),
			      &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netif_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	queue_work(system_nrt_wq, &vi->config_work);
}

static int init_vqs(struct virtnet_info *vi)
{
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
	const char *names[] = { "input", "output", "control" };
	int nvqs, err;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
	if (err)
		return err;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
	}
	return 0;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len) < 0)
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	err = init_vqs(vi);
	if (err)
		goto free_stats;

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		queue_work(system_nrt_wq, &vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
free_vqs:
	vdev->config->del_vqs(vdev);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

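/* Detach and free any buffers still queued in the virtqueues; only
 * safe to call after the device has been reset, so nothing is in
 * flight. */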
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	while (1) {
		buf = virtqueue_detach_unused_buf(vi->svq);
		if (!buf)
			break;
		dev_kfree_skb(buf);
	}
	while (1) {
		buf = virtqueue_detach_unused_buf(vi->rvq);
		if (!buf)
			break;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		--vi->num;
	}
	BUG_ON(vi->num != 0);
}

static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	vi->vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM
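/* Freeze/restore tear down and rebuild the virtqueues around suspend:
 * the device is reset on freeze, so all buffers must be freed, and
 * restore repopulates the receive ring from scratch. */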
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		napi_disable(&vi->napi);

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		virtnet_napi_enable(vi);

	netif_device_attach(vi->dev);

	if (!try_fill_recv(vi, GFP_KERNEL))
		queue_delayed_work(system_nrt_wq, &vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = __devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");