/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;

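/* Deliver 'skb' as a Netlink notification for 'family', in the network
 * namespace of the request described by 'info'. */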
static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, genl_info_net(info), info->snd_portid,
		    0, info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on
 * vports, etc.) and writes to other state (flow table modifications, set
 * miscellaneous datapath parameters, etc.) are protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
				  struct sk_buff *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp = NULL;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, dp_ifindex);
	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			dp = vport->dp;
	}
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return vport->ops->get_name(vport);
}

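/* Return the ifindex of the datapath's local port's network device, or 0
 * if there is no local port. */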
static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = netdev_vport_priv(local)->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

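/* RCU callback invoked once all readers are done with 'dp': frees the
 * flow table, per-CPU stats, port array, and the datapath itself. */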
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	struct sw_flow_key key;
	u64 *stats_counter;
	u32 n_mask_hit;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_extract(skb, p->port_no, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = &key;
		upcall.userdata = NULL;
		upcall.portid = p->upcall_portid;
		ovs_dp_upcall(dp, skb, &upcall);
		consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	OVS_CB(skb)->flow = flow;
	OVS_CB(skb)->pkt_key = &key;

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->sync);
}

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}

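/* Software-segment a GSO packet and queue each resulting segment to
 * userspace individually; for UDP fragments, segments after the first
 * are re-marked as later fragments. */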
static int queue_gso_packets(struct net *net, int dp_ifindex,
			     struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_extract()
			 * in this case is for a first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}

static size_t key_attr_size(void)
{
	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}

static size_t upcall_msg_size(const struct sk_buff *skb,
			      const struct nlattr *userdata)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (userdata)
		size += NLA_ALIGN(userdata->nla_len);

	return size;
}

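/* Build an upcall Netlink message containing the flow key, optional
 * userdata, and full packet contents, and unicast it to the given upcall
 * portid.  A VLAN tag held in skb metadata is pushed back into the packet
 * data first. */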
static int queue_userspace_packet(struct net *net, int dp_ifindex,
				  struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	genlmsg_end(user_skb, upcall);
	err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
	kfree_skb(nskb);
	return err;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

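/* Handler for OVS_PACKET_CMD_EXECUTE: rebuild the packet supplied by
 * userspace, wrap it in a temporary flow, and execute the given actions
 * on it within the target datapath. */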
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct datapath *dp;
	struct ethhdr *eth;
	int len;
	int err;

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_extract(packet, -1, &flow->key);
	if (err)
		goto err_flow_free;

	err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
	if (err)
		goto err_flow_free;
	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
	err = PTR_ERR(acts);
	if (IS_ERR(acts))
		goto err_flow_free;

	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, 0, &acts);
	rcu_assign_pointer(flow->sf_acts, acts);
	if (err)
		goto err_flow_free;

	OVS_CB(packet)->flow = flow;
	OVS_CB(packet)->pkt_key = &flow->key;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	local_bh_disable();
	err = ovs_execute_actions(dp, packet);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static const struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

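/* Sum the per-CPU datapath counters into 'stats' and 'mega_stats',
 * retrying each CPU's read until its u64 stats sequence is consistent. */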
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
	return NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

/* Called with ovs_mutex. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	struct nlattr *start;
	struct ovs_flow_stats stats;
	struct ovs_header *ovs_header;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	/* Fill flow key. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;

	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
	if (err)
		goto error;
	nla_nest_end(skb, nla);

	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
	if (!nla)
		goto nla_put_failure;

	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
	if (err)
		goto error;

	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = (u8)ntohs(flow->tcp_flags);
	spin_unlock_bh(&flow->lock);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		goto nla_put_failure;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS,
		    sizeof(struct ovs_flow_stats), &stats))
		goto nla_put_failure;

	if (tcp_flags &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
		goto nla_put_failure;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'. This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them. (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_check(flow->sf_acts,
						lockdep_ovsl_is_held());

		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);
		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				goto error;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len)
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
	const struct sw_flow_actions *sf_acts;

	sf_acts = ovsl_dereference(flow->sf_acts);

	return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
					       struct datapath *dp,
					       u32 portid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(flow);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);
	return skb;
}

static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl,
					     const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit);
}

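/* Handler for OVS_FLOW_CMD_NEW and OVS_FLOW_CMD_SET: validate the key,
 * mask, and actions from the request, then either insert a new flow or
 * replace the actions of an existing one, notifying with the resulting
 * flow. */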
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key, masked_key;
	struct sw_flow *flow = NULL;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts = NULL;
	struct sw_flow_match match;
	int error;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY])
		goto error;

	ovs_match_init(&match, &key, &mask);
	error = ovs_nla_get_match(&match,
				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
	if (error)
		goto error;

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error;

		ovs_flow_mask_key(&masked_key, &key, &mask);
		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
					     &masked_key, 0, &acts);
		if (error) {
			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
			goto err_kfree;
		}
	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
		error = -EINVAL;
		goto error;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	error = -ENODEV;
	if (!dp)
		goto err_unlock_ovs;

	/* Check if this is a duplicate flow */
	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow) {
		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
			goto err_unlock_ovs;

		/* Allocate flow. */
		flow = ovs_flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto err_unlock_ovs;
		}
		clear_stats(flow);

		flow->key = masked_key;
		flow->unmasked_key = key;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
		if (error) {
			acts = NULL;
			goto err_flow_free;
		}

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq, OVS_FLOW_CMD_NEW);
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request. We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		error = -EEXIST;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto err_unlock_ovs;

		/* The unmasked key has to be the same for flow updates. */
		error = -EINVAL;
		if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
			OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
			goto err_unlock_ovs;
		}

		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);
		ovs_nla_free_flow_actions(old_acts);

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq, OVS_FLOW_CMD_NEW);

		/* Clear stats. */
		if (a[OVS_FLOW_ATTR_CLEAR]) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}
	ovs_unlock();

	if (!IS_ERR(reply))
		ovs_notify(&dp_flow_genl_family, reply, info);
	else
		genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
			     0, PTR_ERR(reply));
	return 0;

err_flow_free:
	ovs_flow_free(flow, false);
err_unlock_ovs:
	ovs_unlock();
err_kfree:
	kfree(acts);
error:
	return error;
}

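/* Handler for OVS_FLOW_CMD_GET: look up a single flow by its unmasked
 * key and reply with its key, mask, stats, and actions. */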
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	int err;

	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
		return -EINVAL;
	}

	ovs_match_init(&match, &key, NULL);
	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
					info->snd_seq, OVS_FLOW_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

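/* Handler for OVS_FLOW_CMD_DEL: remove the flow matching the given key,
 * or flush the entire flow table if no key is supplied. */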
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	int err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (!a[OVS_FLOW_ATTR_KEY]) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	ovs_match_init(&match, &key, NULL);
	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
	if (err)
		goto unlock;

	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_alloc_info(flow);
	if (!reply) {
		err = -ENOMEM;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);

	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
	BUG_ON(err < 0);

	ovs_flow_free(flow, true);
	ovs_unlock();

	ovs_notify(&dp_flow_genl_family, reply, info);
	return 0;
unlock:
	ovs_unlock();
	return err;
}

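/* Dump handler for OVS_FLOW_CMD_GET: walk the flow table, resuming from
 * the (bucket, object) position saved in cb->args between calls. */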
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, dp, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}

static const struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0, /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set,
	},
};

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};

static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));

	return msgsize;
}

static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	rcu_read_lock();
	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
		    &dp_stats))
		goto nla_put_failure;

	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
		    sizeof(struct ovs_dp_megaflow_stats),
		    &dp_megaflow_stats))
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
					     u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}

/* Called with ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		rcu_read_lock();
		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
		rcu_read_unlock();
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

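/* Handler for OVS_DP_CMD_NEW: allocate a datapath, its flow table,
 * per-CPU stats, and vport hash buckets, then create the OVSP_LOCAL
 * internal port under the requested name. */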
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err, i;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	ovs_lock();

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_unlock_ovs;

	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

	/* Allocate table. */
	err = ovs_flow_tbl_init(&dp->table);
	if (err)
		goto err_free_dp;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	for_each_possible_cpu(i) {
		struct dp_stats_percpu *dpath_stats;
		dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
		u64_stats_init(&dpath_stats->sync);
	}

	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dp->ports) {
		err = -ENOMEM;
		goto err_destroy_percpu;
	}

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_ports_array;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_destroy_local_port;

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);
	return 0;

err_destroy_local_port:
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
err_destroy_ports_array:
	kfree(dp->ports);
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
	release_net(ovs_dp_get_net(dp));
	kfree(dp);
err_unlock_ovs:
	ovs_unlock();
err:
	return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is the datapath's internal port.  We need to make sure
	 * that all ports in the datapath are destroyed before the datapath
	 * itself is freed.
	 */
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

	call_rcu(&dp->rcu, destroy_dp_rcu);
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto unlock;

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto unlock;

	__dp_destroy(dp);
	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;
unlock:
	ovs_unlock();
	return err;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto unlock;

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
			     0, err);
		err = 0;
		goto unlock;
	}

	ovs_unlock();
	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;
unlock:
	ovs_unlock();
	return err;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp)) {
		err = PTR_ERR(dp);
		goto unlock;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);

unlock:
	ovs_unlock();
	return err;
}

static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}
	rcu_read_unlock();

	cb->args[0] = i;

	return skb->len;
}

static const struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0, /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* Called with ovs_mutex or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);

	return skb;
}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}

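/* Handler for OVS_VPORT_CMD_NEW: attach a new vport to a datapath, at
 * the requested port number or at the first free one. */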
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}

	ovs_notify(&dp_vport_genl_family, reply, info);

exit_unlock:
	ovs_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!reply) {
		err = -ENOMEM;
		goto exit_unlock;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_free:
	kfree_skb(reply);
exit_unlock:
	ovs_unlock();
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = 0;
	ovs_dp_detach_port(vport);

	ovs_notify(&dp_vport_genl_family, reply, info);

exit_unlock:
	ovs_unlock();
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
	return err;
}

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	rcu_read_lock();
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

static const struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0, /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family_and_ops {
	struct genl_family *family;
	const struct genl_ops *ops;
	int n_ops;
	const struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}

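/* Register every Generic Netlink family in dp_genl_families, unwinding
 * the ones already registered if any registration fails. */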
static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		f->family->ops = f->ops;
		f->family->n_ops = f->n_ops;
		f->family->mcgrps = f->group;
		f->family->n_mcgrps = f->group ? 1 : 0;
		err = genl_register_family(f->family);
		if (err)
			goto error;
		n_registered++;
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	ovs_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);
	ovs_unlock();

	cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

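/* Module init: bring up the flow and vport subsystems, per-netns state,
 * the netdev notifier, and the Generic Netlink families, in that order,
 * unwinding on failure. */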
static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = ovs_flow_init();
	if (err)
		goto error;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");