2 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 /* Functions for managing the dp interface/device. */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
19 #include <linux/jhash.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/genetlink.h>
24 #include <linux/kernel.h>
25 #include <linux/kthread.h>
26 #include <linux/mutex.h>
27 #include <linux/percpu.h>
28 #include <linux/rcupdate.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/version.h>
32 #include <linux/ethtool.h>
33 #include <linux/wait.h>
34 #include <asm/system.h>
35 #include <asm/div64.h>
37 #include <linux/highmem.h>
38 #include <linux/netfilter_bridge.h>
39 #include <linux/netfilter_ipv4.h>
40 #include <linux/inetdevice.h>
41 #include <linux/list.h>
42 #include <linux/rculist.h>
43 #include <linux/dmi.h>
44 #include <net/inet_ecn.h>
45 #include <net/genetlink.h>
47 #include "openvswitch/datapath-protocol.h"
54 #include "vport-internal_dev.h"
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
57 LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
58 #error Kernels before 2.6.18 or after 3.0 are not supported by this version of Open vSwitch.
61 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
62 EXPORT_SYMBOL(dp_ioctl_hook);
67 * Writes to device state (add/remove datapath, port, set operations on vports,
68 * etc.) are protected by RTNL.
70 * Writes to other state (flow table modifications, set miscellaneous datapath
71 * parameters such as drop frags, etc.) are protected by genl_mutex. The RTNL
72 * lock nests inside genl_mutex.
74 * Reads are protected by RCU.
76 * There are a few special cases (mostly stats) that have their own
77 * synchronization but they nest under all of above and don't interact with
81 /* Global list of datapaths to enable dumping them all out.
82 * Protected by genl_mutex.
84 static LIST_HEAD(dps);
86 static struct vport *new_vport(const struct vport_parms *);
87 static int queue_userspace_packets(struct datapath *, struct sk_buff *,
88 const struct dp_upcall_info *);
90 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
91 struct datapath *get_dp(int dp_ifindex)
93 struct datapath *dp = NULL;
94 struct net_device *dev;
97 dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
99 struct vport *vport = internal_dev_get_vport(dev);
107 EXPORT_SYMBOL_GPL(get_dp);
109 /* Must be called with genl_mutex. */
110 static struct flow_table *get_table_protected(struct datapath *dp)
112 return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
115 /* Must be called with rcu_read_lock or RTNL lock. */
116 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
118 return rcu_dereference_rtnl(dp->ports[port_no]);
121 /* Must be called with rcu_read_lock or RTNL lock. */
122 const char *dp_name(const struct datapath *dp)
124 return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
127 static int get_dpifindex(struct datapath *dp)
134 local = get_vport_protected(dp, OVSP_LOCAL);
136 ifindex = vport_get_ifindex(local);
145 static inline size_t br_nlmsg_size(void)
147 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
148 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
149 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
150 + nla_total_size(4) /* IFLA_MASTER */
151 + nla_total_size(4) /* IFLA_MTU */
152 + nla_total_size(1); /* IFLA_OPERSTATE */
155 /* Caller must hold RTNL lock. */
156 static int dp_fill_ifinfo(struct sk_buff *skb,
157 const struct vport *port,
158 int event, unsigned int flags)
160 struct datapath *dp = port->dp;
161 int ifindex = vport_get_ifindex(port);
162 struct ifinfomsg *hdr;
163 struct nlmsghdr *nlh;
168 nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
172 hdr = nlmsg_data(nlh);
173 hdr->ifi_family = AF_BRIDGE;
175 hdr->ifi_type = ARPHRD_ETHER;
176 hdr->ifi_index = ifindex;
177 hdr->ifi_flags = vport_get_flags(port);
180 NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
181 NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
182 NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
183 #ifdef IFLA_OPERSTATE
184 NLA_PUT_U8(skb, IFLA_OPERSTATE,
185 vport_is_running(port)
186 ? vport_get_operstate(port)
190 NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
192 return nlmsg_end(skb, nlh);
195 nlmsg_cancel(skb, nlh);
199 /* Caller must hold RTNL lock. */
200 static void dp_ifinfo_notify(int event, struct vport *port)
205 skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
209 err = dp_fill_ifinfo(skb, port, event, 0);
211 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
212 WARN_ON(err == -EMSGSIZE);
216 rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
220 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
223 static void release_dp(struct kobject *kobj)
225 struct datapath *dp = container_of(kobj, struct datapath, ifobj);
229 static struct kobj_type dp_ktype = {
230 .release = release_dp
233 static void destroy_dp_rcu(struct rcu_head *rcu)
235 struct datapath *dp = container_of(rcu, struct datapath, rcu);
237 flow_tbl_destroy(dp->table);
238 free_percpu(dp->stats_percpu);
239 kobject_put(&dp->ifobj);
242 /* Called with RTNL lock and genl_lock. */
243 static struct vport *new_vport(const struct vport_parms *parms)
247 vport = vport_add(parms);
248 if (!IS_ERR(vport)) {
249 struct datapath *dp = parms->dp;
251 rcu_assign_pointer(dp->ports[parms->port_no], vport);
252 list_add(&vport->node, &dp->port_list);
254 dp_ifinfo_notify(RTM_NEWLINK, vport);
260 /* Called with RTNL lock. */
261 void dp_detach_port(struct vport *p)
265 if (p->port_no != OVSP_LOCAL)
267 dp_ifinfo_notify(RTM_DELLINK, p);
269 /* First drop references to device. */
271 rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
273 /* Then destroy it. */
277 /* Must be called with rcu_read_lock. */
278 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
280 struct datapath *dp = p->dp;
281 struct sw_flow *flow;
282 struct dp_stats_percpu *stats;
283 int stats_counter_off;
286 OVS_CB(skb)->vport = p;
288 if (!OVS_CB(skb)->flow) {
289 struct sw_flow_key key;
293 /* Extract flow from 'skb' into 'key'. */
294 error = flow_extract(skb, p->port_no, &key, &key_len, &is_frag);
295 if (unlikely(error)) {
300 if (is_frag && dp->drop_frags) {
302 stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
307 flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
308 if (unlikely(!flow)) {
309 struct dp_upcall_info upcall;
311 upcall.cmd = OVS_PACKET_CMD_MISS;
314 upcall.sample_pool = 0;
315 upcall.actions = NULL;
316 upcall.actions_len = 0;
317 dp_upcall(dp, skb, &upcall);
318 stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
322 OVS_CB(skb)->flow = flow;
325 stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
326 flow_used(OVS_CB(skb)->flow, skb);
327 execute_actions(dp, skb);
330 /* Update datapath statistics. */
332 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
334 write_seqcount_begin(&stats->seqlock);
335 (*(u64 *)((u8 *)stats + stats_counter_off))++;
336 write_seqcount_end(&stats->seqlock);
341 static void copy_and_csum_skb(struct sk_buff *skb, void *to)
343 u16 csum_start, csum_offset;
346 get_skb_csum_pointers(skb, &csum_start, &csum_offset);
347 csum_start -= skb_headroom(skb);
349 skb_copy_bits(skb, 0, to, csum_start);
351 csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
352 skb->len - csum_start, 0);
353 *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
356 static struct genl_family dp_packet_genl_family = {
357 .id = GENL_ID_GENERATE,
358 .hdrsize = sizeof(struct ovs_header),
359 .name = OVS_PACKET_FAMILY,
361 .maxattr = OVS_PACKET_ATTR_MAX
364 /* Generic Netlink multicast groups for upcalls.
366 * We really want three unique multicast groups per datapath, but we can't even
367 * get one, because genl_register_mc_group() takes genl_lock, which is also
368 * held during Generic Netlink message processing, so trying to acquire
369 * multicast groups during OVS_DP_NEW processing deadlocks. Instead, we
370 * preallocate a few groups and use them round-robin for datapaths. Collision
371 * isn't fatal--multicast listeners should check that the family is the one
372 * that they want and discard others--but it wastes time and memory to receive
375 #define PACKET_N_MC_GROUPS 16
376 static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
378 static u32 packet_mc_group(int dp_ifindex, u8 cmd)
381 BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
383 idx = jhash_2words(dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
384 return packet_mc_groups[idx].id;
387 static int packet_register_mc_groups(void)
391 for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
392 struct genl_multicast_group *group = &packet_mc_groups[i];
395 sprintf(group->name, "packet%d", i);
396 error = genl_register_mc_group(&dp_packet_genl_family, group);
403 int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
405 struct dp_stats_percpu *stats;
408 forward_ip_summed(skb, true);
410 /* Break apart GSO packets into their component pieces. Otherwise
411 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
412 if (skb_is_gso(skb)) {
413 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
424 err = queue_userspace_packets(dp, skb, upcall_info);
432 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
434 write_seqcount_begin(&stats->seqlock);
436 write_seqcount_end(&stats->seqlock);
443 /* Send each packet in the 'skb' list to userspace for 'dp' as directed by
444 * 'upcall_info'. There will be only one packet unless we broke up a GSO
447 static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
448 const struct dp_upcall_info *upcall_info)
452 struct sk_buff *nskb;
455 dp_ifindex = get_dpifindex(dp);
462 group = packet_mc_group(dp_ifindex, upcall_info->cmd);
465 struct ovs_header *upcall;
466 struct sk_buff *user_skb; /* to be queued to userspace */
473 err = vlan_deaccel_tag(skb);
477 if (nla_attr_size(skb->len) > USHRT_MAX) {
482 len = sizeof(struct ovs_header);
483 len += nla_total_size(skb->len);
484 len += nla_total_size(FLOW_BUFSIZE);
485 if (upcall_info->userdata)
486 len += nla_total_size(8);
487 if (upcall_info->sample_pool)
488 len += nla_total_size(4);
489 if (upcall_info->actions_len)
490 len += nla_total_size(upcall_info->actions_len);
492 user_skb = genlmsg_new(len, GFP_ATOMIC);
494 netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
499 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
500 upcall->dp_ifindex = dp_ifindex;
502 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
503 flow_to_nlattrs(upcall_info->key, user_skb);
504 nla_nest_end(user_skb, nla);
506 if (upcall_info->userdata)
507 nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, upcall_info->userdata);
508 if (upcall_info->sample_pool)
509 nla_put_u32(user_skb, OVS_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
510 if (upcall_info->actions_len) {
511 const struct nlattr *actions = upcall_info->actions;
512 u32 actions_len = upcall_info->actions_len;
514 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
515 memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
516 nla_nest_end(user_skb, nla);
519 nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
520 if (skb->ip_summed == CHECKSUM_PARTIAL)
521 copy_and_csum_skb(skb, nla_data(nla));
523 skb_copy_bits(skb, 0, nla_data(nla), skb->len);
525 err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
536 while ((skb = nskb) != NULL) {
543 /* Called with genl_mutex. */
544 static int flush_flows(int dp_ifindex)
546 struct flow_table *old_table;
547 struct flow_table *new_table;
550 dp = get_dp(dp_ifindex);
554 old_table = get_table_protected(dp);
555 new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
559 rcu_assign_pointer(dp->table, new_table);
561 flow_tbl_deferred_destroy(old_table);
565 static int validate_actions(const struct nlattr *attr)
567 const struct nlattr *a;
570 nla_for_each_nested(a, attr, rem) {
571 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
572 [OVS_ACTION_ATTR_OUTPUT] = 4,
573 [OVS_ACTION_ATTR_USERSPACE] = 8,
574 [OVS_ACTION_ATTR_PUSH_VLAN] = 2,
575 [OVS_ACTION_ATTR_POP_VLAN] = 0,
576 [OVS_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN,
577 [OVS_ACTION_ATTR_SET_DL_DST] = ETH_ALEN,
578 [OVS_ACTION_ATTR_SET_NW_SRC] = 4,
579 [OVS_ACTION_ATTR_SET_NW_DST] = 4,
580 [OVS_ACTION_ATTR_SET_NW_TOS] = 1,
581 [OVS_ACTION_ATTR_SET_TP_SRC] = 2,
582 [OVS_ACTION_ATTR_SET_TP_DST] = 2,
583 [OVS_ACTION_ATTR_SET_TUNNEL] = 8,
584 [OVS_ACTION_ATTR_SET_PRIORITY] = 4,
585 [OVS_ACTION_ATTR_POP_PRIORITY] = 0,
587 int type = nla_type(a);
589 if (type > OVS_ACTION_ATTR_MAX || nla_len(a) != action_lens[type])
593 case OVS_ACTION_ATTR_UNSPEC:
596 case OVS_ACTION_ATTR_USERSPACE:
597 case OVS_ACTION_ATTR_POP_VLAN:
598 case OVS_ACTION_ATTR_SET_DL_SRC:
599 case OVS_ACTION_ATTR_SET_DL_DST:
600 case OVS_ACTION_ATTR_SET_NW_SRC:
601 case OVS_ACTION_ATTR_SET_NW_DST:
602 case OVS_ACTION_ATTR_SET_TP_SRC:
603 case OVS_ACTION_ATTR_SET_TP_DST:
604 case OVS_ACTION_ATTR_SET_TUNNEL:
605 case OVS_ACTION_ATTR_SET_PRIORITY:
606 case OVS_ACTION_ATTR_POP_PRIORITY:
607 /* No validation needed. */
610 case OVS_ACTION_ATTR_OUTPUT:
611 if (nla_get_u32(a) >= DP_MAX_PORTS)
615 case OVS_ACTION_ATTR_PUSH_VLAN:
616 if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
620 case OVS_ACTION_ATTR_SET_NW_TOS:
621 if (nla_get_u8(a) & INET_ECN_MASK)
635 static void clear_stats(struct sw_flow *flow)
639 flow->packet_count = 0;
640 flow->byte_count = 0;
643 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
645 struct ovs_header *ovs_header = info->userhdr;
646 struct nlattr **a = info->attrs;
647 struct sw_flow_actions *acts;
648 struct sk_buff *packet;
649 struct sw_flow *flow;
658 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
659 !a[OVS_PACKET_ATTR_ACTIONS] ||
660 nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
663 err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS]);
667 len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
668 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
672 skb_reserve(packet, NET_IP_ALIGN);
674 memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
676 skb_reset_mac_header(packet);
677 eth = eth_hdr(packet);
679 /* Normally, setting the skb 'protocol' field would be handled by a
680 * call to eth_type_trans(), but it assumes there's a sending
681 * device, which we may not have. */
682 if (ntohs(eth->h_proto) >= 1536)
683 packet->protocol = eth->h_proto;
685 packet->protocol = htons(ETH_P_802_2);
687 /* Build an sw_flow for sending this packet. */
693 err = flow_extract(packet, -1, &flow->key, &key_len, &is_frag);
697 err = flow_metadata_from_nlattrs(&flow->key.eth.in_port,
698 &flow->key.eth.tun_id,
699 a[OVS_PACKET_ATTR_KEY]);
703 flow->hash = flow_hash(&flow->key, key_len);
705 acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
709 rcu_assign_pointer(flow->sf_acts, acts);
711 OVS_CB(packet)->flow = flow;
714 dp = get_dp(ovs_header->dp_ifindex);
719 if (flow->key.eth.in_port < DP_MAX_PORTS)
720 OVS_CB(packet)->vport = get_vport_protected(dp,
721 flow->key.eth.in_port);
723 err = execute_actions(dp, packet);
739 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
740 [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
741 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
742 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
745 static struct genl_ops dp_packet_genl_ops[] = {
746 { .cmd = OVS_PACKET_CMD_EXECUTE,
747 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
748 .policy = packet_policy,
749 .doit = ovs_packet_cmd_execute
753 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
756 struct flow_table *table = get_table_protected(dp);
758 stats->n_flows = flow_tbl_count(table);
760 stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
761 for_each_possible_cpu(i) {
762 const struct dp_stats_percpu *percpu_stats;
763 struct dp_stats_percpu local_stats;
766 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
769 seqcount = read_seqcount_begin(&percpu_stats->seqlock);
770 local_stats = *percpu_stats;
771 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
773 stats->n_frags += local_stats.n_frags;
774 stats->n_hit += local_stats.n_hit;
775 stats->n_missed += local_stats.n_missed;
776 stats->n_lost += local_stats.n_lost;
780 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
781 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
782 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
783 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
786 static struct genl_family dp_flow_genl_family = {
787 .id = GENL_ID_GENERATE,
788 .hdrsize = sizeof(struct ovs_header),
789 .name = OVS_FLOW_FAMILY,
791 .maxattr = OVS_FLOW_ATTR_MAX
794 static struct genl_multicast_group dp_flow_multicast_group = {
795 .name = OVS_FLOW_MCGROUP
798 /* Called with genl_lock. */
799 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
800 struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
802 const int skb_orig_len = skb->len;
803 const struct sw_flow_actions *sf_acts;
804 struct ovs_flow_stats stats;
805 struct ovs_header *ovs_header;
811 sf_acts = rcu_dereference_protected(flow->sf_acts,
812 lockdep_genl_is_held());
814 ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
818 ovs_header->dp_ifindex = get_dpifindex(dp);
820 nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
822 goto nla_put_failure;
823 err = flow_to_nlattrs(&flow->key, skb);
826 nla_nest_end(skb, nla);
828 spin_lock_bh(&flow->lock);
830 stats.n_packets = flow->packet_count;
831 stats.n_bytes = flow->byte_count;
832 tcp_flags = flow->tcp_flags;
833 spin_unlock_bh(&flow->lock);
836 NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
839 NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats);
842 NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
844 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
845 * this is the first flow to be dumped into 'skb'. This is unusual for
846 * Netlink but individual action lists can be longer than
847 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
848 * The userspace caller can always fetch the actions separately if it
849 * really wants them. (Most userspace callers in fact don't care.)
851 * This can only fail for dump operations because the skb is always
852 * properly sized for single flows.
854 err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
856 if (err < 0 && skb_orig_len)
859 return genlmsg_end(skb, ovs_header);
864 genlmsg_cancel(skb, ovs_header);
868 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
870 const struct sw_flow_actions *sf_acts;
873 sf_acts = rcu_dereference_protected(flow->sf_acts,
874 lockdep_genl_is_held());
876 len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */
877 len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
878 len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */
879 len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */
880 len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */
881 return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL);
884 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
885 u32 pid, u32 seq, u8 cmd)
890 skb = ovs_flow_cmd_alloc_info(flow);
892 return ERR_PTR(-ENOMEM);
894 retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
899 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
901 struct nlattr **a = info->attrs;
902 struct ovs_header *ovs_header = info->userhdr;
903 struct sw_flow_key key;
904 struct sw_flow *flow;
905 struct sk_buff *reply;
907 struct flow_table *table;
913 if (!a[OVS_FLOW_ATTR_KEY])
915 error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
919 /* Validate actions. */
920 if (a[OVS_FLOW_ATTR_ACTIONS]) {
921 error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS]);
924 } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
929 dp = get_dp(ovs_header->dp_ifindex);
934 table = get_table_protected(dp);
935 flow = flow_tbl_lookup(table, &key, key_len);
937 struct sw_flow_actions *acts;
939 /* Bail out if we're not allowed to create a new flow. */
941 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
944 /* Expand table, if necessary, to make room. */
945 if (flow_tbl_need_to_expand(table)) {
946 struct flow_table *new_table;
948 new_table = flow_tbl_expand(table);
949 if (!IS_ERR(new_table)) {
950 rcu_assign_pointer(dp->table, new_table);
951 flow_tbl_deferred_destroy(table);
952 table = get_table_protected(dp);
959 error = PTR_ERR(flow);
965 /* Obtain actions. */
966 acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
967 error = PTR_ERR(acts);
969 goto error_free_flow;
970 rcu_assign_pointer(flow->sf_acts, acts);
972 /* Put flow in bucket. */
973 flow->hash = flow_hash(&key, key_len);
974 flow_tbl_insert(table, flow);
976 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
977 info->snd_seq, OVS_FLOW_CMD_NEW);
979 /* We found a matching flow. */
980 struct sw_flow_actions *old_acts;
982 /* Bail out if we're not allowed to modify an existing flow.
983 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
984 * because Generic Netlink treats the latter as a dump
985 * request. We also accept NLM_F_EXCL in case that bug ever
989 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
990 info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
993 /* Update actions. */
994 old_acts = rcu_dereference_protected(flow->sf_acts,
995 lockdep_genl_is_held());
996 if (a[OVS_FLOW_ATTR_ACTIONS] &&
997 (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) ||
998 memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]),
999 old_acts->actions_len))) {
1000 struct sw_flow_actions *new_acts;
1002 new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
1003 error = PTR_ERR(new_acts);
1004 if (IS_ERR(new_acts))
1007 rcu_assign_pointer(flow->sf_acts, new_acts);
1008 flow_deferred_free_acts(old_acts);
1011 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1012 info->snd_seq, OVS_FLOW_CMD_NEW);
1015 if (a[OVS_FLOW_ATTR_CLEAR]) {
1016 spin_lock_bh(&flow->lock);
1018 spin_unlock_bh(&flow->lock);
1023 genl_notify(reply, genl_info_net(info), info->snd_pid,
1024 dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1026 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1027 dp_flow_multicast_group.id, PTR_ERR(reply));
1036 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1038 struct nlattr **a = info->attrs;
1039 struct ovs_header *ovs_header = info->userhdr;
1040 struct sw_flow_key key;
1041 struct sk_buff *reply;
1042 struct sw_flow *flow;
1043 struct datapath *dp;
1044 struct flow_table *table;
1048 if (!a[OVS_FLOW_ATTR_KEY])
1050 err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1054 dp = get_dp(ovs_header->dp_ifindex);
1058 table = get_table_protected(dp);
1059 flow = flow_tbl_lookup(table, &key, key_len);
1063 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
1065 return PTR_ERR(reply);
1067 return genlmsg_reply(reply, info);
1070 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1072 struct nlattr **a = info->attrs;
1073 struct ovs_header *ovs_header = info->userhdr;
1074 struct sw_flow_key key;
1075 struct sk_buff *reply;
1076 struct sw_flow *flow;
1077 struct datapath *dp;
1078 struct flow_table *table;
1082 if (!a[OVS_FLOW_ATTR_KEY])
1083 return flush_flows(ovs_header->dp_ifindex);
1084 err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1088 dp = get_dp(ovs_header->dp_ifindex);
1092 table = get_table_protected(dp);
1093 flow = flow_tbl_lookup(table, &key, key_len);
1097 reply = ovs_flow_cmd_alloc_info(flow);
1101 flow_tbl_remove(table, flow);
1103 err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1104 info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1107 flow_deferred_free(flow);
1109 genl_notify(reply, genl_info_net(info), info->snd_pid,
1110 dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1114 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1116 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1117 struct datapath *dp;
1119 dp = get_dp(ovs_header->dp_ifindex);
1124 struct sw_flow *flow;
1127 bucket = cb->args[0];
1129 flow = flow_tbl_next(get_table_protected(dp), &bucket, &obj);
1133 if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
1134 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1135 OVS_FLOW_CMD_NEW) < 0)
1138 cb->args[0] = bucket;
1144 static struct genl_ops dp_flow_genl_ops[] = {
1145 { .cmd = OVS_FLOW_CMD_NEW,
1146 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1147 .policy = flow_policy,
1148 .doit = ovs_flow_cmd_new_or_set
1150 { .cmd = OVS_FLOW_CMD_DEL,
1151 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1152 .policy = flow_policy,
1153 .doit = ovs_flow_cmd_del
1155 { .cmd = OVS_FLOW_CMD_GET,
1156 .flags = 0, /* OK for unprivileged users. */
1157 .policy = flow_policy,
1158 .doit = ovs_flow_cmd_get,
1159 .dumpit = ovs_flow_cmd_dump
1161 { .cmd = OVS_FLOW_CMD_SET,
1162 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1163 .policy = flow_policy,
1164 .doit = ovs_flow_cmd_new_or_set,
1168 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1169 #ifdef HAVE_NLA_NUL_STRING
1170 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1172 [OVS_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
1173 [OVS_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
1176 static struct genl_family dp_datapath_genl_family = {
1177 .id = GENL_ID_GENERATE,
1178 .hdrsize = sizeof(struct ovs_header),
1179 .name = OVS_DATAPATH_FAMILY,
1181 .maxattr = OVS_DP_ATTR_MAX
1184 static struct genl_multicast_group dp_datapath_multicast_group = {
1185 .name = OVS_DATAPATH_MCGROUP
1188 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1189 u32 pid, u32 seq, u32 flags, u8 cmd)
1191 struct ovs_header *ovs_header;
1194 int dp_ifindex = get_dpifindex(dp);
1196 ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1201 ovs_header->dp_ifindex = dp_ifindex;
1204 err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
1207 goto nla_put_failure;
1209 nla = nla_reserve(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats));
1211 goto nla_put_failure;
1212 get_dp_stats(dp, nla_data(nla));
1214 NLA_PUT_U32(skb, OVS_DP_ATTR_IPV4_FRAGS,
1215 dp->drop_frags ? OVS_DP_FRAG_DROP : OVS_DP_FRAG_ZERO);
1217 if (dp->sflow_probability)
1218 NLA_PUT_U32(skb, OVS_DP_ATTR_SAMPLING, dp->sflow_probability);
1220 nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS);
1222 goto nla_put_failure;
1223 NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS,
1224 packet_mc_group(dp_ifindex, OVS_PACKET_CMD_MISS));
1225 NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION,
1226 packet_mc_group(dp_ifindex, OVS_PACKET_CMD_ACTION));
1227 NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE,
1228 packet_mc_group(dp_ifindex, OVS_PACKET_CMD_SAMPLE));
1229 nla_nest_end(skb, nla);
1231 return genlmsg_end(skb, ovs_header);
1234 genlmsg_cancel(skb, ovs_header);
1239 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1242 struct sk_buff *skb;
1245 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1247 return ERR_PTR(-ENOMEM);
1249 retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1252 return ERR_PTR(retval);
1257 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1259 if (a[OVS_DP_ATTR_IPV4_FRAGS]) {
1260 u32 frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]);
1262 if (frags != OVS_DP_FRAG_ZERO && frags != OVS_DP_FRAG_DROP)
1266 return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1269 /* Called with genl_mutex and optionally with RTNL lock also. */
1270 static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1272 struct datapath *dp;
1274 if (!a[OVS_DP_ATTR_NAME])
1275 dp = get_dp(ovs_header->dp_ifindex);
1277 struct vport *vport;
1280 vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
1281 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1284 return dp ? dp : ERR_PTR(-ENODEV);
1287 /* Called with genl_mutex. */
1288 static void change_datapath(struct datapath *dp, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1290 if (a[OVS_DP_ATTR_IPV4_FRAGS])
1291 dp->drop_frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]) == OVS_DP_FRAG_DROP;
1292 if (a[OVS_DP_ATTR_SAMPLING])
1293 dp->sflow_probability = nla_get_u32(a[OVS_DP_ATTR_SAMPLING]);
1296 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1298 struct nlattr **a = info->attrs;
1299 struct vport_parms parms;
1300 struct sk_buff *reply;
1301 struct datapath *dp;
1302 struct vport *vport;
1306 if (!a[OVS_DP_ATTR_NAME])
1309 err = ovs_dp_cmd_validate(a);
1315 if (!try_module_get(THIS_MODULE))
1316 goto err_unlock_rtnl;
1319 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1321 goto err_put_module;
1322 INIT_LIST_HEAD(&dp->port_list);
1324 /* Initialize kobject for bridge. This will be added as
1325 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1326 dp->ifobj.kset = NULL;
1327 kobject_init(&dp->ifobj, &dp_ktype);
1329 /* Allocate table. */
1331 rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
1336 dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1337 if (!dp->stats_percpu) {
1339 goto err_destroy_table;
1342 change_datapath(dp, a);
1344 /* Set up our datapath device. */
1345 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1346 parms.type = OVS_VPORT_TYPE_INTERNAL;
1347 parms.options = NULL;
1349 parms.port_no = OVSP_LOCAL;
1350 vport = new_vport(&parms);
1351 if (IS_ERR(vport)) {
1352 err = PTR_ERR(vport);
1356 goto err_destroy_percpu;
1359 reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1360 err = PTR_ERR(reply);
1362 goto err_destroy_local_port;
1364 list_add_tail(&dp->list_node, &dps);
1365 dp_sysfs_add_dp(dp);
1369 genl_notify(reply, genl_info_net(info), info->snd_pid,
1370 dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1373 err_destroy_local_port:
1374 dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1376 free_percpu(dp->stats_percpu);
1378 flow_tbl_destroy(get_table_protected(dp));
1382 module_put(THIS_MODULE);
1389 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1391 struct vport *vport, *next_vport;
1392 struct sk_buff *reply;
1393 struct datapath *dp;
1396 err = ovs_dp_cmd_validate(info->attrs);
1401 dp = lookup_datapath(info->userhdr, info->attrs);
1406 reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL);
1407 err = PTR_ERR(reply);
1411 list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1412 if (vport->port_no != OVSP_LOCAL)
1413 dp_detach_port(vport);
1415 dp_sysfs_del_dp(dp);
1416 list_del(&dp->list_node);
1417 dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1419 /* rtnl_unlock() will wait until all the references to devices that
1420 * are pending unregistration have been dropped. We do it here to
1421 * ensure that any internal devices (which contain DP pointers) are
1422 * fully destroyed before freeing the datapath.
1426 call_rcu(&dp->rcu, destroy_dp_rcu);
1427 module_put(THIS_MODULE);
1429 genl_notify(reply, genl_info_net(info), info->snd_pid,
1430 dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1440 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1442 struct sk_buff *reply;
1443 struct datapath *dp;
1446 err = ovs_dp_cmd_validate(info->attrs);
1450 dp = lookup_datapath(info->userhdr, info->attrs);
1454 change_datapath(dp, info->attrs);
1456 reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1457 if (IS_ERR(reply)) {
1458 err = PTR_ERR(reply);
1459 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1460 dp_datapath_multicast_group.id, err);
1464 genl_notify(reply, genl_info_net(info), info->snd_pid,
1465 dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1469 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1471 struct sk_buff *reply;
1472 struct datapath *dp;
1475 err = ovs_dp_cmd_validate(info->attrs);
1479 dp = lookup_datapath(info->userhdr, info->attrs);
1483 reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1485 return PTR_ERR(reply);
1487 return genlmsg_reply(reply, info);
1490 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1492 struct datapath *dp;
1493 int skip = cb->args[0];
1496 list_for_each_entry (dp, &dps, list_node) {
1499 if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1500 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1501 OVS_DP_CMD_NEW) < 0)
1511 static struct genl_ops dp_datapath_genl_ops[] = {
1512 { .cmd = OVS_DP_CMD_NEW,
1513 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1514 .policy = datapath_policy,
1515 .doit = ovs_dp_cmd_new
1517 { .cmd = OVS_DP_CMD_DEL,
1518 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1519 .policy = datapath_policy,
1520 .doit = ovs_dp_cmd_del
1522 { .cmd = OVS_DP_CMD_GET,
1523 .flags = 0, /* OK for unprivileged users. */
1524 .policy = datapath_policy,
1525 .doit = ovs_dp_cmd_get,
1526 .dumpit = ovs_dp_cmd_dump
1528 { .cmd = OVS_DP_CMD_SET,
1529 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1530 .policy = datapath_policy,
1531 .doit = ovs_dp_cmd_set,
1535 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1536 #ifdef HAVE_NLA_NUL_STRING
1537 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1538 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1539 [OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
1541 [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
1542 [OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
1544 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1545 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1546 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1549 static struct genl_family dp_vport_genl_family = {
1550 .id = GENL_ID_GENERATE,
1551 .hdrsize = sizeof(struct ovs_header),
1552 .name = OVS_VPORT_FAMILY,
1554 .maxattr = OVS_VPORT_ATTR_MAX
1557 struct genl_multicast_group dp_vport_multicast_group = {
1558 .name = OVS_VPORT_MCGROUP
1561 /* Called with RTNL lock or RCU read lock. */
1562 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1563 u32 pid, u32 seq, u32 flags, u8 cmd)
1565 struct ovs_header *ovs_header;
1570 ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1575 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1577 NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
1578 NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
1579 NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
1581 nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
1583 goto nla_put_failure;
1585 vport_get_stats(vport, nla_data(nla));
1587 NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
1589 err = vport_get_options(vport, skb);
1590 if (err == -EMSGSIZE)
1593 ifindex = vport_get_ifindex(vport);
1595 NLA_PUT_U32(skb, OVS_VPORT_ATTR_IFINDEX, ifindex);
1597 return genlmsg_end(skb, ovs_header);
1602 genlmsg_cancel(skb, ovs_header);
1606 /* Called with RTNL lock or RCU read lock. */
1607 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1610 struct sk_buff *skb;
1613 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1615 return ERR_PTR(-ENOMEM);
1617 retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1620 return ERR_PTR(retval);
1625 static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1627 return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1630 /* Called with RTNL lock or RCU read lock. */
1631 static struct vport *lookup_vport(struct ovs_header *ovs_header,
1632 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1634 struct datapath *dp;
1635 struct vport *vport;
1637 if (a[OVS_VPORT_ATTR_NAME]) {
1638 vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
1640 return ERR_PTR(-ENODEV);
1642 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1643 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1645 if (port_no >= DP_MAX_PORTS)
1646 return ERR_PTR(-EFBIG);
1648 dp = get_dp(ovs_header->dp_ifindex);
1650 return ERR_PTR(-ENODEV);
1652 vport = get_vport_protected(dp, port_no);
1654 return ERR_PTR(-ENOENT);
1657 return ERR_PTR(-EINVAL);
1660 /* Called with RTNL lock. */
1661 static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1665 if (a[OVS_VPORT_ATTR_STATS])
1666 vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1668 if (a[OVS_VPORT_ATTR_ADDRESS])
1669 err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
1674 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1676 struct nlattr **a = info->attrs;
1677 struct ovs_header *ovs_header = info->userhdr;
1678 struct vport_parms parms;
1679 struct sk_buff *reply;
1680 struct vport *vport;
1681 struct datapath *dp;
1686 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE])
1689 err = ovs_vport_cmd_validate(a);
1694 dp = get_dp(ovs_header->dp_ifindex);
1699 if (a[OVS_VPORT_ATTR_PORT_NO]) {
1700 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1703 if (port_no >= DP_MAX_PORTS)
1706 vport = get_vport_protected(dp, port_no);
1711 for (port_no = 1; ; port_no++) {
1712 if (port_no >= DP_MAX_PORTS) {
1716 vport = get_vport_protected(dp, port_no);
1722 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1723 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1724 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1726 parms.port_no = port_no;
1728 vport = new_vport(&parms);
1729 err = PTR_ERR(vport);
1733 dp_sysfs_add_if(vport);
1735 err = change_vport(vport, a);
1737 reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
1738 info->snd_seq, OVS_VPORT_CMD_NEW);
1740 err = PTR_ERR(reply);
1743 dp_detach_port(vport);
1746 genl_notify(reply, genl_info_net(info), info->snd_pid,
1747 dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1756 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1758 struct nlattr **a = info->attrs;
1759 struct sk_buff *reply;
1760 struct vport *vport;
1763 err = ovs_vport_cmd_validate(a);
1768 vport = lookup_vport(info->userhdr, a);
1769 err = PTR_ERR(vport);
1774 if (a[OVS_VPORT_ATTR_OPTIONS])
1775 err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1777 err = change_vport(vport, a);
1779 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1781 if (IS_ERR(reply)) {
1782 err = PTR_ERR(reply);
1783 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1784 dp_vport_multicast_group.id, err);
1788 genl_notify(reply, genl_info_net(info), info->snd_pid,
1789 dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1797 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1799 struct nlattr **a = info->attrs;
1800 struct sk_buff *reply;
1801 struct vport *vport;
1804 err = ovs_vport_cmd_validate(a);
1809 vport = lookup_vport(info->userhdr, a);
1810 err = PTR_ERR(vport);
1814 if (vport->port_no == OVSP_LOCAL) {
1819 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1821 err = PTR_ERR(reply);
1825 dp_detach_port(vport);
1827 genl_notify(reply, genl_info_net(info), info->snd_pid,
1828 dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1836 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1838 struct nlattr **a = info->attrs;
1839 struct ovs_header *ovs_header = info->userhdr;
1840 struct sk_buff *reply;
1841 struct vport *vport;
1844 err = ovs_vport_cmd_validate(a);
1849 vport = lookup_vport(ovs_header, a);
1850 err = PTR_ERR(vport);
1854 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1856 err = PTR_ERR(reply);
1862 return genlmsg_reply(reply, info);
1870 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1872 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1873 struct datapath *dp;
1877 dp = get_dp(ovs_header->dp_ifindex);
1882 for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
1883 struct vport *vport;
1885 vport = get_vport_protected(dp, port_no);
1889 if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
1890 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1891 OVS_VPORT_CMD_NEW) < 0)
1896 cb->args[0] = port_no;
1902 static struct genl_ops dp_vport_genl_ops[] = {
1903 { .cmd = OVS_VPORT_CMD_NEW,
1904 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1905 .policy = vport_policy,
1906 .doit = ovs_vport_cmd_new
1908 { .cmd = OVS_VPORT_CMD_DEL,
1909 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1910 .policy = vport_policy,
1911 .doit = ovs_vport_cmd_del
1913 { .cmd = OVS_VPORT_CMD_GET,
1914 .flags = 0, /* OK for unprivileged users. */
1915 .policy = vport_policy,
1916 .doit = ovs_vport_cmd_get,
1917 .dumpit = ovs_vport_cmd_dump
1919 { .cmd = OVS_VPORT_CMD_SET,
1920 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1921 .policy = vport_policy,
1922 .doit = ovs_vport_cmd_set,
1926 struct genl_family_and_ops {
1927 struct genl_family *family;
1928 struct genl_ops *ops;
1930 struct genl_multicast_group *group;
1933 static const struct genl_family_and_ops dp_genl_families[] = {
1934 { &dp_datapath_genl_family,
1935 dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
1936 &dp_datapath_multicast_group },
1937 { &dp_vport_genl_family,
1938 dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1939 &dp_vport_multicast_group },
1940 { &dp_flow_genl_family,
1941 dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1942 &dp_flow_multicast_group },
1943 { &dp_packet_genl_family,
1944 dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
1948 static void dp_unregister_genl(int n_families)
1952 for (i = 0; i < n_families; i++)
1953 genl_unregister_family(dp_genl_families[i].family);
1956 static int dp_register_genl(void)
1963 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
1964 const struct genl_family_and_ops *f = &dp_genl_families[i];
1966 err = genl_register_family_with_ops(f->family, f->ops,
1973 err = genl_register_mc_group(f->family, f->group);
1979 err = packet_register_mc_groups();
1985 dp_unregister_genl(n_registered);
1989 static int __init dp_init(void)
1991 struct sk_buff *dummy_skb;
1994 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
1996 printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2004 goto error_tnl_exit;
2008 goto error_flow_exit;
2010 err = register_netdevice_notifier(&dp_device_notifier);
2012 goto error_vport_exit;
2014 err = dp_register_genl();
2016 goto error_unreg_notifier;
2020 error_unreg_notifier:
2021 unregister_netdevice_notifier(&dp_device_notifier);
2032 static void dp_cleanup(void)
2035 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2036 unregister_netdevice_notifier(&dp_device_notifier);
2042 module_init(dp_init);
2043 module_exit(dp_cleanup);
2045 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2046 MODULE_LICENSE("GPL");