/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <net/genetlink.h>
#include <linux/compat.h>
#include "openvswitch/datapath-protocol.h"
#include "checksum.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "loop_counter.h"
#include "table.h"
#include "vport-internal_dev.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
/*
 * Writes to device state (add/remove datapath, port, set operations on
 * vports, etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
 * lock nests inside genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 */
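/* A minimal sketch of the write-side pattern this nesting implies
 * (illustrative only; assumes the genl_lock()/genl_unlock() wrappers
 * around genl_mutex):
 *
 *	genl_lock();
 *	rtnl_lock();
 *	... modify datapath or vport state ...
 *	rtnl_unlock();
 *	genl_unlock();
 */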
/* Protected by genl_mutex. */
static struct datapath __rcu *dps[256];

static struct vport *new_vport(const struct vport_parms *);
/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
struct datapath *get_dp(int dp_idx)
{
        if (dp_idx < 0 || dp_idx >= ARRAY_SIZE(dps))
                return NULL;

        return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
                                     lockdep_rtnl_is_held() ||
                                     lockdep_genl_is_held());
}
EXPORT_SYMBOL_GPL(get_dp);
/* Must be called with genl_mutex. */
static struct tbl *get_table_protected(struct datapath *dp)
{
        return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
}
/* Must be called with rcu_read_lock or RTNL lock. */
static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
        return rcu_dereference_rtnl(dp->ports[port_no]);
}
/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
        return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}
static inline size_t br_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
               + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
               + nla_total_size(4) /* IFLA_MASTER */
               + nla_total_size(4) /* IFLA_MTU */
               + nla_total_size(4) /* IFLA_LINK */
               + nla_total_size(1); /* IFLA_OPERSTATE */
}
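/* Note: nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload), i.e. it
 * accounts for each attribute's header and padding, so the sum above is the
 * worst-case size of the RTM_NEWLINK/RTM_DELLINK messages built below. */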
/* Caller must hold RTNL lock. */
static int dp_fill_ifinfo(struct sk_buff *skb,
                          const struct vport *port,
                          int event, unsigned int flags)
{
        struct datapath *dp = port->dp;
        int ifindex = vport_get_ifindex(port);
        int iflink = vport_get_iflink(port);
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        hdr = nlmsg_data(nlh);
        hdr->ifi_family = AF_BRIDGE;
        hdr->__ifi_pad = 0;
        hdr->ifi_type = ARPHRD_ETHER;
        hdr->ifi_index = ifindex;
        hdr->ifi_flags = vport_get_flags(port);
        hdr->ifi_change = 0;

        NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
        NLA_PUT_U32(skb, IFLA_MASTER,
                    vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
        NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
        NLA_PUT_U8(skb, IFLA_OPERSTATE,
                   vport_is_running(port)
                        ? vport_get_operstate(port)
                        : IF_OPER_DOWN);
#endif

        NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

        if (ifindex != iflink)
                NLA_PUT_U32(skb, IFLA_LINK, iflink);

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}
/* Caller must hold RTNL lock. */
static void dp_ifinfo_notify(int event, struct vport *port)
{
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = dp_fill_ifinfo(skb, port, event, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in br_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}
static void release_dp(struct kobject *kobj)
{
        struct datapath *dp = container_of(kobj, struct datapath, ifobj);

        kfree(dp);
}

static struct kobj_type dp_ktype = {
        .release = release_dp
};
static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);
        int i;

        for (i = 0; i < DP_N_QUEUES; i++)
                skb_queue_purge(&dp->queues[i]);

        tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
        free_percpu(dp->stats_percpu);
        kobject_put(&dp->ifobj);
}
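/* destroy_dp_rcu() runs only after a grace period has elapsed following
 * rcu_assign_pointer(dps[dp_idx], NULL) in del_datapath(), so no RCU reader
 * can still hold a reference when the table, queues, and stats are freed.
 * The struct datapath itself is kfree()d in release_dp() once the kobject
 * refcount drops to zero. */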
/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;

                rcu_assign_pointer(dp->ports[parms->port_no], vport);
                list_add(&vport->node, &dp->port_list);

                dp_ifinfo_notify(RTM_NEWLINK, vport);
        }

        return vport;
}
/* Called with RTNL lock. */
int dp_detach_port(struct vport *p)
{
        ASSERT_RTNL();

        if (p->port_no != ODPP_LOCAL)
                dp_sysfs_del_if(p);
        dp_ifinfo_notify(RTM_DELLINK, p);

        /* First drop references to device. */
        list_del(&p->node);
        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

        /* Then destroy it. */
        return vport_del(p);
}
/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct dp_stats_percpu *stats;
        int stats_counter_off;
        struct sw_flow_actions *acts;
        struct loop_counter *loop;
        int error;

        OVS_CB(skb)->vport = p;

        if (!OVS_CB(skb)->flow) {
                struct sw_flow_key key;
                struct tbl_node *flow_node;
                bool is_frag;

                /* Extract flow from 'skb' into 'key'. */
                error = flow_extract(skb, p->port_no, &key, &is_frag);
                if (unlikely(error)) {
                        kfree_skb(skb);
                        return;
                }

                if (is_frag && dp->drop_frags) {
                        kfree_skb(skb);
                        stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
                        goto out;
                }

                /* Look up flow. */
                flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
                                       flow_hash(&key), flow_cmp);
                if (unlikely(!flow_node)) {
                        struct dp_upcall_info upcall;

                        upcall.type = _ODPL_MISS_NR;
                        upcall.key = &key;
                        upcall.userdata = 0;
                        upcall.sample_pool = 0;
                        upcall.actions = NULL;
                        upcall.actions_len = 0;
                        dp_upcall(dp, skb, &upcall);
                        stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
                        goto out;
                }

                OVS_CB(skb)->flow = flow_cast(flow_node);
        }

        stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
        flow_used(OVS_CB(skb)->flow, skb);

        acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

        /* Check whether we've looped too much. */
        loop = loop_get_counter();
        if (unlikely(++loop->count > MAX_LOOPS))
                loop->looping = true;
        if (unlikely(loop->looping)) {
                loop_suppress(dp, acts);
                kfree_skb(skb);
                goto out_loop;
        }

        /* Execute actions. */
        execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
                        acts->actions_len);

        /* Check whether sub-actions looped too much. */
        if (unlikely(loop->looping))
                loop_suppress(dp, acts);

out_loop:
        /* Decrement loop counter. */
        if (!--loop->count)
                loop->looping = false;
        loop_put_counter();

out:
        /* Update datapath statistics. */
        local_bh_disable();
        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        write_seqcount_begin(&stats->seqlock);
        (*(u64 *)((u8 *)stats + stats_counter_off))++;
        write_seqcount_end(&stats->seqlock);

        local_bh_enable();
}
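/* The counters use a per-CPU seqcount rather than atomics: the writer above
 * sits on the packet fast path and stays cheap, while the reader in
 * get_dp_stats() below retries until it observes a consistent snapshot.
 * That keeps the 64-bit counters coherent even on 32-bit SMP machines. */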
static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
        u16 csum_start, csum_offset;
        __wsum csum;

        get_skb_csum_pointers(skb, &csum_start, &csum_offset);
        csum_start -= skb_headroom(skb);
        BUG_ON(csum_start >= skb_headlen(skb));

        skb_copy_bits(skb, 0, to, csum_start);

        csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
                                      skb->len - csum_start, 0);
        *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}
/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
                                 const struct dp_upcall_info *upcall_info)
{
        struct sk_buff *nskb;
        int port_no;
        int err;

        if (OVS_CB(skb)->vport)
                port_no = OVS_CB(skb)->vport->port_no;
        else
                port_no = ODPP_LOCAL;

        do {
                struct odp_packet *upcall;
                struct sk_buff *user_skb; /* to be queued to userspace */
                struct nlattr *nla;
                unsigned int len;

                nskb = skb->next;
                skb->next = NULL;

                len = sizeof(struct odp_packet);
                len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
                len += nla_total_size(skb->len);
                len += nla_total_size(FLOW_BUFSIZE);
                if (upcall_info->userdata)
                        len += nla_total_size(8);
                if (upcall_info->sample_pool)
                        len += nla_total_size(4);
                if (upcall_info->actions_len)
                        len += nla_total_size(upcall_info->actions_len);

                user_skb = alloc_skb(len, GFP_ATOMIC);
                if (!user_skb) {
                        err = -ENOMEM;
                        goto err_kfree_skbs;
                }

                upcall = (struct odp_packet *)__skb_put(user_skb, sizeof(*upcall));
                upcall->dp_idx = dp->dp_idx;

                nla_put_u32(user_skb, ODP_PACKET_ATTR_TYPE, upcall_info->type);

                nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
                flow_to_nlattrs(upcall_info->key, user_skb);
                nla_nest_end(user_skb, nla);

                if (upcall_info->userdata)
                        nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA,
                                    upcall_info->userdata);
                if (upcall_info->sample_pool)
                        nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL,
                                    upcall_info->sample_pool);
                if (upcall_info->actions_len) {
                        const struct nlattr *actions = upcall_info->actions;
                        u32 actions_len = upcall_info->actions_len;

                        nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
                        memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
                        nla_nest_end(user_skb, nla);
                }

                nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        copy_and_csum_skb(skb, nla_data(nla));
                else
                        skb_copy_bits(skb, 0, nla_data(nla), skb->len);

                upcall->len = user_skb->len;
                skb_queue_tail(&dp->queues[upcall_info->type], user_skb);

                kfree_skb(skb);
                skb = nskb;
        } while (skb);
        return 0;

err_kfree_skbs:
        kfree_skb(skb);
        while ((skb = nskb) != NULL) {
                nskb = skb->next;
                kfree_skb(skb);
        }
        return err;
}
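/* Each message queued above is laid out as a struct odp_packet header
 * (dp_idx, len) followed by netlink attributes: ODP_PACKET_ATTR_TYPE, a
 * nested ODP_PACKET_ATTR_KEY carrying the flow key, optional
 * USERDATA/SAMPLE_POOL/ACTIONS attributes, and finally
 * ODP_PACKET_ATTR_PACKET holding the packet data itself. */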
int dp_upcall(struct datapath *dp, struct sk_buff *skb,
              const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        struct sk_buff_head *queue;
        int err;

        WARN_ON_ONCE(skb_shared(skb));
        BUG_ON(upcall_info->type >= DP_N_QUEUES);

        queue = &dp->queues[upcall_info->type];
        err = -ENOBUFS;
        if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
                goto err_kfree_skb;

        forward_ip_summed(skb);

        err = vswitch_skb_checksum_setup(skb);
        if (err)
                goto err_kfree_skb;

        /* Break apart GSO packets into their component pieces.  Otherwise
         * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
        if (skb_is_gso(skb)) {
                struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

                kfree_skb(skb);
                skb = nskb;
                if (IS_ERR(skb)) {
                        err = PTR_ERR(skb);
                        goto err;
                }
        }

        err = queue_control_packets(dp, skb, upcall_info);
        wake_up_interruptible(&dp->waitqueue);
        return err;

err_kfree_skb:
        kfree_skb(skb);
err:
        local_bh_disable();
        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        write_seqcount_begin(&stats->seqlock);
        stats->n_lost++;
        write_seqcount_end(&stats->seqlock);

        local_bh_enable();

        return err;
}
/* Called with genl_mutex. */
static int flush_flows(int dp_idx)
{
        struct tbl *old_table;
        struct tbl *new_table;
        struct datapath *dp;

        dp = get_dp(dp_idx);
        if (!dp)
                return -ENODEV;

        old_table = get_table_protected(dp);
        new_table = tbl_create(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        tbl_deferred_destroy(old_table, flow_free_tbl);

        return 0;
}
static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
        const struct nlattr *a;
        int rem;

        nla_for_each_attr(a, actions, actions_len, rem) {
                static const u32 action_lens[ODPAT_MAX + 1] = {
                        [ODPAT_OUTPUT] = 4,
                        [ODPAT_CONTROLLER] = 8,
                        [ODPAT_SET_DL_TCI] = 2,
                        [ODPAT_STRIP_VLAN] = 0,
                        [ODPAT_SET_DL_SRC] = ETH_ALEN,
                        [ODPAT_SET_DL_DST] = ETH_ALEN,
                        [ODPAT_SET_NW_SRC] = 4,
                        [ODPAT_SET_NW_DST] = 4,
                        [ODPAT_SET_NW_TOS] = 1,
                        [ODPAT_SET_TP_SRC] = 2,
                        [ODPAT_SET_TP_DST] = 2,
                        [ODPAT_SET_TUNNEL] = 8,
                        [ODPAT_SET_PRIORITY] = 4,
                        [ODPAT_POP_PRIORITY] = 0,
                        [ODPAT_DROP_SPOOFED_ARP] = 0,
                };
                int type = nla_type(a);

                if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
                        return -EINVAL;

                switch (type) {
                case ODPAT_UNSPEC:
                        return -EINVAL;

                case ODPAT_CONTROLLER:
                case ODPAT_STRIP_VLAN:
                case ODPAT_SET_DL_SRC:
                case ODPAT_SET_DL_DST:
                case ODPAT_SET_NW_SRC:
                case ODPAT_SET_NW_DST:
                case ODPAT_SET_TP_SRC:
                case ODPAT_SET_TP_DST:
                case ODPAT_SET_TUNNEL:
                case ODPAT_SET_PRIORITY:
                case ODPAT_POP_PRIORITY:
                case ODPAT_DROP_SPOOFED_ARP:
                        /* No validation needed. */
                        break;

                case ODPAT_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case ODPAT_SET_DL_TCI:
                        if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
                                return -EINVAL;
                        break;

                case ODPAT_SET_NW_TOS:
                        if (nla_get_u8(a) & INET_ECN_MASK)
                                return -EINVAL;
                        break;

                default:
                        return -EOPNOTSUPP;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}
struct dp_flowcmd {
        u32 nlmsg_flags;
        u32 dp_idx;
        u32 total_len;
        struct sw_flow_key key;
        const struct nlattr *actions;
        u32 actions_len;
        bool clear;
        u64 state;
};
static struct sw_flow_actions *get_actions(const struct dp_flowcmd *flowcmd)
{
        struct sw_flow_actions *actions;

        actions = flow_actions_alloc(flowcmd->actions_len);
        if (!IS_ERR(actions) && flowcmd->actions_len)
                memcpy(actions->actions, flowcmd->actions, flowcmd->actions_len);
        return actions;
}
static void clear_stats(struct sw_flow *flow)
{
        flow->used = 0;
        flow->tcp_flags = 0;
        flow->packet_count = 0;
        flow->byte_count = 0;
}
/* Called with genl_mutex. */
static int expand_table(struct datapath *dp)
{
        struct tbl *old_table = get_table_protected(dp);
        struct tbl *new_table;

        new_table = tbl_expand(old_table);
        if (IS_ERR(new_table))
                return PTR_ERR(new_table);

        rcu_assign_pointer(dp->table, new_table);
        tbl_deferred_destroy(old_table, NULL);

        return 0;
}
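/* Table replacement follows the classic RCU publish/retire pattern: the
 * expanded copy is published with rcu_assign_pointer() while readers may
 * still be walking the old table, and tbl_deferred_destroy() frees the old
 * one only after a grace period.  flush_flows() earlier in this file uses
 * the same pattern, but additionally frees the flows themselves. */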
static const struct nla_policy execute_policy[ODP_PACKET_ATTR_MAX + 1] = {
        [ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
        [ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};
static int execute_packet(const struct odp_packet __user *uodp_packet)
{
        struct nlattr *a[ODP_PACKET_ATTR_MAX + 1];
        struct odp_packet *odp_packet;
        struct sk_buff *skb, *packet;
        unsigned int actions_len;
        struct nlattr *actions;
        struct sw_flow_key key;
        struct datapath *dp;
        struct ethhdr *eth;
        bool is_frag;
        u32 len;
        int err;

        if (get_user(len, &uodp_packet->len))
                return -EFAULT;
        if (len < sizeof(struct odp_packet))
                return -EINVAL;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(__skb_put(skb, len), uodp_packet, len))
                goto error_free_skb;

        odp_packet = (struct odp_packet *)skb->data;
        err = -EINVAL;
        if (odp_packet->len != len)
                goto error_free_skb;

        __skb_pull(skb, sizeof(struct odp_packet));
        err = nla_parse(a, ODP_PACKET_ATTR_MAX, (struct nlattr *)skb->data,
                        skb->len, execute_policy);
        if (err)
                goto error_free_skb;

        err = -EINVAL;
        if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
            nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
                goto error_free_skb;

        actions = nla_data(a[ODP_PACKET_ATTR_ACTIONS]);
        actions_len = nla_len(a[ODP_PACKET_ATTR_ACTIONS]);
        err = validate_actions(actions, actions_len);
        if (err)
                goto error_free_skb;

        packet = skb_clone(skb, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto error_free_skb;
        packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
        packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= 1536)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        err = flow_extract(packet, -1, &key, &is_frag);
        if (err)
                goto error_free_packet;

        rcu_read_lock();
        dp = get_dp(odp_packet->dp_idx);
        err = -ENODEV;
        if (dp)
                err = execute_actions(dp, packet, &key, actions, actions_len);
        rcu_read_unlock();
        goto error_free_skb;

error_free_packet:
        kfree_skb(packet);
error_free_skb:
        kfree_skb(skb);
        return err;
}
static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
{
        int i;

        stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned seqcount;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        seqcount = read_seqcount_begin(&percpu_stats->seqlock);
                        local_stats = *percpu_stats;
                } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

                stats->n_frags += local_stats.n_frags;
                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
 * Called with RTNL lock. */
int dp_min_mtu(const struct datapath *dp)
{
        struct vport *p;
        int mtu = 0;

        ASSERT_RTNL();

        list_for_each_entry (p, &dp->port_list, node) {
                int dev_mtu;

                /* Skip any internal ports, since that's what we're trying to
                 * set. */
                if (is_internal_vport(p))
                        continue;

                dev_mtu = vport_get_mtu(p);
                if (!mtu || dev_mtu < mtu)
                        mtu = dev_mtu;
        }

        return mtu ? mtu : ETH_DATA_LEN;
}
/* Sets the MTU of all datapath devices to the minimum of the ports.
 * Called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
        struct vport *p;
        int mtu;

        ASSERT_RTNL();

        mtu = dp_min_mtu(dp);

        list_for_each_entry (p, &dp->port_list, node) {
                if (is_internal_vport(p))
                        vport_set_mtu(p, mtu);
        }
}
static int get_listen_mask(const struct file *f)
{
        return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
        f->private_data = (void *)(long)listen_mask;
}
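/* The listen mask is small enough to be stashed directly in the
 * file->private_data pointer (cast via long) instead of allocating a
 * per-open structure; openvswitch_read() and openvswitch_poll() below
 * consult it to decide which of the dp->queues[] to service. */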
static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
        [ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
        [ODP_FLOW_ATTR_STATE] = { .type = NLA_U64 },
};
/* Called with genl_mutex. */
static int copy_flow_to_user(struct odp_flow __user *dst, struct datapath *dp,
                             struct sw_flow *flow, u32 total_len, u64 state)
{
        const struct sw_flow_actions *sf_acts;
        struct odp_flow_stats stats;
        struct odp_flow *odp_flow;
        struct sk_buff *skb;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        skb = alloc_skb(128 + FLOW_BUFSIZE + sf_acts->actions_len, GFP_KERNEL);
        err = -ENOMEM;
        if (!skb)
                goto exit;

        odp_flow = (struct odp_flow *)__skb_put(skb, sizeof(struct odp_flow));
        odp_flow->dp_idx = dp->dp_idx;
        odp_flow->total_len = total_len;

        nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;
        err = flow_to_nlattrs(&flow->key, skb);
        if (err)
                goto exit_free_skb;
        nla_nest_end(skb, nla);

        nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
        if (!nla || skb_tailroom(skb) < sf_acts->actions_len)
                goto nla_put_failure;
        memcpy(__skb_put(skb, sf_acts->actions_len), sf_acts->actions,
               sf_acts->actions_len);
        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used)
                NLA_PUT_MSECS(skb, ODP_FLOW_ATTR_USED, used);

        if (stats.n_packets)
                NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);

        if (tcp_flags)
                NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);

        if (state)
                NLA_PUT_U64(skb, ODP_FLOW_ATTR_STATE, state);

        if (skb->len > total_len)
                goto nla_put_failure;

        odp_flow->len = skb->len;
        err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
        goto exit_free_skb;

nla_put_failure:
        err = -EMSGSIZE;
exit_free_skb:
        kfree_skb(skb);
exit:
        return err;
}
/* Called with genl_mutex. */
static struct sk_buff *copy_flow_from_user(struct odp_flow __user *uodp_flow,
                                           struct dp_flowcmd *flowcmd)
{
        struct nlattr *a[ODP_FLOW_ATTR_MAX + 1];
        struct odp_flow *odp_flow;
        struct sk_buff *skb;
        u32 len;
        int err;

        if (get_user(len, &uodp_flow->len))
                return ERR_PTR(-EFAULT);
        if (len < sizeof(struct odp_flow))
                return ERR_PTR(-EINVAL);

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        err = -EFAULT;
        if (copy_from_user(__skb_put(skb, len), uodp_flow, len))
                goto error_free_skb;

        odp_flow = (struct odp_flow *)skb->data;
        err = -EINVAL;
        if (odp_flow->len != len)
                goto error_free_skb;

        flowcmd->nlmsg_flags = odp_flow->nlmsg_flags;
        flowcmd->dp_idx = odp_flow->dp_idx;
        flowcmd->total_len = odp_flow->total_len;

        err = nla_parse(a, ODP_FLOW_ATTR_MAX,
                        (struct nlattr *)(skb->data + sizeof(struct odp_flow)),
                        skb->len - sizeof(struct odp_flow), flow_policy);
        if (err)
                goto error_free_skb;

        /* ODP_FLOW_ATTR_KEY. */
        if (a[ODP_FLOW_ATTR_KEY]) {
                err = flow_from_nlattrs(&flowcmd->key, a[ODP_FLOW_ATTR_KEY]);
                if (err)
                        goto error_free_skb;
        } else
                memset(&flowcmd->key, 0, sizeof(struct sw_flow_key));

        /* ODP_FLOW_ATTR_ACTIONS. */
        if (a[ODP_FLOW_ATTR_ACTIONS]) {
                flowcmd->actions = nla_data(a[ODP_FLOW_ATTR_ACTIONS]);
                flowcmd->actions_len = nla_len(a[ODP_FLOW_ATTR_ACTIONS]);
                err = validate_actions(flowcmd->actions, flowcmd->actions_len);
                if (err)
                        goto error_free_skb;
        } else {
                flowcmd->actions = NULL;
                flowcmd->actions_len = 0;
        }

        flowcmd->clear = a[ODP_FLOW_ATTR_CLEAR] != NULL;

        flowcmd->state = a[ODP_FLOW_ATTR_STATE] ? nla_get_u64(a[ODP_FLOW_ATTR_STATE]) : 0;

        return skb;

error_free_skb:
        kfree_skb(skb);
        return ERR_PTR(err);
}
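/* Note the pattern above, shared with copy_datapath_from_user() and
 * copy_vport_from_user() below: the user buffer is copied into a kernel
 * sk_buff so that the stock nla_* helpers can parse, and later build,
 * attributes in place, even though nothing is ever transmitted on a
 * socket. */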
static int new_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
{
        struct tbl_node *flow_node;
        struct dp_flowcmd flowcmd;
        struct sw_flow *flow;
        struct sk_buff *skb;
        struct datapath *dp;
        struct tbl *table;
        u32 hash;
        int error;

        skb = copy_flow_from_user(uodp_flow, &flowcmd);
        error = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto exit;

        dp = get_dp(flowcmd.dp_idx);
        error = -ENODEV;
        if (!dp)
                goto error_kfree_skb;

        hash = flow_hash(&flowcmd.key);
        table = get_table_protected(dp);
        flow_node = tbl_lookup(table, &flowcmd.key, hash, flow_cmp);
        if (!flow_node) {
                struct sw_flow_actions *acts;

                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (cmd == ODP_FLOW_SET)
                        goto error_kfree_skb;

                /* Expand table, if necessary, to make room. */
                if (tbl_count(table) >= tbl_n_buckets(table)) {
                        error = expand_table(dp);
                        if (error)
                                goto error_kfree_skb;
                        table = get_table_protected(dp);
                }

                /* Allocate flow. */
                flow = flow_alloc();
                error = PTR_ERR(flow);
                if (IS_ERR(flow))
                        goto error_kfree_skb;
                flow->key = flowcmd.key;
                clear_stats(flow);

                /* Obtain actions. */
                acts = get_actions(&flowcmd);
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error_free_flow;
                rcu_assign_pointer(flow->sf_acts, acts);

                error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
                if (error)
                        goto error_free_flow;

                /* Put flow in bucket. */
                error = tbl_insert(table, &flow->tbl_node, hash);
                if (error)
                        goto error_free_flow;
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed. */
                error = -EEXIST;
                if (flowcmd.nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto error_kfree_skb;

                /* Update actions. */
                flow = flow_cast(flow_node);
                old_acts = rcu_dereference_protected(flow->sf_acts,
                                                     lockdep_genl_is_held());
                if (flowcmd.actions &&
                    (old_acts->actions_len != flowcmd.actions_len ||
                     memcmp(old_acts->actions, flowcmd.actions,
                            flowcmd.actions_len))) {
                        struct sw_flow_actions *new_acts;

                        new_acts = get_actions(&flowcmd);
                        error = PTR_ERR(new_acts);
                        if (IS_ERR(new_acts))
                                goto error_kfree_skb;

                        rcu_assign_pointer(flow->sf_acts, new_acts);
                        flow_deferred_free_acts(old_acts);
                }

                error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
                if (error)
                        goto error_kfree_skb;

                /* Clear stats. */
                if (flowcmd.clear) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }

        kfree_skb(skb);
        return 0;

error_free_flow:
        flow_put(flow);
error_kfree_skb:
        kfree_skb(skb);
exit:
        return error;
}
static int get_or_del_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
{
        struct tbl_node *flow_node;
        struct dp_flowcmd flowcmd;
        struct sw_flow *flow;
        struct sk_buff *skb;
        struct datapath *dp;
        struct tbl *table;
        int err;

        skb = copy_flow_from_user(uodp_flow, &flowcmd);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        dp = get_dp(flowcmd.dp_idx);
        err = -ENODEV;
        if (!dp)
                goto exit_kfree_skb;

        table = get_table_protected(dp);
        flow_node = tbl_lookup(table, &flowcmd.key, flow_hash(&flowcmd.key), flow_cmp);
        err = -ENOENT;
        if (!flow_node)
                goto exit_kfree_skb;

        if (cmd == ODP_FLOW_DEL) {
                err = tbl_remove(table, flow_node);
                if (err)
                        goto exit_kfree_skb;
        }

        flow = flow_cast(flow_node);
        err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
        if (!err && cmd == ODP_FLOW_DEL)
                flow_deferred_free(flow);

exit_kfree_skb:
        kfree_skb(skb);
        return err;
}
static int dump_flow(struct odp_flow __user *uodp_flow)
{
        struct tbl_node *flow_node;
        struct dp_flowcmd flowcmd;
        struct sw_flow *flow;
        struct sk_buff *skb;
        struct datapath *dp;
        u32 bucket, obj;
        int err;

        skb = copy_flow_from_user(uodp_flow, &flowcmd);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto exit;

        dp = get_dp(flowcmd.dp_idx);
        err = -ENODEV;
        if (!dp)
                goto exit_kfree_skb;

        bucket = flowcmd.state >> 32;
        obj = flowcmd.state;
        flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
        err = -ENODEV;
        if (!flow_node)
                goto exit_kfree_skb;

        flow = flow_cast(flow_node);
        err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len,
                                ((u64)bucket << 32) | obj);

exit_kfree_skb:
        kfree_skb(skb);
exit:
        return err;
}
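/* The dump cursor lives in ODP_FLOW_ATTR_STATE: the high 32 bits hold the
 * hash-table bucket and the low 32 bits the object within that bucket.
 * Userspace passes 0 on the first call and echoes back the state returned
 * with each flow to resume the dump where it left off. */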
static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
        [ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
        [ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
};
/* Called with genl_mutex. */
static int copy_datapath_to_user(void __user *dst, struct datapath *dp, uint32_t total_len)
{
        struct odp_datapath *odp_datapath;
        struct sk_buff *skb;
        struct nlattr *nla;
        int err;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        err = -ENOMEM;
        if (!skb)
                goto exit;

        odp_datapath = (struct odp_datapath *)__skb_put(skb, sizeof(struct odp_datapath));
        odp_datapath->dp_idx = dp->dp_idx;
        odp_datapath->total_len = total_len;

        err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
        if (err)
                goto nla_put_failure;

        nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
        if (!nla)
                goto nla_put_failure;
        get_dp_stats(dp, nla_data(nla));

        NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
                    dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);

        if (dp->sflow_probability)
                NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);

        if (skb->len > total_len)
                goto nla_put_failure;

        odp_datapath->len = skb->len;
        err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
        goto exit_free_skb;

nla_put_failure:
        err = -EMSGSIZE;
exit_free_skb:
        kfree_skb(skb);
exit:
        return err;
}
/* Called with genl_mutex. */
static struct sk_buff *copy_datapath_from_user(struct odp_datapath __user *uodp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
        struct odp_datapath *odp_datapath;
        struct sk_buff *skb;
        u32 len;
        int err;

        if (get_user(len, &uodp_datapath->len))
                return ERR_PTR(-EFAULT);
        if (len < sizeof(struct odp_datapath))
                return ERR_PTR(-EINVAL);

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        err = -EFAULT;
        if (copy_from_user(__skb_put(skb, len), uodp_datapath, len))
                goto error_free_skb;

        odp_datapath = (struct odp_datapath *)skb->data;
        err = -EINVAL;
        if (odp_datapath->len != len)
                goto error_free_skb;

        err = nla_parse(a, ODP_DP_ATTR_MAX,
                        (struct nlattr *)(skb->data + sizeof(struct odp_datapath)),
                        skb->len - sizeof(struct odp_datapath), datapath_policy);
        if (err)
                goto error_free_skb;

        if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
                u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);

                err = -EINVAL;
                if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
                        goto error_free_skb;
        }

        err = VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
        if (err)
                goto error_free_skb;

        return skb;

error_free_skb:
        kfree_skb(skb);
        return ERR_PTR(err);
}
/* Called with genl_mutex and optionally with RTNL lock also. */
static struct datapath *lookup_datapath(struct odp_datapath *odp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
        if (!a[ODP_DP_ATTR_NAME]) {
                struct datapath *dp = get_dp(odp_datapath->dp_idx);

                if (!dp)
                        return ERR_PTR(-ENODEV);
                return dp;
        } else {
                struct vport *vport;
                int dp_idx;

                rcu_read_lock();
                vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
                dp_idx = vport && vport->port_no == ODPP_LOCAL ? vport->dp->dp_idx : -1;
                rcu_read_unlock();

                if (dp_idx < 0)
                        return ERR_PTR(-ENODEV);
                return get_dp(dp_idx);
        }
}
/* Called with genl_mutex. */
static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
        if (a[ODP_DP_ATTR_IPV4_FRAGS])
                dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
        if (a[ODP_DP_ATTR_SAMPLING])
                dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
}
static int new_datapath(struct odp_datapath __user *uodp_datapath)
{
        struct nlattr *a[ODP_DP_ATTR_MAX + 1];
        struct odp_datapath *odp_datapath;
        struct vport_parms parms;
        struct sk_buff *skb;
        struct datapath *dp;
        struct vport *vport;
        int dp_idx;
        int err;
        int i;

        skb = copy_datapath_from_user(uodp_datapath, a);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto err;
        odp_datapath = (struct odp_datapath *)skb->data;

        err = -EINVAL;
        if (!a[ODP_DP_ATTR_NAME])
                goto err_free_skb;

        rtnl_lock();
        err = -ENODEV;
        if (!try_module_get(THIS_MODULE))
                goto err_unlock_rtnl;

        dp_idx = odp_datapath->dp_idx;
        if (dp_idx < 0) {
                err = -EFBIG;
                for (dp_idx = 0; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
                        if (get_dp(dp_idx))
                                continue;
                        err = 0;
                        break;
                }
        } else if (dp_idx < ARRAY_SIZE(dps))
                err = get_dp(dp_idx) ? -EBUSY : 0;
        else
                err = -EINVAL;
        if (err)
                goto err_put_module;

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
                goto err_put_module;
        INIT_LIST_HEAD(&dp->port_list);
        dp->dp_idx = dp_idx;
        for (i = 0; i < DP_N_QUEUES; i++)
                skb_queue_head_init(&dp->queues[i]);
        init_waitqueue_head(&dp->waitqueue);

        /* Initialize kobject for bridge.  This will be added as
         * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
        dp->ifobj.kset = NULL;
        kobject_init(&dp->ifobj, &dp_ktype);

        /* Allocate table. */
        err = -ENOMEM;
        rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
        if (!dp->table)
                goto err_free_dp;

        /* Set up our datapath device. */
        parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
        parms.type = ODP_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = ODPP_LOCAL;
        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                if (err == -EBUSY)
                        err = -EEXIST;
                goto err_destroy_table;
        }

        dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_local_port;
        }

        change_datapath(dp, a);

        rcu_assign_pointer(dps[dp_idx], dp);
        dp_sysfs_add_dp(dp);

        rtnl_unlock();

        kfree_skb(skb);
        return 0;

err_destroy_local_port:
        dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
        tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
        kfree(dp);
err_put_module:
        module_put(THIS_MODULE);
err_unlock_rtnl:
        rtnl_unlock();
err_free_skb:
        kfree_skb(skb);
err:
        return err;
}
static int del_datapath(struct odp_datapath __user *uodp_datapath)
{
        struct nlattr *a[ODP_DP_ATTR_MAX + 1];
        struct vport *vport, *next_vport;
        struct datapath *dp;
        struct sk_buff *skb;
        int err;

        skb = copy_datapath_from_user(uodp_datapath, a);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto exit;

        rtnl_lock();
        dp = lookup_datapath((struct odp_datapath *)skb->data, a);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto exit_free;

        list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
                if (vport->port_no != ODPP_LOCAL)
                        dp_detach_port(vport);

        dp_sysfs_del_dp(dp);
        rcu_assign_pointer(dps[dp->dp_idx], NULL);
        dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));

        call_rcu(&dp->rcu, destroy_dp_rcu);
        module_put(THIS_MODULE);

        err = 0;

exit_free:
        kfree_skb(skb);
        rtnl_unlock();
exit:
        return err;
}
static int set_datapath(struct odp_datapath __user *uodp_datapath)
{
        struct nlattr *a[ODP_DP_ATTR_MAX + 1];
        struct datapath *dp;
        struct sk_buff *skb;
        int err;

        skb = copy_datapath_from_user(uodp_datapath, a);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto exit;

        dp = lookup_datapath((struct odp_datapath *)skb->data, a);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto exit_free;

        change_datapath(dp, a);
        err = 0;

exit_free:
        kfree_skb(skb);
exit:
        return err;
}
static int get_datapath(struct odp_datapath __user *uodp_datapath)
{
        struct nlattr *a[ODP_DP_ATTR_MAX + 1];
        struct odp_datapath *odp_datapath;
        struct datapath *dp;
        struct sk_buff *skb;
        int err;

        skb = copy_datapath_from_user(uodp_datapath, a);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        odp_datapath = (struct odp_datapath *)skb->data;

        dp = lookup_datapath(odp_datapath, a);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto exit_free;

        err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);

exit_free:
        kfree_skb(skb);
        return err;
}
static int dump_datapath(struct odp_datapath __user *uodp_datapath)
{
        struct nlattr *a[ODP_DP_ATTR_MAX + 1];
        struct odp_datapath *odp_datapath;
        struct sk_buff *skb;
        u32 dp_idx;
        int err;

        skb = copy_datapath_from_user(uodp_datapath, a);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        odp_datapath = (struct odp_datapath *)skb->data;

        err = -ENODEV;
        for (dp_idx = odp_datapath->dp_idx; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
                struct datapath *dp = get_dp(dp_idx);
                if (!dp)
                        continue;

                err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
                break;
        }
        kfree_skb(skb);

        return err;
}
static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
        [ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
        [ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
        [ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
/* Called with RCU read lock. */
static struct sk_buff *odp_vport_build_info(struct vport *vport, uint32_t total_len)
{
        struct odp_vport *odp_vport;
        struct sk_buff *skb;
        struct nlattr *nla;
        int ifindex, iflink;
        int err;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        odp_vport = (struct odp_vport *)__skb_put(skb, sizeof(struct odp_vport));
        odp_vport->dp_idx = vport->dp->dp_idx;
        odp_vport->total_len = total_len;

        NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
        NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
        NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));

        nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
        if (!nla)
                goto nla_put_failure;
        if (vport_get_stats(vport, nla_data(nla)))
                __skb_trim(skb, skb->len - nla->nla_len);

        NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));

        NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));

        err = vport_get_options(vport, skb);
        if (err)
                goto exit_free_skb;

        ifindex = vport_get_ifindex(vport);
        if (ifindex > 0)
                NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);

        iflink = vport_get_iflink(vport);
        if (iflink > 0)
                NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);

        if (skb->len > total_len)
                goto nla_put_failure;

        odp_vport->len = skb->len;
        return skb;

nla_put_failure:
        err = -EMSGSIZE;
exit_free_skb:
        kfree_skb(skb);
        return ERR_PTR(err);
}
static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
                                            struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        struct odp_vport *odp_vport;
        struct sk_buff *skb;
        u32 len;
        int err;

        if (get_user(len, &uodp_vport->len))
                return ERR_PTR(-EFAULT);
        if (len < sizeof(struct odp_vport))
                return ERR_PTR(-EINVAL);

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        err = -EFAULT;
        if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
                goto error_free_skb;

        odp_vport = (struct odp_vport *)skb->data;
        err = -EINVAL;
        if (odp_vport->len != len)
                goto error_free_skb;

        err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
                        skb->len - sizeof(struct odp_vport), vport_policy);
        if (err)
                goto error_free_skb;

        err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
        if (err)
                goto error_free_skb;

        return skb;

error_free_skb:
        kfree_skb(skb);
        return ERR_PTR(err);
}
/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct odp_vport *odp_vport,
                                  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[ODP_VPORT_ATTR_NAME]) {
                vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[ODP_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EINVAL);

                dp = get_dp(odp_vport->dp_idx);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = get_vport_protected(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENOENT);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}
/* Called with RTNL lock. */
static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        int err = 0;

        if (a[ODP_VPORT_ATTR_STATS])
                err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
        if (!err && a[ODP_VPORT_ATTR_ADDRESS])
                err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
        if (!err && a[ODP_VPORT_ATTR_MTU])
                err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));

        return err;
}
static int attach_vport(struct odp_vport __user *uodp_vport)
{
        struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
        struct odp_vport *odp_vport;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct sk_buff *skb;
        struct datapath *dp;
        u32 port_no;
        int err;

        skb = copy_vport_from_user(uodp_vport, a);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto exit;
        odp_vport = (struct odp_vport *)skb->data;

        err = -EINVAL;
        if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
                goto exit_kfree_skb;

        rtnl_lock();
        dp = get_dp(odp_vport->dp_idx);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock_rtnl;

        if (a[ODP_VPORT_ATTR_PORT_NO]) {
                port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

                err = -EFBIG;
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock_rtnl;

                vport = get_vport_protected(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock_rtnl;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock_rtnl;
                        }
                        vport = get_vport_protected(dp, port_no);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
        parms.options = a[ODP_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock_rtnl;

        set_internal_devs_mtu(dp);
        dp_sysfs_add_if(vport);

        err = change_vport(vport, a);
        if (err) {
                dp_detach_port(vport);
                goto exit_unlock_rtnl;
        }

        reply = odp_vport_build_info(vport, odp_vport->total_len);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock_rtnl;

        err = copy_to_user(uodp_vport, reply->data, reply->len) ? -EFAULT : 0;
        kfree_skb(reply);

exit_unlock_rtnl:
        rtnl_unlock();
exit_kfree_skb:
        kfree_skb(skb);
exit:
        return err;
}
static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
        struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
        struct vport *vport;
        struct sk_buff *skb;
        int err;

        skb = copy_vport_from_user(uodp_vport, a);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto exit;

        rtnl_lock();
        vport = lookup_vport((struct odp_vport *)skb->data, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_free;

        err = 0;
        if (a[ODP_VPORT_ATTR_OPTIONS])
                err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
        if (!err)
                err = change_vport(vport, a);

exit_free:
        kfree_skb(skb);
        rtnl_unlock();
exit:
        return err;
}
static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
        struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
        struct vport *vport;
        struct sk_buff *skb;
        int err;

        skb = copy_vport_from_user(uodp_vport, a);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto exit;

        rtnl_lock();
        vport = lookup_vport((struct odp_vport *)skb->data, a);
        err = PTR_ERR(vport);
        if (!IS_ERR(vport))
                err = dp_detach_port(vport);

        rtnl_unlock();
        kfree_skb(skb);
exit:
        return err;
}
static int get_vport(struct odp_vport __user *uodp_vport)
{
        struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
        struct odp_vport *odp_vport;
        struct sk_buff *reply;
        struct vport *vport;
        struct sk_buff *skb;
        int err;

        skb = copy_vport_from_user(uodp_vport, a);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto err;
        odp_vport = (struct odp_vport *)skb->data;

        rcu_read_lock();
        vport = lookup_vport(odp_vport, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto err_unlock_rcu;
        reply = odp_vport_build_info(vport, odp_vport->total_len);
        rcu_read_unlock();

        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto err_kfree_skb;

        err = copy_to_user(uodp_vport, reply->data, reply->len) ? -EFAULT : 0;
        kfree_skb(reply);
        goto err_kfree_skb;

err_unlock_rcu:
        rcu_read_unlock();
err_kfree_skb:
        kfree_skb(skb);
err:
        return err;
}
static int dump_vport(struct odp_vport __user *uodp_vport)
{
        struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
        struct odp_vport *odp_vport;
        struct sk_buff *skb;
        struct datapath *dp;
        u32 port_no = 0;
        int err;

        skb = copy_vport_from_user(uodp_vport, a);
        err = PTR_ERR(skb);
        if (IS_ERR(skb))
                goto err;
        odp_vport = (struct odp_vport *)skb->data;

        dp = get_dp(odp_vport->dp_idx);
        err = -ENODEV;
        if (!dp)
                goto err_kfree_skb;

        if (a[ODP_VPORT_ATTR_PORT_NO])
                port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

        rcu_read_lock();
        for (; port_no < DP_MAX_PORTS; port_no++) {
                struct sk_buff *skb_out;
                struct vport *vport;
                int retval;

                vport = get_vport_protected(dp, port_no);
                if (!vport)
                        continue;

                skb_out = odp_vport_build_info(vport, odp_vport->total_len);
                rcu_read_unlock();

                err = PTR_ERR(skb_out);
                if (IS_ERR(skb_out))
                        goto err_kfree_skb;

                retval = copy_to_user(uodp_vport, skb_out->data, skb_out->len);
                kfree_skb(skb_out);
                kfree_skb(skb);

                return retval ? -EFAULT : 0;
        }
        rcu_read_unlock();
        err = -ENODEV;

err_kfree_skb:
        kfree_skb(skb);
err:
        return err;
}
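/* Like dump_flow(), this returns a single entry per call: userspace seeds
 * ODP_VPORT_ATTR_PORT_NO with the port number to start from and can resume
 * the dump by passing a port number one past the last one returned. */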
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
                              unsigned long argp)
{
        int listeners;
        int err;

        /* Handle commands with special locking requirements up front. */
        switch (cmd) {
        case ODP_DP_NEW:
                err = new_datapath((struct odp_datapath __user *)argp);
                break;

        case ODP_DP_GET:
                err = get_datapath((struct odp_datapath __user *)argp);
                break;

        case ODP_DP_DEL:
                err = del_datapath((struct odp_datapath __user *)argp);
                break;

        case ODP_DP_SET:
                err = set_datapath((struct odp_datapath __user *)argp);
                break;

        case ODP_DP_DUMP:
                err = dump_datapath((struct odp_datapath __user *)argp);
                break;

        case ODP_VPORT_NEW:
                err = attach_vport((struct odp_vport __user *)argp);
                break;

        case ODP_VPORT_GET:
                err = get_vport((struct odp_vport __user *)argp);
                break;

        case ODP_VPORT_DEL:
                err = del_vport(cmd, (struct odp_vport __user *)argp);
                break;

        case ODP_VPORT_SET:
                err = set_vport(cmd, (struct odp_vport __user *)argp);
                break;

        case ODP_VPORT_DUMP:
                err = dump_vport((struct odp_vport __user *)argp);
                break;

        case ODP_FLOW_FLUSH:
                err = flush_flows(argp);
                break;

        case ODP_FLOW_NEW:
        case ODP_FLOW_SET:
                err = new_flow(cmd, (struct odp_flow __user *)argp);
                break;

        case ODP_FLOW_GET:
        case ODP_FLOW_DEL:
                err = get_or_del_flow(cmd, (struct odp_flow __user *)argp);
                break;

        case ODP_FLOW_DUMP:
                err = dump_flow((struct odp_flow __user *)argp);
                break;

        case ODP_EXECUTE:
                err = execute_packet((struct odp_packet __user *)argp);
                break;

        case ODP_GET_LISTEN_MASK:
                err = put_user(get_listen_mask(f), (int __user *)argp);
                break;

        case ODP_SET_LISTEN_MASK:
                err = get_user(listeners, (int __user *)argp);
                if (err)
                        break;
                err = -EINVAL;
                if (listeners & ~ODPL_ALL)
                        break;
                err = 0;
                set_listen_mask(f, listeners);
                break;

        default:
                err = -ENOIOCTLCMD;
                break;
        }

        return err;
}
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
        int i;

        for (i = 0; i < DP_N_QUEUES; i++) {
                if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
                        return 1;
        }
        return 0;
}
#ifdef CONFIG_COMPAT
static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
        switch (cmd) {
        case ODP_FLOW_FLUSH:
                /* Ioctls that don't need any translation at all. */
                return openvswitch_ioctl(f, cmd, argp);

        case ODP_DP_NEW:
        case ODP_DP_GET:
        case ODP_DP_DEL:
        case ODP_DP_SET:
        case ODP_DP_DUMP:
        case ODP_VPORT_NEW:
        case ODP_VPORT_DEL:
        case ODP_VPORT_GET:
        case ODP_VPORT_SET:
        case ODP_VPORT_DUMP:
        case ODP_FLOW_NEW:
        case ODP_FLOW_DEL:
        case ODP_FLOW_GET:
        case ODP_FLOW_SET:
        case ODP_FLOW_DUMP:
        case ODP_SET_LISTEN_MASK:
        case ODP_GET_LISTEN_MASK:
        case ODP_EXECUTE:
                /* Ioctls that just need their pointer argument extended. */
                return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));

        default:
                return -ENOIOCTLCMD;
        }
}
#endif
static struct sk_buff *openvswitch_try_read(struct file *f, struct datapath *dp)
{
        int listeners = get_listen_mask(f);
        int i;

        for (;;) {
                for (i = 0; i < DP_N_QUEUES; i++) {
                        if (listeners & (1 << i)) {
                                struct sk_buff *skb = skb_dequeue(&dp->queues[i]);
                                if (skb)
                                        return skb;
                        }
                }

                if (f->f_flags & O_NONBLOCK)
                        return ERR_PTR(-EAGAIN);

                wait_event_interruptible(dp->waitqueue,
                                         dp_has_packet_of_interest(dp, listeners));

                if (signal_pending(current))
                        return ERR_PTR(-ERESTARTSYS);
        }
}
static ssize_t openvswitch_read(struct file *f, char __user *buf,
                                size_t nbytes, loff_t *ppos)
{
        int dp_idx = iminor(f->f_dentry->d_inode);
        struct datapath *dp;
        struct sk_buff *skb;
        struct iovec iov;
        int retval;

        dp = get_dp(dp_idx);
        if (!dp)
                return -ENODEV;

        if (nbytes == 0 || !get_listen_mask(f))
                return 0;

        skb = openvswitch_try_read(f, dp);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        iov.iov_base = buf;
        iov.iov_len = min_t(size_t, skb->len, nbytes);
        retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
        if (!retval)
                retval = iov.iov_len;

        kfree_skb(skb);
        return retval;
}
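/* Typical userspace consumption of this interface (a sketch, not code from
 * this tree): open the datapath's character device, subscribe with the
 * listen-mask ioctl, then read() one upcall message at a time:
 *
 *	int mask = ODPL_ALL;
 *	ioctl(fd, ODP_SET_LISTEN_MASK, &mask);
 *	for (;;) {
 *		char buf[65536];
 *		ssize_t n = read(fd, buf, sizeof buf);
 *		if (n > 0)
 *			... parse struct odp_packet + attributes ...
 *	}
 */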
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
        int dp_idx = iminor(file->f_dentry->d_inode);
        struct datapath *dp;
        unsigned int mask;

        dp = get_dp(dp_idx);
        if (dp) {
                mask = 0;
                poll_wait(file, &dp->waitqueue, wait);
                if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
                        mask |= POLLIN | POLLRDNORM;
        } else {
                mask = POLLIN | POLLRDNORM | POLLHUP;
        }
        return mask;
}
static struct file_operations openvswitch_fops = {
        .owner = THIS_MODULE,
        .read = openvswitch_read,
        .poll = openvswitch_poll,
        .unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = openvswitch_compat_ioctl,
#endif
};
static int major;

static int __init dp_init(void)
{
        struct sk_buff *dummy_skb;
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

        printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

        err = flow_init();
        if (err)
                goto error;

        err = vport_init();
        if (err)
                goto error_flow_exit;

        err = register_netdevice_notifier(&dp_device_notifier);
        if (err)
                goto error_vport_exit;

        major = register_chrdev(0, "openvswitch", &openvswitch_fops);
        if (major < 0) {
                err = major;
                goto error_unreg_notifier;
        }

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
        vport_exit();
error_flow_exit:
        flow_exit();
error:
        return err;
}
static void dp_cleanup(void)
{
        rcu_barrier();
        unregister_chrdev(major, "openvswitch");
        unregister_netdevice_notifier(&dp_device_notifier);
        vport_exit();
        flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");