/*
 * Copyright (c) 2007, 2008, 2009 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>

#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/llc.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>

#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dmi.h>

#include "openvswitch/datapath-protocol.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.  dp_mutex is almost completely redundant with genl_mutex
 * maintained by the Generic Netlink code, but the timeout path needs mutual
 * exclusion.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * dp_mutex.
 */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);

/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

static int new_nbp(struct datapath *, struct net_device *, int port_no);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);
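
/* Returns the datapath with index 'dp_idx' with dp->mutex held, or NULL if no
 * such datapath exists.  dp_mutex is held only long enough to pin the datapath
 * while its own mutex is taken, keeping the lock order (dp_mutex, then
 * dp->mutex) the same for every caller. */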
struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
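
/* Fills 'skb' with an RTM_NEWLINK or RTM_DELLINK message (selected by 'event')
 * describing 'port', using the same AF_BRIDGE rtnetlink format that the bridge
 * module uses, so userspace tools watching rtnetlink see datapath ports come
 * and go just as they would bridge ports. */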
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	const struct net_device *dev = port->dev;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_MASTER, dp->ports[ODPP_LOCAL]->dev->ifindex);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dp_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net = dev_net(port->dev);
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);

	err = dp_fill_ifinfo(skb, port, event, 0);
	/* -EMSGSIZE implies BUG in br_nlmsg_size() */
	WARN_ON(err == -EMSGSIZE);

	err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

struct kobj_type dp_ktype = {
	.release = release_dp
};

static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct net_device *dp_dev;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
	devname[IFNAMSIZ - 1] = '\0';
	snprintf(devname, sizeof devname, "of%d", dp_idx);

	mutex_lock(&dp_mutex);
	if (!try_module_get(THIS_MODULE))

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */

	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	kobject_set_name(&dp->ifobj, SYSFS_BRIDGE_PORT_SUBDIR); /* "brif" */
	dp->ifobj.kset = NULL;
	dp->ifobj.parent = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));

	/* Set up our datapath device. */
	dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
	err = PTR_ERR(dp_dev);
	if (IS_ERR(dp_dev))
		goto err_destroy_table;

	err = new_nbp(dp, dp_dev, ODPP_LOCAL);
	if (err) {
		dp_dev_destroy(dp_dev);
		goto err_destroy_table;
	}

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;
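
	/* Publish the fully initialized datapath.  rcu_assign_pointer() ensures
	 * that readers walking dps[] under rcu_read_lock never see a partially
	 * constructed structure. */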
	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);

err_destroy_local_port:
	dp_del_port(dp->ports[ODPP_LOCAL]);

	dp_table_destroy(dp->table, 0);

	module_put(THIS_MODULE);

	mutex_unlock(&dp_mutex);

static void do_destroy_dp(struct datapath *dp)
{
	struct net_bridge_port *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_del_port(p);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_del_port(dp->ports[ODPP_LOCAL]);

	dp_table_destroy(dp->table, 1);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	for (i = 0; i < DP_MAX_GROUPS; i++)
		kfree(dp->groups[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}

static int destroy_dp(int dp_idx)
{
	mutex_lock(&dp_mutex);

	mutex_unlock(&dp_mutex);

static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p = container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

struct kobj_type brport_ktype = {
	.sysfs_ops = &brport_sysfs_ops,
	.release = release_nbp
};

/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
	struct net_bridge_port *p;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = kzalloc(sizeof(*p), GFP_KERNEL);

	dev_set_promiscuity(dev, 1);
	p->port_no = port_no;

	if (!is_dp_dev(dev))
		rcu_assign_pointer(dev->br_port, p);
	else {
		/* It would make sense to assign dev->br_port here too, but
		 * that causes packets received on internal ports to get caught
		 * in dp_frame_hook().  In turn dp_frame_hook() can reject them
		 * back to the network stack, but that's a waste of time. */
	}
	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
	kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR); /* "brport" */
	p->kobj.parent = &p->dev->NETDEV_DEV_MEMBER.kobj;
	kobject_init(&p->kobj, &brport_ktype);

	dp_ifinfo_notify(RTM_NEWLINK, p);
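
/* Handles the port-add request: copies a struct odp_port from userspace,
 * validates the requested port number, and attaches either the named existing
 * network device or, when ODP_PORT_INTERNAL is set, a freshly created internal
 * dp_dev device. */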
static int add_port(int dp_idx, struct odp_port __user *portp)
{
	struct net_device *dev;
	struct odp_port port;

	if (copy_from_user(&port, portp, sizeof port))
	port.devname[IFNAMSIZ - 1] = '\0';

	if (port_no < 0 || port_no >= DP_MAX_PORTS)

	dp = get_dp_locked(dp_idx);
	if (!dp)
		goto out_unlock_rtnl;

	if (dp->ports[port_no])

	if (!(port.flags & ODP_PORT_INTERNAL)) {
		dev = dev_get_by_name(&init_net, port.devname);

		if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER ||

		dev = dp_dev_create(dp, port.devname, port_no);
	}

	err = new_nbp(dp, dev, port_no);

	dp_sysfs_add_if(dp->ports[port_no]);

	mutex_unlock(&dp->mutex);

int dp_del_port(struct net_bridge_port *p)
{
	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	if (is_dp_dev(p->dev)) {
		/* Make sure that no packets arrive from now on, since
		 * dp_dev_xmit() will try to find itself through
		 * p->dp->ports[], and we're about to set that to null. */
		netif_tx_disable(p->dev);
	}

	/* First drop references to device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	if (is_dp_dev(p->dev))
		dp_dev_destroy(p->dev);

	kobject_put(&p->kobj);
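
/* ioctl-level wrapper around dp_del_port(): validates the port number, pins
 * the datapath with get_dp_locked(), and detaches the port under the RTNL
 * lock before releasing the datapath mutex. */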
static int del_port(int dp_idx, int port_no)
{
	struct net_bridge_port *p;

	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		return -EINVAL;

	dp = get_dp_locked(dp_idx);
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];

	err = dp_del_port(p);

	mutex_unlock(&dp->mutex);

/* Must be called with rcu_read_lock. */
static void
do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	dp_process_received_packet(skb, p);
}
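
/* Receive fast path: extract a flow key from the packet, look it up in the
 * exact-match flow table, and either run the matching flow's actions or, on a
 * miss, queue the packet to userspace on the _ODPL_MISS_NR queue. */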
/* Must be called with rcu_read_lock and with bottom-halves disabled. */
void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	struct odp_flow_key key;
	struct sw_flow *flow;

	WARN_ON_ONCE(skb_shared(skb));

	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());

	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
		if (dp->drop_frags) {
			kfree_skb(skb);
			stats->n_frags++;
			return;
		}
	}

	flow = dp_table_lookup(rcu_dereference(dp->table), &key);
	if (flow) {
		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
		flow_used(flow, skb);
		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
				GFP_ATOMIC);
		stats->n_hit++;
	} else {
		stats->n_missed++;
		dp_output_control(dp, skb, _ODPL_MISS_NR, 0);
	}
}

/*
 * Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * a different set of devices!)
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	do_port_input(p, skb);
	return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Called with rcu_read_lock and bottom-halves disabled. */
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	do_port_input(p, *pskb);
	return 1;
}
#endif

/* This code is copied verbatim from net/dev/core.c in Xen's
 * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644.  We can't call those functions
 * directly because they aren't exported. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
	if (ptr < (void *)skb->tail)
		return 1;
	if (__pskb_pull_tail(skb,
			     ptr - (void *)skb->data - skb_headlen(skb))) {
		return 1;
	}
	return 0;
}

int skb_checksum_setup(struct sk_buff *skb)
{
	if (skb->proto_csum_blank) {
		if (skb->protocol != htons(ETH_P_IP))
			goto out;
		if (!skb_pull_up_to(skb, skb->nh.iph + 1))
			goto out;
		skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
		switch (skb->nh.iph->protocol) {
		case IPPROTO_TCP:
			skb->csum = offsetof(struct tcphdr, check);
			break;
		case IPPROTO_UDP:
			skb->csum = offsetof(struct udphdr, check);
			break;
		default:
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", skb->nh.iph->protocol);
			goto out;
		}
		if (!skb_pull_up_to(skb, skb->h.raw + skb->csum + 2))
			goto out;
		skb->ip_summed = CHECKSUM_HW;
		skb->proto_csum_blank = 0;
	}
	return 0;
out:
	return -EPROTO;
}
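
/* Queues 'skb' for delivery to userspace on queue 'queue_no' (one of
 * _ODPL_MISS_NR or _ODPL_ACTION_NR): fixes up any deferred checksum, segments
 * GSO packets so userspace never sees a frame larger than the wire format,
 * prepends a struct odp_msg header to each resulting packet, and wakes up any
 * readers blocked on the datapath wait queue. */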
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		  u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int port_no;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR);

	queue = &dp->queues[queue_no];

	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)

	/* If a checksum-deferred packet is forwarded to the controller,
	 * correct the pointers and checksum.  This happens on a regular basis
	 * only on Xen (the CHECKSUM_HW case), on which VMs can pass up packets
	 * that do not have their checksum computed.  We also implement it for
	 * the non-Xen case, but it is difficult to trigger or test this case
	 * there, hence the WARN_ON_ONCE().
	 */
	err = skb_checksum_setup(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
		/* Until 2.6.22, the start of the transport header was also the
		 * start of data to be checksummed.  Linux 2.6.22 introduced
		 * the csum_start field for this purpose, but we should point
		 * the transport header to it anyway for backward
		 * compatibility, as dev_queue_xmit() does even in 2.6.28. */
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
#endif
		err = skb_checksum_help(skb);
	}
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
	}

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);

		if (unlikely(IS_ERR(skb))) {

		/* XXX This case might not be possible.  It's hard to
		 * tell from the skb_gso_segment() code and comment. */
	}

	/* Figure out port number. */
	port_no = ODPP_LOCAL;

	if (skb->dev->br_port)
		port_no = skb->dev->br_port->port_no;
	else if (is_dp_dev(skb->dev))
		port_no = dp_dev_priv(skb->dev)->port_no;

	/* Append each packet to queue.  There will be only one packet unless
	 * we broke up a GSO packet above. */
	do {
		struct odp_msg *header;
		struct sk_buff *nskb = skb->next;

		err = skb_cow(skb, sizeof *header);
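
		/* Each queued packet is preceded by a struct odp_msg that tells
		 * userspace which queue it came from, its total length
		 * including the header, and its ingress port. */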
		header = (struct odp_msg*)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;

		skb_queue_tail(queue, skb);

	wake_up_interruptible(&dp->waitqueue);

	stats = percpu_ptr(dp->stats_percpu, get_cpu());

static int flush_flows(struct datapath *dp)
{
	return dp_table_flush(dp);
}
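
/* Checks that every action in a userspace-supplied action list is well formed
 * before it is accepted: output ports and port groups must be in range, and
 * VLAN IDs and priorities must fit within their fields. */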
static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];
		switch (a->type) {
		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;
		case ODPAT_OUTPUT_GROUP:
			if (a->output_group.group >= DP_MAX_GROUPS)
				return -EINVAL;
			break;
		case ODPAT_SET_VLAN_VID:
			if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
				return -EINVAL;
			break;
		case ODPAT_SET_VLAN_PCP:
			if (a->vlan_pcp.vlan_pcp
			    & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
				return -EINVAL;
			break;
		default:
			if (a->type >= ODPAT_N_ACTIONS)
				return -EINVAL;
			break;
		}
	}

	return 0;
}

static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);

	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
	return ERR_PTR(error);
}

static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used.tv_sec) {
		stats->used_sec = flow->used.tv_sec;
		stats->used_nsec = flow->used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}
	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->ip_tos = flow->ip_tos;
	stats->tcp_flags = flow->tcp_flags;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used.tv_sec = flow->used.tv_nsec = 0;

	flow->packet_count = 0;
	flow->byte_count = 0;
}
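
/* Implements flow installation and modification for the flow-put ioctl.  If
 * no flow matches the key, a new flow may be created when ODPPF_CREATE is set
 * (growing the flow table first if it is full); if one already exists, its
 * actions are replaced under ODPPF_MODIFY and the old actions are freed only
 * after an RCU grace period.  In both cases the flow's statistics are copied
 * back to userspace, optionally after being zeroed (ODPPF_ZERO_STATS). */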
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_put uf;
	struct sw_flow *flow;
	struct dp_table *table;
	struct odp_flow_stats stats;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		return -EFAULT;
	uf.flow.key.reserved = 0;

	table = rcu_dereference(dp->table);
	flow = dp_table_lookup(table, &uf.flow.key);
	if (!flow) {
		struct sw_flow_actions *acts;

		if (!(uf.flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (dp->n_flows >= table->n_buckets) {
			if (table->n_buckets >= DP_MAX_BUCKETS)
				goto error;

			error = dp_table_expand(dp);

			table = rcu_dereference(dp->table);
		}

		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);

		flow->key = uf.flow.key;
		spin_lock_init(&flow->lock);

		/* Obtain actions. */
		acts = get_actions(&uf.flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = dp_table_insert(table, flow);
		if (error)
			goto error_free_flow_acts;

		memset(&stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;
		unsigned long int flags;

		/* Bail out if we're not allowed to modify an existing flow. */
		if (!(uf.flags & ODPPF_MODIFY))
			goto error;

		new_acts = get_actions(&uf.flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;
		old_acts = rcu_dereference(flow->sf_acts);
		if (old_acts->n_actions != new_acts->n_actions ||
		    memcmp(old_acts->actions, new_acts->actions,
			   sizeof(union odp_action) * old_acts->n_actions)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_irqsave(&flow->lock, flags);
		get_stats(flow, &stats);
		if (uf.flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_irqrestore(&flow->lock, flags);
	}

	/* Copy stats to userspace. */
	if (__copy_to_user(&ufp->flow.stats, &stats,
			   sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return 0;

error_free_flow_acts:
	kfree(flow->sf_acts);
error_free_flow:
	kmem_cache_free(flow_cache, flow);
error:
	return error;
}

static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
{
	union odp_action __user *actions;
	struct sw_flow_actions *sf_acts;
	u32 n_actions;

	if (__get_user(actions, &ufp->actions) ||
	    __get_user(n_actions, &ufp->n_actions))
		return -EFAULT;

	if (ufp->n_actions > INT_MAX / sizeof(union odp_action))
		return -EINVAL;

	sf_acts = rcu_dereference(flow->sf_acts);
	if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     sizeof(union odp_action) *
				     min(sf_acts->n_actions, n_actions))))
		return -EFAULT;

	return 0;
}

static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp)
{
	struct odp_flow_stats stats;
	unsigned long int flags;

	spin_lock_irqsave(&flow->lock, flags);
	get_stats(flow, &stats);
	spin_unlock_irqrestore(&flow->lock, flags);

	if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return put_actions(flow, ufp);
}

static int del_or_query_flow(struct datapath *dp,
			     struct odp_flow __user *ufp,
			     int cmd)
{
	struct dp_table *table = rcu_dereference(dp->table);
	struct odp_flow uf;
	struct sw_flow *flow;
	int error;

	if (copy_from_user(&uf, ufp, sizeof uf))
		return -EFAULT;
	uf.key.reserved = 0;

	flow = dp_table_lookup(table, &uf.key);
	if (!flow)
		return -ENOENT;

	if (cmd == ODP_FLOW_DEL) {
		/* XXX redundant lookup */
		error = dp_table_delete(table, flow);

		/* XXX These statistics might lose a few packets, since other
		 * CPUs can be using this flow.  We used to synchronize_rcu()
		 * to make sure that we get completely accurate stats, but that
		 * blows our performance, badly. */
		error = answer_query(flow, ufp);
		flow_deferred_free(flow);
	} else {
		error = answer_query(flow, ufp);
	}

	return error;
}

static int query_multiple_flows(struct datapath *dp,
				const struct odp_flowvec *flowvec)
{
	struct dp_table *table = rcu_dereference(dp->table);
	int i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct __user odp_flow *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct sw_flow *flow;
		int error;

		if (__copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;
		uf.key.reserved = 0;

		flow = dp_table_lookup(table, &uf.key);
		if (!flow)
			error = __clear_user(&ufp->stats, sizeof ufp->stats);
		else
			error = answer_query(flow, ufp);
	}
	return flowvec->n_flows;
}

struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	int n_flows;
	int listed_flows;
};

static int list_flow(struct sw_flow *flow, void *cbdata_)
{
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;
	error = dp_table_foreach(rcu_dereference(dp->table),
				 list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}
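
/* Common wrapper for ioctls that take a struct odp_flowvec.  It copies in the
 * vector header, bounds-checks the flow count, verifies that the flow array is
 * writable, and then invokes 'function'.  A negative result is returned as an
 * error; otherwise the number of flows actually processed is written back to
 * userspace unless it already equals n_flows. */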
static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, flowvec.flows,
		       flowvec.n_flows * sizeof(struct odp_flow)))
		return -EFAULT;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: __put_user(retval, &uflowvec->n_flows));
}
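
/* Implements the packet-execute ioctl: copies a packet and an action list in
 * from userspace, validates the actions, builds an sk_buff around the packet
 * data, extracts its flow key, and then runs the actions over it just as the
 * receive path would. */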
static int do_execute(struct datapath *dp, const struct odp_execute *executep)
{
	struct odp_execute execute;
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	int err;

	if (copy_from_user(&execute, executep, sizeof execute))
		return -EFAULT;

	if (execute.length < ETH_HLEN || execute.length > 65535)
		return -EINVAL;

	actions = flow_actions_alloc(execute.n_actions);

	if (copy_from_user(actions->actions, execute.actions,
			   execute.n_actions * sizeof *execute.actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	skb = alloc_skb(execute.length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;
	if (execute.in_port < DP_MAX_PORTS) {
		struct net_bridge_port *p = dp->ports[execute.in_port];
		if (p)
			skb->dev = p->dev;
	}

	if (copy_from_user(skb_put(skb, execute.length), execute.data,
			   execute.length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	flow_extract(skb, execute.in_port, &key);
	err = execute_actions(dp, skb, &key, actions->actions,
			      actions->n_actions, GFP_KERNEL);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
	return err;
}

static int
get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_flows = dp->n_flows;
	stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
	stats.max_capacity = DP_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.max_groups = DP_MAX_GROUPS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *s;
		s = percpu_ptr(dp->stats_percpu, i);
		stats.n_frags += s->n_frags;
		stats.n_hit += s->n_hit;
		stats.n_missed += s->n_missed;
		stats.n_lost += s->n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}

/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports. */
int dp_min_mtu(const struct datapath *dp)
{
	struct net_bridge_port *p;
	int mtu = 0;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		struct net_device *dev = p->dev;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_dp_dev(dev))
			continue;

		if (!mtu || dev->mtu < mtu)
			mtu = dev->mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
	struct odp_port op;

	memset(&op, 0, sizeof op);
	strncpy(op.devname, p->dev->name, sizeof op.devname);
	op.port = p->port_no;
	op.flags = is_dp_dev(p->dev) ? ODP_PORT_INTERNAL : 0;
	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}

static int
query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;

	if (port.devname[0]) {
		struct net_bridge_port *p;
		struct net_device *dev;
		int err;

		port.devname[IFNAMSIZ - 1] = '\0';

		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			return -ENODEV;

		p = dev->br_port;
		if (!p && is_dp_dev(dev)) {
			struct dp_dev *dp_dev = dp_dev_priv(dev);
			if (dp_dev->dp == dp)
				p = dp->ports[dp_dev->port_no];
		}
		err = p && p->dp == dp ? put_port(p, uport) : -ENOENT;
		dev_put(dev);
		return err;
	}

	if (port.port >= DP_MAX_PORTS)
		return -EINVAL;
	if (!dp->ports[port.port])
		return -ENOENT;
	return put_port(dp->ports[port.port], uport);
}

static int
list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
{
	struct odp_portvec pv;
	struct net_bridge_port *p;
	int idx = 0;

	if (copy_from_user(&pv, pvp, sizeof pv))
		return -EFAULT;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (put_port(p, &pv.ports[idx]))
			return -EFAULT;
		if (idx++ >= pv.n_ports)
			break;
	}

	return put_user(idx, &pvp->n_ports);
}

/* RCU callback for freeing a dp_port_group */
static void free_port_group(struct rcu_head *rcu)
{
	struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
	kfree(g);
}
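
/* Replaces port group 'pg.group' with a new port list copied in from
 * userspace.  The new group is published with rcu_assign_pointer() and the old
 * one is handed to call_rcu(), so readers still iterating over the old group
 * remain safe until a grace period has elapsed. */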
static int
set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *new_group, *old_group;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
			    GFP_KERNEL);

	new_group->n_ports = pg.n_ports;
	if (copy_from_user(new_group->ports, pg.ports,
			   sizeof(u16) * pg.n_ports))
		goto error_free;

	old_group = rcu_dereference(dp->groups[pg.group]);
	rcu_assign_pointer(dp->groups[pg.group], new_group);
	if (old_group)
		call_rcu(&old_group->rcu, free_port_group);
	return 0;

error_free:
	kfree(new_group);
	return -EFAULT;
}

static int
get_port_group(struct datapath *dp, struct odp_port_group *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *g;
	int n_copy;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	g = dp->groups[pg.group];
	n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
	if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
		return -EFAULT;

	if (put_user(g ? g->n_ports : 0, &upg->n_ports))
		return -EFAULT;

	return 0;
}
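
/* Main dispatcher for the openvswitch character device.  Datapath create and
 * destroy and port add and delete are handled first because they have special
 * locking requirements; every other command runs with the datapath mutex held
 * via get_dp_locked(). */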
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		return create_dp(dp_idx, (char __user *)argp);

	case ODP_DP_DESTROY:
		return destroy_dp(dp_idx);

	case ODP_PORT_ADD:
		return add_port(dp_idx, (struct odp_port __user *)argp);

	case ODP_PORT_DEL:
		err = get_user(port_no, (int __user *)argp);
		if (err)
			return err;
		return del_port(dp_idx, port_no);
	}

	dp = get_dp_locked(dp_idx);
	if (!dp)
		return -ENODEV;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		if (drop_frags != 0 && drop_frags != 1)
		dp->drop_frags = drop_frags;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user((int)f->private_data, (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		if (listeners & ~ODPL_ALL)
		f->private_data = (void*)listeners;
		break;

	case ODP_PORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_PORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_PORT_GROUP_SET:
		err = set_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_PORT_GROUP_GET:
		err = get_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
	case ODP_FLOW_GET:
		err = del_or_query_flow(dp, (struct odp_flow __user *)argp,
					cmd);
		break;

	case ODP_FLOW_GET_MULTIPLE:
		err = do_flowvec_ioctl(dp, argp, query_multiple_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, list_flows);
		break;

	case ODP_EXECUTE:
		err = do_execute(dp, (struct odp_execute __user *)argp);
		break;
	}
	mutex_unlock(&dp->mutex);
	return err;
}

static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}
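
/* Each read() on the character device returns at most one queued packet,
 * prefixed by its struct odp_msg header and truncated to the caller's buffer;
 * if nothing is queued the caller blocks unless the file was opened
 * O_NONBLOCK.  A userspace consumer might look roughly like this (sketch
 * only, error handling omitted):
 *
 *	char buf[2048];
 *	ssize_t n = read(fd, buf, sizeof buf);
 *	struct odp_msg *msg = (struct odp_msg *) buf;
 *	...  msg->type, msg->port and msg->length describe the packet that
 *	     follows the header.
 */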
ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
			 loff_t *ppos)
{
	/* XXX is there sufficient synchronization here? */
	int listeners = (int) f->private_data;
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	struct sk_buff *skb;
	struct iovec __user iov;
	size_t copy_bytes;
	int retval;
	int i;

	if (nbytes == 0 || !listeners)
		return 0;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i)) {
			skb = skb_dequeue(&dp->queues[i]);

	if (f->f_flags & O_NONBLOCK) {

	wait_event_interruptible(dp->waitqueue,
				 dp_has_packet_of_interest(dp,
							   listeners));

	if (signal_pending(current)) {
		retval = -ERESTARTSYS;

	copy_bytes = min(skb->len, nbytes);
	iov.iov_base = buf;
	iov.iov_len = copy_bytes;
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = copy_bytes;

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	/* XXX is there sufficient synchronization here? */
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask = 0;

	if (dp) {
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, (int)file->private_data))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
	/* XXX .fasync = openvswitch_fasync, */
};

static struct llc_sap *dp_stp_sap;

static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* We don't really care about STP packets, we just listen for them for
	 * mutual exclusion with the bridge module, so this just discards
	 * them. */
	kfree_skb(skb);
	return 0;
}

static int __init dp_init(void)
{
	int err;

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	/* Register to receive STP packets because the bridge module also
	 * attempts to do so.  Since there can only be a single listener for a
	 * given protocol, this provides mutual exclusion against the bridge
	 * module, preventing both of them from being loaded at the same
	 * time. */
	dp_stp_sap = llc_sap_open(LLC_SAP_BSPAN, dp_stp_rcv);
	if (!dp_stp_sap) {
		printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
	}

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_flow_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0)
		goto error_unreg_notifier;

	/* Hook into callback used by the bridge to intercept packets.
	 * Parasites we are. */
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);

static void dp_cleanup(void)
{
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	br_handle_frame_hook = NULL;
	llc_sap_put(dp_stp_sap);
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");