2 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 /* Functions for managing the dp interface/device. */
11 #include <linux/init.h>
12 #include <linux/module.h>
14 #include <linux/if_arp.h>
15 #include <linux/if_vlan.h>
18 #include <linux/delay.h>
19 #include <linux/time.h>
20 #include <linux/etherdevice.h>
21 #include <linux/kernel.h>
22 #include <linux/kthread.h>
23 #include <linux/mutex.h>
24 #include <linux/percpu.h>
25 #include <linux/rcupdate.h>
26 #include <linux/tcp.h>
27 #include <linux/udp.h>
28 #include <linux/version.h>
29 #include <linux/ethtool.h>
30 #include <linux/random.h>
31 #include <linux/wait.h>
32 #include <asm/system.h>
33 #include <asm/div64.h>
35 #include <linux/netfilter_bridge.h>
36 #include <linux/netfilter_ipv4.h>
37 #include <linux/inetdevice.h>
38 #include <linux/list.h>
39 #include <linux/rculist.h>
40 #include <linux/workqueue.h>
41 #include <linux/dmi.h>
42 #include <net/inet_ecn.h>
43 #include <linux/compat.h>
45 #include "openvswitch/datapath-protocol.h"
49 #include "odp-compat.h"
51 #include "vport-internal_dev.h"
56 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
57 EXPORT_SYMBOL(dp_ioctl_hook);
59 /* Datapaths. Protected on the read side by rcu_read_lock, on the write side by dp_mutex.
62 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL lock first.
65 * It is safe to access the datapath and dp_port structures with just dp_mutex. */
68 static struct datapath *dps[ODP_MAX];
69 static DEFINE_MUTEX(dp_mutex);
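/* Illustrative sketch (not part of the driver): given the lock ordering
 * described above, a caller that needs both locks takes the RTNL lock before
 * dp_mutex, for example:
 *
 *	rtnl_lock();
 *	mutex_lock(&dp_mutex);
 *	... modify global datapath state ...
 *	mutex_unlock(&dp_mutex);
 *	rtnl_unlock();
 */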
71 /* Number of milliseconds between runs of the maintenance thread. */
72 #define MAINT_SLEEP_MSECS 1000
74 static int new_dp_port(struct datapath *, struct odp_port *, int port_no);
76 /* Must be called with rcu_read_lock or dp_mutex. */
77 struct datapath *get_dp(int dp_idx)
79 if (dp_idx < 0 || dp_idx >= ODP_MAX)
81 return rcu_dereference(dps[dp_idx]);
83 EXPORT_SYMBOL_GPL(get_dp);
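/* Example use of get_dp() from a read-side caller (a sketch, not code in this
 * file): holding rcu_read_lock satisfies the requirement stated above, and the
 * returned pointer must not be used after rcu_read_unlock().
 *
 *	rcu_read_lock();
 *	dp = get_dp(dp_idx);
 *	if (dp)
 *		... read-only use of dp ...
 *	rcu_read_unlock();
 */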
85 static struct datapath *get_dp_locked(int dp_idx)
89 mutex_lock(&dp_mutex);
92 mutex_lock(&dp->mutex);
93 mutex_unlock(&dp_mutex);
97 /* Must be called with rcu_read_lock or RTNL lock. */
98 const char *dp_name(const struct datapath *dp)
100 return vport_get_name(dp->ports[ODPP_LOCAL]->vport);
103 static inline size_t br_nlmsg_size(void)
105 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
106 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
107 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
108 + nla_total_size(4) /* IFLA_MASTER */
109 + nla_total_size(4) /* IFLA_MTU */
110 + nla_total_size(4) /* IFLA_LINK */
111 + nla_total_size(1); /* IFLA_OPERSTATE */
114 static int dp_fill_ifinfo(struct sk_buff *skb,
115 const struct dp_port *port,
116 int event, unsigned int flags)
118 const struct datapath *dp = port->dp;
119 int ifindex = vport_get_ifindex(port->vport);
120 int iflink = vport_get_iflink(port->vport);
121 struct ifinfomsg *hdr;
122 struct nlmsghdr *nlh;
130 nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
134 hdr = nlmsg_data(nlh);
135 hdr->ifi_family = AF_BRIDGE;
137 hdr->ifi_type = ARPHRD_ETHER;
138 hdr->ifi_index = ifindex;
139 hdr->ifi_flags = vport_get_flags(port->vport);
142 NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port->vport));
143 NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]->vport));
144 NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port->vport));
145 #ifdef IFLA_OPERSTATE
146 NLA_PUT_U8(skb, IFLA_OPERSTATE,
147 vport_is_running(port->vport)
148 ? vport_get_operstate(port->vport)
152 NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN,
153 vport_get_addr(port->vport));
155 if (ifindex != iflink)
156 NLA_PUT_U32(skb, IFLA_LINK, iflink);
158 return nlmsg_end(skb, nlh);
161 nlmsg_cancel(skb, nlh);
165 static void dp_ifinfo_notify(int event, struct dp_port *port)
170 skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
174 err = dp_fill_ifinfo(skb, port, event, 0);
176 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
177 WARN_ON(err == -EMSGSIZE);
181 rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
185 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
188 static void release_dp(struct kobject *kobj)
190 struct datapath *dp = container_of(kobj, struct datapath, ifobj);
194 static struct kobj_type dp_ktype = {
195 .release = release_dp
198 static int create_dp(int dp_idx, const char __user *devnamep)
200 struct odp_port internal_dev_port;
201 char devname[IFNAMSIZ];
207 int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
211 } else if (retval >= IFNAMSIZ) {
216 snprintf(devname, sizeof devname, "of%d", dp_idx);
220 mutex_lock(&dp_mutex);
222 if (!try_module_get(THIS_MODULE))
225 /* Exit early if a datapath with that number already exists.
226 * (We don't use -EEXIST because that's ambiguous with 'devname'
227 * conflicting with an existing network device name.) */
233 dp = kzalloc(sizeof *dp, GFP_KERNEL);
236 INIT_LIST_HEAD(&dp->port_list);
237 mutex_init(&dp->mutex);
239 for (i = 0; i < DP_N_QUEUES; i++)
240 skb_queue_head_init(&dp->queues[i]);
241 init_waitqueue_head(&dp->waitqueue);
243 /* Initialize kobject for bridge. This will be added as
244 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
245 dp->ifobj.kset = NULL;
246 kobject_init(&dp->ifobj, &dp_ktype);
248 /* Allocate table. */
250 rcu_assign_pointer(dp->table, tbl_create(0));
254 /* Set up our datapath device. */
255 BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
256 strcpy(internal_dev_port.devname, devname);
257 internal_dev_port.flags = ODP_PORT_INTERNAL;
258 err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL);
263 goto err_destroy_table;
267 dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
268 if (!dp->stats_percpu)
269 goto err_destroy_local_port;
271 rcu_assign_pointer(dps[dp_idx], dp);
272 mutex_unlock(&dp_mutex);
279 err_destroy_local_port:
280 dp_detach_port(dp->ports[ODPP_LOCAL], 1);
282 tbl_destroy(dp->table, NULL);
286 module_put(THIS_MODULE);
288 mutex_unlock(&dp_mutex);
294 static void do_destroy_dp(struct datapath *dp)
296 struct dp_port *p, *n;
299 list_for_each_entry_safe (p, n, &dp->port_list, node)
300 if (p->port_no != ODPP_LOCAL)
301 dp_detach_port(p, 1);
305 rcu_assign_pointer(dps[dp->dp_idx], NULL);
307 dp_detach_port(dp->ports[ODPP_LOCAL], 1);
309 tbl_destroy(dp->table, flow_free_tbl);
311 for (i = 0; i < DP_N_QUEUES; i++)
312 skb_queue_purge(&dp->queues[i]);
313 for (i = 0; i < DP_MAX_GROUPS; i++)
314 kfree(dp->groups[i]);
315 free_percpu(dp->stats_percpu);
316 kobject_put(&dp->ifobj);
317 module_put(THIS_MODULE);
320 static int destroy_dp(int dp_idx)
326 mutex_lock(&dp_mutex);
336 mutex_unlock(&dp_mutex);
341 static void release_dp_port(struct kobject *kobj)
343 struct dp_port *p = container_of(kobj, struct dp_port, kobj);
347 static struct kobj_type brport_ktype = {
349 .sysfs_ops = &brport_sysfs_ops,
351 .release = release_dp_port
354 /* Called with RTNL lock and dp_mutex. */
355 static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_no)
361 vport = vport_locate(odp_port->devname);
365 if (odp_port->flags & ODP_PORT_INTERNAL)
366 vport = __vport_add(odp_port->devname, "internal", NULL);
368 vport = __vport_add(odp_port->devname, "netdev", NULL);
373 return PTR_ERR(vport);
376 p = kzalloc(sizeof(*p), GFP_KERNEL);
380 p->port_no = port_no;
382 atomic_set(&p->sflow_pool, 0);
384 err = vport_attach(vport, p);
390 rcu_assign_pointer(dp->ports[port_no], p);
391 list_add_rcu(&p->node, &dp->port_list);
394 /* Initialize kobject for bridge. This will be added as
395 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
397 kobject_init(&p->kobj, &brport_ktype);
399 dp_ifinfo_notify(RTM_NEWLINK, p);
404 static int attach_port(int dp_idx, struct odp_port __user *portp)
407 struct odp_port port;
412 if (copy_from_user(&port, portp, sizeof port))
414 port.devname[IFNAMSIZ - 1] = '\0';
417 dp = get_dp_locked(dp_idx);
420 goto out_unlock_rtnl;
422 for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
423 if (!dp->ports[port_no])
429 err = new_dp_port(dp, &port, port_no);
433 set_internal_devs_mtu(dp);
434 dp_sysfs_add_if(dp->ports[port_no]);
436 err = put_user(port_no, &portp->port);
439 mutex_unlock(&dp->mutex);
446 int dp_detach_port(struct dp_port *p, int may_delete)
448 struct vport *vport = p->vport;
453 if (p->port_no != ODPP_LOCAL)
455 dp_ifinfo_notify(RTM_DELLINK, p);
457 /* First drop references to device. */
459 list_del_rcu(&p->node);
460 rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
462 err = vport_detach(vport);
466 /* Then wait until no one is still using it, and destroy it. */
470 const char *port_type = vport_get_type(vport);
472 if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
479 kobject_put(&p->kobj);
484 static int detach_port(int dp_idx, int port_no)
491 if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
495 dp = get_dp_locked(dp_idx);
498 goto out_unlock_rtnl;
500 p = dp->ports[port_no];
505 err = dp_detach_port(p, 1);
508 mutex_unlock(&dp->mutex);
515 /* Must be called with rcu_read_lock and with bottom-halves disabled. */
516 void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
518 struct datapath *dp = p->dp;
519 struct dp_stats_percpu *stats;
520 struct odp_flow_key key;
521 struct tbl_node *flow_node;
523 WARN_ON_ONCE(skb_shared(skb));
524 skb_warn_if_lro(skb);
526 OVS_CB(skb)->dp_port = p;
528 /* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
529 stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
531 if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
532 if (dp->drop_frags) {
539 flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
541 struct sw_flow *flow = flow_cast(flow_node);
542 struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
543 flow_used(flow, skb);
544 execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
549 dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
553 #if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
554 /* This code is based on skb_checksum_setup() from Xen's net/core/dev.c. We
555 * can't call this function directly because it isn't exported in all versions. */
557 int vswitch_skb_checksum_setup(struct sk_buff *skb)
562 __u16 csum_start, csum_offset;
564 if (!skb->proto_csum_blank)
567 if (skb->protocol != htons(ETH_P_IP))
570 if (!pskb_may_pull(skb, skb_network_header(skb) + sizeof(struct iphdr) - skb->data))
574 th = skb_network_header(skb) + 4 * iph->ihl;
576 csum_start = th - skb->head;
577 switch (iph->protocol) {
579 csum_offset = offsetof(struct tcphdr, check);
582 csum_offset = offsetof(struct udphdr, check);
586 printk(KERN_ERR "Attempting to checksum a non-"
587 "TCP/UDP packet, dropping a protocol"
588 " %d packet", iph->protocol);
592 if (!pskb_may_pull(skb, th + csum_offset + 2 - skb->data))
595 skb->ip_summed = CHECKSUM_PARTIAL;
596 skb->proto_csum_blank = 0;
598 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
599 skb->csum_start = csum_start;
600 skb->csum_offset = csum_offset;
602 skb_set_transport_header(skb, csum_start - skb_headroom(skb));
603 skb->csum = csum_offset;
611 #endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
613 /* Types of checksums that we can receive (these all refer to L4 checksums):
614 * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
615 * (though not verified) checksum in packet but not in skb->csum. Packets
616 * from the bridge local port will also have this type.
617 * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
618 * also the GRE module. This is the same as CHECKSUM_NONE, except it has
619 * a valid skb->csum. Importantly, both contain a full checksum (not
620 * verified) in the packet itself. The only difference is that if the
621 * packet gets to L4 processing on this machine (not in DomU) we won't
622 * have to recompute the checksum to verify. Most hardware devices do not
623 * produce packets with this type, even if they support receive checksum
624 * offloading (they produce type #5).
625 * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without full checksum and needs to
626 * be computed if it is sent off box. Unfortunately on earlier kernels,
627 * this case is impossible to distinguish from #2, despite having opposite
628 * meanings. Xen adds an extra field on earlier kernels (see #4) in order
629 * to distinguish the different states.
630 * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
631 * generated locally by a Xen DomU and has a partial checksum. If it is
632 * handled on this machine (Dom0 or DomU), then the checksum will not be
633 * computed. If it goes off box, the checksum in the packet needs to be
634 * completed. Calling skb_checksum_setup converts this to CHECKSUM_HW
635 * (CHECKSUM_PARTIAL) so that the checksum can be completed. In later
636 * kernels, this combination is replaced with CHECKSUM_PARTIAL.
637 * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
638 * full checksum or using a protocol without a checksum. skb->csum is
639 * undefined. This is common from devices with receive checksum
640 * offloading. This is somewhat similar to CHECKSUM_NONE, except that
641 * nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
643 * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
644 * both defined as CHECKSUM_HW. Normally the meaning of CHECKSUM_HW is clear
645 * based on whether it is on the transmit or receive path. After the datapath
646 * it will be interpreted as CHECKSUM_PARTIAL. If the packet already has a
647 * checksum, we will panic. Since we can receive packets with checksums, we
648 * assume that all CHECKSUM_HW packets have checksums and map them to
649 * CHECKSUM_NONE, which has a similar meaning (it is only different if the
650 * packet is processed by the local IP stack, in which case it will need to
651 * be reverified). If we receive a packet with CHECKSUM_HW that really means
652 * CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
653 * shouldn't be any devices that do this with bridging. */
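/* Summary of the receive-path mapping implemented by compute_ip_summed()
 * below (a quick reference for the discussion above):
 *
 *	skb->ip_summed			OVS_CB(skb)->ip_summed
 *	CHECKSUM_NONE			OVS_CSUM_NONE
 *	CHECKSUM_UNNECESSARY		OVS_CSUM_UNNECESSARY
 *	CHECKSUM_COMPLETE		OVS_CSUM_COMPLETE
 *	CHECKSUM_PARTIAL		OVS_CSUM_PARTIAL
 *	CHECKSUM_HW (older kernels)	OVS_CSUM_COMPLETE on receive
 *	Xen proto_csum_blank set	OVS_CSUM_PARTIAL (overrides the above)
 */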
655 compute_ip_summed(struct sk_buff *skb, bool xmit)
657 /* For our convenience these defines change repeatedly between kernel
658 * versions, so we can't just copy them over... */
659 switch (skb->ip_summed) {
661 OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
663 case CHECKSUM_UNNECESSARY:
664 OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
667 /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
668 * However, on the receive side we should only get CHECKSUM_PARTIAL
669 * packets from Xen, which uses some special fields to represent this
670 * (see below). Since we can only make one type work, pick the one
671 * that actually happens in practice.
673 * On the transmit side (basically after skb_checksum_setup()
674 * has been run or on internal dev transmit), packets with
675 * CHECKSUM_COMPLETE aren't generated, so assume CHECKSUM_PARTIAL. */
678 OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
680 OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
684 case CHECKSUM_COMPLETE:
685 OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
687 case CHECKSUM_PARTIAL:
688 OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
692 printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
694 /* None seems the safest... */
695 OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
698 #if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
699 /* Xen has a special way of representing CHECKSUM_PARTIAL on older
700 * kernels. It should not be set on the transmit path though. */
701 if (skb->proto_csum_blank)
702 OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
704 WARN_ON_ONCE(skb->proto_csum_blank && xmit);
708 /* This function closely resembles skb_forward_csum() used by the bridge. It
709 * is slightly different because we are only concerned with bridging and not
710 * other types of forwarding and can get away with slightly more optimal behavior. */
713 forward_ip_summed(struct sk_buff *skb)
716 if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
717 skb->ip_summed = CHECKSUM_NONE;
721 /* Append each packet in 'skb' list to 'queue'. There will be only one packet
722 * unless we broke up a GSO packet. */
724 queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
725 int queue_no, u32 arg)
727 struct sk_buff *nskb;
731 if (OVS_CB(skb)->dp_port)
732 port_no = OVS_CB(skb)->dp_port->port_no;
734 port_no = ODPP_LOCAL;
737 struct odp_msg *header;
742 /* If a checksum-deferred packet is forwarded to the
743 * controller, correct the pointers and checksum.
745 err = vswitch_skb_checksum_setup(skb);
749 if (skb->ip_summed == CHECKSUM_PARTIAL) {
751 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
752 /* Until 2.6.22, the start of the transport header was
753 * also the start of data to be checksummed. Linux
754 * 2.6.22 introduced the csum_start field for this
755 * purpose, but we should point the transport header to
756 * it anyway for backward compatibility, as
757 * dev_queue_xmit() does even in 2.6.28. */
758 skb_set_transport_header(skb, skb->csum_start -
762 err = skb_checksum_help(skb);
767 err = skb_cow(skb, sizeof *header);
771 header = (struct odp_msg*)__skb_push(skb, sizeof *header);
772 header->type = queue_no;
773 header->length = skb->len;
774 header->port = port_no;
775 header->reserved = 0;
777 skb_queue_tail(queue, skb);
785 while ((skb = nskb) != NULL) {
793 dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
796 struct dp_stats_percpu *stats;
797 struct sk_buff_head *queue;
800 WARN_ON_ONCE(skb_shared(skb));
801 BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
802 queue = &dp->queues[queue_no];
804 if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
807 forward_ip_summed(skb);
809 /* Break apart GSO packets into their component pieces. Otherwise
810 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
811 if (skb_is_gso(skb)) {
812 struct sk_buff *nskb = skb_gso_segment(skb, 0);
816 if (unlikely(IS_ERR(skb))) {
821 /* XXX This case might not be possible. It's hard to
822 * tell from the skb_gso_segment() code and comment. */
826 err = queue_control_packets(skb, queue, queue_no, arg);
827 wake_up_interruptible(&dp->waitqueue);
833 stats = percpu_ptr(dp->stats_percpu, get_cpu());
840 static int flush_flows(struct datapath *dp)
842 struct tbl *old_table = rcu_dereference(dp->table);
843 struct tbl *new_table;
845 new_table = tbl_create(0);
849 rcu_assign_pointer(dp->table, new_table);
851 tbl_deferred_destroy(old_table, flow_free_tbl);
856 static int validate_actions(const struct sw_flow_actions *actions)
860 for (i = 0; i < actions->n_actions; i++) {
861 const union odp_action *a = &actions->actions[i];
864 if (a->output.port >= DP_MAX_PORTS)
868 case ODPAT_OUTPUT_GROUP:
869 if (a->output_group.group >= DP_MAX_GROUPS)
873 case ODPAT_SET_VLAN_VID:
874 if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
878 case ODPAT_SET_VLAN_PCP:
879 if (a->vlan_pcp.vlan_pcp
880 & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
884 case ODPAT_SET_NW_TOS:
885 if (a->nw_tos.nw_tos & INET_ECN_MASK)
890 if (a->type >= ODPAT_N_ACTIONS)
899 static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
901 struct sw_flow_actions *actions;
904 actions = flow_actions_alloc(flow->n_actions);
905 error = PTR_ERR(actions);
910 if (copy_from_user(actions->actions, flow->actions,
911 flow->n_actions * sizeof(union odp_action)))
912 goto error_free_actions;
913 error = validate_actions(actions);
915 goto error_free_actions;
922 return ERR_PTR(error);
925 static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
927 if (flow->used.tv_sec) {
928 stats->used_sec = flow->used.tv_sec;
929 stats->used_nsec = flow->used.tv_nsec;
932 stats->used_nsec = 0;
934 stats->n_packets = flow->packet_count;
935 stats->n_bytes = flow->byte_count;
936 stats->ip_tos = flow->ip_tos;
937 stats->tcp_flags = flow->tcp_flags;
941 static void clear_stats(struct sw_flow *flow)
943 flow->used.tv_sec = flow->used.tv_nsec = 0;
946 flow->packet_count = 0;
947 flow->byte_count = 0;
950 static int expand_table(struct datapath *dp)
952 struct tbl *old_table = rcu_dereference(dp->table);
953 struct tbl *new_table;
955 new_table = tbl_expand(old_table);
956 if (IS_ERR(new_table))
957 return PTR_ERR(new_table);
959 rcu_assign_pointer(dp->table, new_table);
960 tbl_deferred_destroy(old_table, NULL);
965 static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
966 struct odp_flow_stats *stats)
968 struct tbl_node *flow_node;
969 struct sw_flow *flow;
973 memset(uf->flow.key.reserved, 0, sizeof uf->flow.key.reserved);
975 table = rcu_dereference(dp->table);
976 flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
979 struct sw_flow_actions *acts;
982 if (!(uf->flags & ODPPF_CREATE))
985 /* Expand table, if necessary, to make room. */
986 if (tbl_count(table) >= tbl_n_buckets(table)) {
987 error = expand_table(dp);
990 table = rcu_dereference(dp->table);
995 flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
998 flow->key = uf->flow.key;
999 spin_lock_init(&flow->lock);
1002 /* Obtain actions. */
1003 acts = get_actions(&uf->flow);
1004 error = PTR_ERR(acts);
1006 goto error_free_flow;
1007 rcu_assign_pointer(flow->sf_acts, acts);
1009 /* Put flow in bucket. */
1010 error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
1012 goto error_free_flow_acts;
1014 memset(stats, 0, sizeof(struct odp_flow_stats));
1016 /* We found a matching flow. */
1017 struct sw_flow_actions *old_acts, *new_acts;
1018 unsigned long int flags;
1020 flow = flow_cast(flow_node);
1022 /* Bail out if we're not allowed to modify an existing flow. */
1024 if (!(uf->flags & ODPPF_MODIFY))
1028 new_acts = get_actions(&uf->flow);
1029 error = PTR_ERR(new_acts);
1030 if (IS_ERR(new_acts))
1032 old_acts = rcu_dereference(flow->sf_acts);
1033 if (old_acts->n_actions != new_acts->n_actions ||
1034 memcmp(old_acts->actions, new_acts->actions,
1035 sizeof(union odp_action) * old_acts->n_actions)) {
1036 rcu_assign_pointer(flow->sf_acts, new_acts);
1037 flow_deferred_free_acts(old_acts);
1042 /* Fetch stats, then clear them if necessary. */
1043 spin_lock_irqsave(&flow->lock, flags);
1044 get_stats(flow, stats);
1045 if (uf->flags & ODPPF_ZERO_STATS)
1047 spin_unlock_irqrestore(&flow->lock, flags);
1052 error_free_flow_acts:
1053 kfree(flow->sf_acts);
1055 kmem_cache_free(flow_cache, flow);
1060 static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
1062 struct odp_flow_stats stats;
1063 struct odp_flow_put uf;
1066 if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
1069 error = do_put_flow(dp, &uf, &stats);
1073 if (copy_to_user(&ufp->flow.stats, &stats,
1074 sizeof(struct odp_flow_stats)))
1080 static int do_answer_query(struct sw_flow *flow, u32 query_flags,
1081 struct odp_flow_stats __user *ustats,
1082 union odp_action __user *actions,
1083 u32 __user *n_actionsp)
1085 struct sw_flow_actions *sf_acts;
1086 struct odp_flow_stats stats;
1087 unsigned long int flags;
1090 spin_lock_irqsave(&flow->lock, flags);
1091 get_stats(flow, &stats);
1092 if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
1093 flow->tcp_flags = 0;
1095 spin_unlock_irqrestore(&flow->lock, flags);
1097 if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
1098 get_user(n_actions, n_actionsp))
1104 sf_acts = rcu_dereference(flow->sf_acts);
1105 if (put_user(sf_acts->n_actions, n_actionsp) ||
1106 (actions && copy_to_user(actions, sf_acts->actions,
1107 sizeof(union odp_action) *
1108 min(sf_acts->n_actions, n_actions))))
1114 static int answer_query(struct sw_flow *flow, u32 query_flags,
1115 struct odp_flow __user *ufp)
1117 union odp_action *actions;
1119 if (get_user(actions, &ufp->actions))
1122 return do_answer_query(flow, query_flags,
1123 &ufp->stats, actions, &ufp->n_actions);
1126 static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
1128 struct tbl *table = rcu_dereference(dp->table);
1129 struct tbl_node *flow_node;
1132 memset(key->reserved, 0, sizeof key->reserved);
1133 flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
1135 return ERR_PTR(-ENOENT);
1137 error = tbl_remove(table, flow_node);
1139 return ERR_PTR(error);
1141 /* XXX Returned flow_node's statistics might lose a few packets, since
1142 * other CPUs can be using this flow. We used to synchronize_rcu() to
1143 * make sure that we get completely accurate stats, but that blows our
1144 * performance, badly. */
1145 return flow_cast(flow_node);
1148 static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
1150 struct sw_flow *flow;
1154 if (copy_from_user(&uf, ufp, sizeof uf))
1157 flow = do_del_flow(dp, &uf.key);
1159 return PTR_ERR(flow);
1161 error = answer_query(flow, 0, ufp);
1162 flow_deferred_free(flow);
1166 static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
1168 struct tbl *table = rcu_dereference(dp->table);
1171 for (i = 0; i < flowvec->n_flows; i++) {
1172 struct odp_flow __user *ufp = &flowvec->flows[i];
1174 struct tbl_node *flow_node;
1177 if (copy_from_user(&uf, ufp, sizeof uf))
1179 memset(uf.key.reserved, 0, sizeof uf.key.reserved);
1181 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1183 error = put_user(ENOENT, &ufp->stats.error);
1185 error = answer_query(flow_cast(flow_node), uf.flags, ufp);
1189 return flowvec->n_flows;
1192 struct list_flows_cbdata {
1193 struct odp_flow __user *uflows;
1198 static int list_flow(struct tbl_node *node, void *cbdata_)
1200 struct sw_flow *flow = flow_cast(node);
1201 struct list_flows_cbdata *cbdata = cbdata_;
1202 struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1205 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1207 error = answer_query(flow, 0, ufp);
1211 if (cbdata->listed_flows >= cbdata->n_flows)
1212 return cbdata->listed_flows;
1216 static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
1218 struct list_flows_cbdata cbdata;
1221 if (!flowvec->n_flows)
1224 cbdata.uflows = flowvec->flows;
1225 cbdata.n_flows = flowvec->n_flows;
1226 cbdata.listed_flows = 0;
1227 error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
1228 return error ? error : cbdata.listed_flows;
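/* Helper used with do_query_flows() and do_list_flows(): copies in the
 * odp_flowvec header, bounds-checks n_flows, and invokes 'function' on it.
 * Returns a negative errno on error, 0 if every requested flow was handled,
 * and otherwise writes the number actually handled back to
 * uflowvec->n_flows (see the return expression below). */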
1231 static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1232 int (*function)(struct datapath *,
1233 const struct odp_flowvec *))
1235 struct odp_flowvec __user *uflowvec;
1236 struct odp_flowvec flowvec;
1239 uflowvec = (struct odp_flowvec __user *)argp;
1240 if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
1243 if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
1246 retval = function(dp, &flowvec);
1247 return (retval < 0 ? retval
1248 : retval == flowvec.n_flows ? 0
1249 : put_user(retval, &uflowvec->n_flows));
1252 static int do_execute(struct datapath *dp, const struct odp_execute *execute)
1254 struct odp_flow_key key;
1255 struct sk_buff *skb;
1256 struct sw_flow_actions *actions;
1261 if (execute->length < ETH_HLEN || execute->length > 65535)
1265 actions = flow_actions_alloc(execute->n_actions);
1270 if (copy_from_user(actions->actions, execute->actions,
1271 execute->n_actions * sizeof *execute->actions))
1272 goto error_free_actions;
1274 err = validate_actions(actions);
1276 goto error_free_actions;
1279 skb = alloc_skb(execute->length, GFP_KERNEL);
1281 goto error_free_actions;
1283 if (execute->in_port < DP_MAX_PORTS)
1284 OVS_CB(skb)->dp_port = dp->ports[execute->in_port];
1286 OVS_CB(skb)->dp_port = NULL;
1289 if (copy_from_user(skb_put(skb, execute->length), execute->data,
1291 goto error_free_skb;
1293 skb_reset_mac_header(skb);
1296 /* Normally, setting the skb 'protocol' field would be handled by a
1297 * call to eth_type_trans(), but it assumes there's a sending
1298 * device, which we may not have. */
1299 if (ntohs(eth->h_proto) >= 1536)
1300 skb->protocol = eth->h_proto;
1302 skb->protocol = htons(ETH_P_802_2);
1304 flow_extract(skb, execute->in_port, &key);
1305 err = execute_actions(dp, skb, &key, actions->actions,
1306 actions->n_actions, GFP_KERNEL);
1318 static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
1320 struct odp_execute execute;
1322 if (copy_from_user(&execute, executep, sizeof execute))
1325 return do_execute(dp, &execute);
1328 static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
1330 struct tbl *table = rcu_dereference(dp->table);
1331 struct odp_stats stats;
1334 stats.n_flows = tbl_count(table);
1335 stats.cur_capacity = tbl_n_buckets(table);
1336 stats.max_capacity = TBL_MAX_BUCKETS;
1337 stats.n_ports = dp->n_ports;
1338 stats.max_ports = DP_MAX_PORTS;
1339 stats.max_groups = DP_MAX_GROUPS;
1340 stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
1341 for_each_possible_cpu(i) {
1342 const struct dp_stats_percpu *s;
1343 s = percpu_ptr(dp->stats_percpu, i);
1344 stats.n_frags += s->n_frags;
1345 stats.n_hit += s->n_hit;
1346 stats.n_missed += s->n_missed;
1347 stats.n_lost += s->n_lost;
1349 stats.max_miss_queue = DP_MAX_QUEUE_LEN;
1350 stats.max_action_queue = DP_MAX_QUEUE_LEN;
1351 return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
1354 /* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
1355 int dp_min_mtu(const struct datapath *dp)
1362 list_for_each_entry_rcu (p, &dp->port_list, node) {
1365 /* Skip any internal ports, since that's what we're trying to set. */
1367 if (is_internal_vport(p->vport))
1370 dev_mtu = vport_get_mtu(p->vport);
1371 if (!mtu || dev_mtu < mtu)
1375 return mtu ? mtu : ETH_DATA_LEN;
1378 /* Sets the MTU of all datapath devices to the minimum of the ports. Must
1379 * be called with RTNL lock. */
1380 void set_internal_devs_mtu(const struct datapath *dp)
1387 mtu = dp_min_mtu(dp);
1389 list_for_each_entry_rcu (p, &dp->port_list, node) {
1390 if (is_internal_vport(p->vport))
1391 vport_set_mtu(p->vport, mtu);
1396 put_port(const struct dp_port *p, struct odp_port __user *uop)
1400 memset(&op, 0, sizeof op);
1403 strncpy(op.devname, vport_get_name(p->vport), sizeof op.devname);
1406 op.port = p->port_no;
1407 op.flags = is_internal_vport(p->vport) ? ODP_PORT_INTERNAL : 0;
1409 return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
1413 query_port(struct datapath *dp, struct odp_port __user *uport)
1415 struct odp_port port;
1417 if (copy_from_user(&port, uport, sizeof port))
1420 if (port.devname[0]) {
1421 struct vport *vport;
1422 struct dp_port *dp_port;
1425 port.devname[IFNAMSIZ - 1] = '\0';
1430 vport = vport_locate(port.devname);
1436 dp_port = vport_get_dp_port(vport);
1437 if (!dp_port || dp_port->dp != dp) {
1442 port.port = dp_port->port_no;
1451 if (port.port >= DP_MAX_PORTS)
1453 if (!dp->ports[port.port])
1457 return put_port(dp->ports[port.port], uport);
1461 do_list_ports(struct datapath *dp, struct odp_port __user *uports, int n_ports)
1467 list_for_each_entry_rcu (p, &dp->port_list, node) {
1468 if (put_port(p, &uports[idx]))
1470 if (idx++ >= n_ports)
1478 list_ports(struct datapath *dp, struct odp_portvec __user *upv)
1480 struct odp_portvec pv;
1483 if (copy_from_user(&pv, upv, sizeof pv))
1486 retval = do_list_ports(dp, pv.ports, pv.n_ports);
1490 return put_user(retval, &upv->n_ports);
1493 /* RCU callback for freeing a dp_port_group */
1494 static void free_port_group(struct rcu_head *rcu)
1496 struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
1501 do_set_port_group(struct datapath *dp, u16 __user *ports, int n_ports, int group)
1503 struct dp_port_group *new_group, *old_group;
1507 if (n_ports > DP_MAX_PORTS || group >= DP_MAX_GROUPS)
1511 new_group = kmalloc(sizeof *new_group + sizeof(u16) * n_ports, GFP_KERNEL);
1515 new_group->n_ports = n_ports;
1517 if (copy_from_user(new_group->ports, ports, sizeof(u16) * n_ports))
1520 old_group = rcu_dereference(dp->groups[group]);
1521 rcu_assign_pointer(dp->groups[group], new_group);
1523 call_rcu(&old_group->rcu, free_port_group);
1533 set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
1535 struct odp_port_group pg;
1537 if (copy_from_user(&pg, upg, sizeof pg))
1540 return do_set_port_group(dp, pg.ports, pg.n_ports, pg.group);
1544 do_get_port_group(struct datapath *dp,
1545 u16 __user *ports, int n_ports, int group,
1546 u16 __user *n_portsp)
1548 struct dp_port_group *g;
1551 if (group >= DP_MAX_GROUPS)
1554 g = dp->groups[group];
1555 n_copy = g ? min_t(int, g->n_ports, n_ports) : 0;
1556 if (n_copy && copy_to_user(ports, g->ports, n_copy * sizeof(u16)))
1559 if (put_user(g ? g->n_ports : 0, n_portsp))
1565 static int get_port_group(struct datapath *dp, struct odp_port_group __user *upg)
1567 struct odp_port_group pg;
1569 if (copy_from_user(&pg, upg, sizeof pg))
1572 return do_get_port_group(dp, pg.ports, pg.n_ports, pg.group, &pg.n_ports);
1575 static int get_listen_mask(const struct file *f)
1577 return (long)f->private_data;
1580 static void set_listen_mask(struct file *f, int listen_mask)
1582 f->private_data = (void*)(long)listen_mask;
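/* The per-file listener mask is stored directly in file->private_data as an
 * integer bitmap of the _ODPL_* queues (validated against ODPL_ALL in the
 * ODP_SET_LISTEN_MASK ioctl below), so nothing needs to be allocated per
 * open file. */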
1585 static long openvswitch_ioctl(struct file *f, unsigned int cmd,
1588 int dp_idx = iminor(f->f_dentry->d_inode);
1589 struct datapath *dp;
1590 int drop_frags, listeners, port_no;
1591 unsigned int sflow_probability;
1594 /* Handle commands with special locking requirements up front. */
1597 err = create_dp(dp_idx, (char __user *)argp);
1600 case ODP_DP_DESTROY:
1601 err = destroy_dp(dp_idx);
1604 case ODP_PORT_ATTACH:
1605 err = attach_port(dp_idx, (struct odp_port __user *)argp);
1608 case ODP_PORT_DETACH:
1609 err = get_user(port_no, (int __user *)argp);
1611 err = detach_port(dp_idx, port_no);
1615 err = vport_add((struct odp_vport_add __user *)argp);
1619 err = vport_mod((struct odp_vport_mod __user *)argp);
1623 err = vport_del((char __user *)argp);
1626 case ODP_VPORT_STATS_GET:
1627 err = vport_stats_get((struct odp_vport_stats_req __user *)argp);
1630 case ODP_VPORT_ETHER_GET:
1631 err = vport_ether_get((struct odp_vport_ether __user *)argp);
1634 case ODP_VPORT_ETHER_SET:
1635 err = vport_ether_set((struct odp_vport_ether __user *)argp);
1638 case ODP_VPORT_MTU_GET:
1639 err = vport_mtu_get((struct odp_vport_mtu __user *)argp);
1642 case ODP_VPORT_MTU_SET:
1643 err = vport_mtu_set((struct odp_vport_mtu __user *)argp);
1647 dp = get_dp_locked(dp_idx);
1654 err = get_dp_stats(dp, (struct odp_stats __user *)argp);
1657 case ODP_GET_DROP_FRAGS:
1658 err = put_user(dp->drop_frags, (int __user *)argp);
1661 case ODP_SET_DROP_FRAGS:
1662 err = get_user(drop_frags, (int __user *)argp);
1666 if (drop_frags != 0 && drop_frags != 1)
1668 dp->drop_frags = drop_frags;
1672 case ODP_GET_LISTEN_MASK:
1673 err = put_user(get_listen_mask(f), (int __user *)argp);
1676 case ODP_SET_LISTEN_MASK:
1677 err = get_user(listeners, (int __user *)argp);
1681 if (listeners & ~ODPL_ALL)
1684 set_listen_mask(f, listeners);
1687 case ODP_GET_SFLOW_PROBABILITY:
1688 err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
1691 case ODP_SET_SFLOW_PROBABILITY:
1692 err = get_user(sflow_probability, (unsigned int __user *)argp);
1694 dp->sflow_probability = sflow_probability;
1697 case ODP_PORT_QUERY:
1698 err = query_port(dp, (struct odp_port __user *)argp);
1702 err = list_ports(dp, (struct odp_portvec __user *)argp);
1705 case ODP_PORT_GROUP_SET:
1706 err = set_port_group(dp, (struct odp_port_group __user *)argp);
1709 case ODP_PORT_GROUP_GET:
1710 err = get_port_group(dp, (struct odp_port_group __user *)argp);
1713 case ODP_FLOW_FLUSH:
1714 err = flush_flows(dp);
1718 err = put_flow(dp, (struct odp_flow_put __user *)argp);
1722 err = del_flow(dp, (struct odp_flow __user *)argp);
1726 err = do_flowvec_ioctl(dp, argp, do_query_flows);
1730 err = do_flowvec_ioctl(dp, argp, do_list_flows);
1734 err = execute_packet(dp, (struct odp_execute __user *)argp);
1741 mutex_unlock(&dp->mutex);
1746 static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
1749 for (i = 0; i < DP_N_QUEUES; i++) {
1750 if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
1756 #ifdef CONFIG_COMPAT
1757 static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __user *upv)
1759 struct compat_odp_portvec pv;
1762 if (copy_from_user(&pv, upv, sizeof pv))
1765 retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
1769 return put_user(retval, &upv->n_ports);
1772 static int compat_set_port_group(struct datapath *dp, const struct compat_odp_port_group __user *upg)
1774 struct compat_odp_port_group pg;
1776 if (copy_from_user(&pg, upg, sizeof pg))
1779 return do_set_port_group(dp, compat_ptr(pg.ports), pg.n_ports, pg.group);
1782 static int compat_get_port_group(struct datapath *dp, struct compat_odp_port_group __user *upg)
1784 struct compat_odp_port_group pg;
1786 if (copy_from_user(&pg, upg, sizeof pg))
1789 return do_get_port_group(dp, compat_ptr(pg.ports), pg.n_ports,
1790 pg.group, &pg.n_ports);
1793 static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
1795 compat_uptr_t actions;
1797 if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
1798 __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
1799 __copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
1800 __get_user(actions, &compat->actions) ||
1801 __get_user(flow->n_actions, &compat->n_actions) ||
1802 __get_user(flow->flags, &compat->flags))
1805 flow->actions = compat_ptr(actions);
1809 static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
1811 struct odp_flow_stats stats;
1812 struct odp_flow_put fp;
1815 if (compat_get_flow(&fp.flow, &ufp->flow) ||
1816 get_user(fp.flags, &ufp->flags))
1819 error = do_put_flow(dp, &fp, &stats);
1823 if (copy_to_user(&ufp->flow.stats, &stats,
1824 sizeof(struct odp_flow_stats)))
1830 static int compat_answer_query(struct sw_flow *flow, u32 query_flags,
1831 struct compat_odp_flow __user *ufp)
1833 compat_uptr_t actions;
1835 if (get_user(actions, &ufp->actions))
1838 return do_answer_query(flow, query_flags, &ufp->stats,
1839 compat_ptr(actions), &ufp->n_actions);
1842 static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
1844 struct sw_flow *flow;
1848 if (compat_get_flow(&uf, ufp))
1851 flow = do_del_flow(dp, &uf.key);
1853 return PTR_ERR(flow);
1855 error = compat_answer_query(flow, 0, ufp);
1856 flow_deferred_free(flow);
1860 static int compat_query_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
1862 struct tbl *table = rcu_dereference(dp->table);
1865 for (i = 0; i < n_flows; i++) {
1866 struct compat_odp_flow __user *ufp = &flows[i];
1868 struct tbl_node *flow_node;
1871 if (compat_get_flow(&uf, ufp))
1873 memset(uf.key.reserved, 0, sizeof uf.key.reserved);
1875 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1877 error = put_user(ENOENT, &ufp->stats.error);
1879 error = compat_answer_query(flow_cast(flow_node), uf.flags, ufp);
1886 struct compat_list_flows_cbdata {
1887 struct compat_odp_flow __user *uflows;
1892 static int compat_list_flow(struct tbl_node *node, void *cbdata_)
1894 struct sw_flow *flow = flow_cast(node);
1895 struct compat_list_flows_cbdata *cbdata = cbdata_;
1896 struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1899 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1901 error = compat_answer_query(flow, 0, ufp);
1905 if (cbdata->listed_flows >= cbdata->n_flows)
1906 return cbdata->listed_flows;
1910 static int compat_list_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
1912 struct compat_list_flows_cbdata cbdata;
1918 cbdata.uflows = flows;
1919 cbdata.n_flows = n_flows;
1920 cbdata.listed_flows = 0;
1921 error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata);
1922 return error ? error : cbdata.listed_flows;
1925 static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1926 int (*function)(struct datapath *,
1927 struct compat_odp_flow *,
1930 struct compat_odp_flowvec __user *uflowvec;
1931 struct compat_odp_flow __user *flows;
1932 struct compat_odp_flowvec flowvec;
1935 uflowvec = compat_ptr(argp);
1936 if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
1937 copy_from_user(&flowvec, uflowvec, sizeof flowvec))
1940 if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
1943 flows = compat_ptr(flowvec.flows);
1944 if (!access_ok(VERIFY_WRITE, flows,
1945 flowvec.n_flows * sizeof(struct compat_odp_flow)))
1948 retval = function(dp, flows, flowvec.n_flows);
1949 return (retval < 0 ? retval
1950 : retval == flowvec.n_flows ? 0
1951 : put_user(retval, &uflowvec->n_flows));
1954 static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
1956 struct odp_execute execute;
1957 compat_uptr_t actions;
1960 if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
1961 __get_user(execute.in_port, &uexecute->in_port) ||
1962 __get_user(actions, &uexecute->actions) ||
1963 __get_user(execute.n_actions, &uexecute->n_actions) ||
1964 __get_user(data, &uexecute->data) ||
1965 __get_user(execute.length, &uexecute->length))
1968 execute.actions = compat_ptr(actions);
1969 execute.data = compat_ptr(data);
1971 return do_execute(dp, &execute);
1974 static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
1976 int dp_idx = iminor(f->f_dentry->d_inode);
1977 struct datapath *dp;
1981 case ODP_DP_DESTROY:
1982 case ODP_FLOW_FLUSH:
1983 /* Ioctls that don't need any translation at all. */
1984 return openvswitch_ioctl(f, cmd, argp);
1987 case ODP_PORT_ATTACH:
1988 case ODP_PORT_DETACH:
1990 case ODP_VPORT_MTU_SET:
1991 case ODP_VPORT_MTU_GET:
1992 case ODP_VPORT_ETHER_SET:
1993 case ODP_VPORT_ETHER_GET:
1994 case ODP_VPORT_STATS_GET:
1996 case ODP_GET_DROP_FRAGS:
1997 case ODP_SET_DROP_FRAGS:
1998 case ODP_SET_LISTEN_MASK:
1999 case ODP_GET_LISTEN_MASK:
2000 case ODP_SET_SFLOW_PROBABILITY:
2001 case ODP_GET_SFLOW_PROBABILITY:
2002 case ODP_PORT_QUERY:
2003 /* Ioctls that just need their pointer argument extended. */
2004 return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
2006 case ODP_VPORT_ADD32:
2007 return compat_vport_add(compat_ptr(argp));
2009 case ODP_VPORT_MOD32:
2010 return compat_vport_mod(compat_ptr(argp));
2013 dp = get_dp_locked(dp_idx);
2019 case ODP_PORT_LIST32:
2020 err = compat_list_ports(dp, compat_ptr(argp));
2023 case ODP_PORT_GROUP_SET32:
2024 err = compat_set_port_group(dp, compat_ptr(argp));
2027 case ODP_PORT_GROUP_GET32:
2028 err = compat_get_port_group(dp, compat_ptr(argp));
2031 case ODP_FLOW_PUT32:
2032 err = compat_put_flow(dp, compat_ptr(argp));
2035 case ODP_FLOW_DEL32:
2036 err = compat_del_flow(dp, compat_ptr(argp));
2039 case ODP_FLOW_GET32:
2040 err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
2043 case ODP_FLOW_LIST32:
2044 err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
2048 err = compat_execute(dp, compat_ptr(argp));
2055 mutex_unlock(&dp->mutex);
2061 ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
2064 /* XXX is there sufficient synchronization here? */
2065 int listeners = get_listen_mask(f);
2066 int dp_idx = iminor(f->f_dentry->d_inode);
2067 struct datapath *dp = get_dp(dp_idx);
2068 struct sk_buff *skb;
2069 struct iovec __user iov;
2076 if (nbytes == 0 || !listeners)
2082 for (i = 0; i < DP_N_QUEUES; i++) {
2083 if (listeners & (1 << i)) {
2084 skb = skb_dequeue(&dp->queues[i]);
2090 if (f->f_flags & O_NONBLOCK) {
2095 wait_event_interruptible(dp->waitqueue,
2096 dp_has_packet_of_interest(dp,
2099 if (signal_pending(current)) {
2100 retval = -ERESTARTSYS;
2105 copy_bytes = min_t(size_t, skb->len, nbytes);
2107 iov.iov_len = copy_bytes;
2108 retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
2110 retval = copy_bytes;
2117 static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
2119 /* XXX is there sufficient synchronization here? */
2120 int dp_idx = iminor(file->f_dentry->d_inode);
2121 struct datapath *dp = get_dp(dp_idx);
2126 poll_wait(file, &dp->waitqueue, wait);
2127 if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
2128 mask |= POLLIN | POLLRDNORM;
2130 mask = POLLIN | POLLRDNORM | POLLHUP;
2135 struct file_operations openvswitch_fops = {
2136 /* XXX .aio_read = openvswitch_aio_read, */
2137 .read = openvswitch_read,
2138 .poll = openvswitch_poll,
2139 .unlocked_ioctl = openvswitch_ioctl,
2140 #ifdef CONFIG_COMPAT
2141 .compat_ioctl = openvswitch_compat_ioctl,
2143 /* XXX .fasync = openvswitch_fasync, */
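/* Illustrative userspace sketch (not part of this file): the character
 * device's minor number selects the datapath (see iminor() in the ioctl
 * handlers above) and control happens through the ODP_* ioctls.  The device
 * node path below is an assumption for the example, not something this file
 * defines:
 *
 *	int fd = open("/dev/net/dp0", O_RDWR);		// hypothetical node
 *	int mask = ODPL_ALL;				// listen for everything
 *	if (fd >= 0)
 *		ioctl(fd, ODP_SET_LISTEN_MASK, &mask);	// then read() upcalls
 */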
2148 static int __init dp_init(void)
2150 struct sk_buff *dummy_skb;
2153 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2155 printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2163 goto error_flow_exit;
2165 err = register_netdevice_notifier(&dp_device_notifier);
2167 goto error_vport_exit;
2169 major = register_chrdev(0, "openvswitch", &openvswitch_fops);
2171 goto error_unreg_notifier;
2175 error_unreg_notifier:
2176 unregister_netdevice_notifier(&dp_device_notifier);
2185 static void dp_cleanup(void)
2188 unregister_chrdev(major, "openvswitch");
2189 unregister_netdevice_notifier(&dp_device_notifier);
2194 module_init(dp_init);
2195 module_exit(dp_cleanup);
2197 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2198 MODULE_LICENSE("GPL");