/*
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "loop_counter.h"
#include "odp-compat.h"
#include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the
 * RTNL lock first.
 *
 * It is safe to access the datapath and vport structures with just
 * dp_mutex. */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
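
/* A minimal sketch (not part of the original source) of the lock ordering
 * described above: the RTNL lock is always taken first, and dp_mutex nests
 * inside it.  Kept under '#if 0' because it exists purely as illustration;
 * the helper name is hypothetical. */
#if 0
static void example_write_side(void)
{
	rtnl_lock();		/* RTNL first... */
	mutex_lock(&dp_mutex);	/* ...then dp_mutex, which nests inside. */
	/* ... modify dps[] here ... */
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
}
#endif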

static int new_vport(struct datapath *, struct odp_port *, int port_no);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);
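
/* A minimal read-side sketch (not in the original source): looking up a
 * datapath under rcu_read_lock, as the comment above get_dp() requires.
 * The helper name is hypothetical; kept under '#if 0' as illustration. */
#if 0
static void example_read_side(int dp_idx)
{
	struct datapath *dp;

	rcu_read_lock();
	dp = get_dp(dp_idx);
	if (dp)
		pr_info("datapath %d is %s\n", dp_idx, dp_name(dp));
	rcu_read_unlock();
}
#endif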

static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(dp->ports[ODPP_LOCAL]);
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}

static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (ifindex < 0)
		return -ENODEV;

	if (iflink < 0)
		return -EINVAL;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);

	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct odp_port internal_dev_port;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
		if (retval < 0) {
			err = -EFAULT;
			goto err;
		} else if (retval >= IFNAMSIZ) {
			err = -ENAMETOOLONG;
			goto err;
		}
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(0));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
	strcpy(internal_dev_port.devname, devname);
	strcpy(internal_dev_port.type, "internal");
	err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
	if (err)
		goto err_destroy_table;

	err = -ENOMEM;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	dp_sysfs_add_dp(dp);

	return 0;

err_destroy_local_port:
	dp_detach_port(dp->ports[ODPP_LOCAL]);
err_destroy_table:
	tbl_destroy(dp->table, NULL);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}

static void do_destroy_dp(struct datapath *dp)
{
	struct vport *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p);

	dp_sysfs_del_dp(dp);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_detach_port(dp->ports[ODPP_LOCAL]);

	tbl_destroy(dp->table, flow_free_tbl);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}

static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	do_destroy_dp(dp);
	err = 0;

err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}

/* Called with RTNL lock and dp_mutex. */
static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
{
	struct vport_parms parms;
	struct vport *vport;

	parms.name = odp_port->devname;
	parms.type = odp_port->type;
	parms.config = odp_port->config;
	parms.dp = dp;
	parms.port_no = port_no;

	vport_lock();
	vport = vport_add(&parms);
	vport_unlock();

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	rcu_assign_pointer(dp->ports[port_no], vport);
	list_add_rcu(&vport->node, &dp->port_list);
	dp->n_ports++;

	dp_ifinfo_notify(RTM_NEWLINK, vport);

	return 0;
}

static int attach_port(int dp_idx, struct odp_port __user *portp)
{
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';
	port.type[VPORT_TYPE_SIZE - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EFBIG;
	goto out_unlock_dp;

got_port_no:
	err = new_vport(dp, &port, port_no);
	if (err)
		goto out_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(dp->ports[port_no]);

	err = put_user(port_no, &portp->port);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

int dp_detach_port(struct vport *p)
{
	int err;

	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	p->dp->n_ports--;
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	vport_lock();
	err = vport_del(p);
	vport_unlock();

	return err;
}

static int detach_port(int dp_idx, int port_no)
{
	struct vport *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_detach_port(p);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct odp_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;
		int error;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->n_actions);
	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
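
	/* stats_counter_off selects which u64 counter to bump (n_frags,
	 * n_missed, or n_hit); the seqcount lets readers snapshot a
	 * consistent 64-bit value even on 32-bit SMP. */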
	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}

#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* This code is based on skb_checksum_setup() from Xen's net/dev/core.c.  We
 * can't call this function directly because it isn't exported in all
 * versions. */
int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	__u16 csum_start, csum_offset;

	if (!skb->proto_csum_blank)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	if (!pskb_may_pull(skb, skb_network_header(skb) + sizeof(struct iphdr) - skb->data))
		goto out;

	iph = ip_hdr(skb);
	th = skb_network_header(skb) + 4 * iph->ihl;

	csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		pr_err("Attempting to checksum a non-TCP/UDP packet, "
		       "dropping a protocol %d packet",
		       iph->protocol);
		goto out;
	}

	if (!pskb_may_pull(skb, th + csum_offset + 2 - skb->data))
		goto out;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->proto_csum_blank = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	skb->csum_start = csum_start;
	skb->csum_offset = csum_offset;
#else
	skb_set_transport_header(skb, csum_start - skb_headroom(skb));
	skb->csum = csum_offset;
#endif

	err = 0;

out:
	return err;
}
#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */

/* Types of checksums that we can receive (these all refer to L4 checksums):
 * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
 *    (though not verified) checksum in packet but not in skb->csum.  Packets
 *    from the bridge local port will also have this type.
 * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
 *    also the GRE module.  This is the same as CHECKSUM_NONE, except it has
 *    a valid skb->csum.  Importantly, both contain a full checksum (not
 *    verified) in the packet itself.  The only difference is that if the
 *    packet gets to L4 processing on this machine (not in DomU) we won't
 *    have to recompute the checksum to verify.  Most hardware devices do not
 *    produce packets with this type, even if they support receive checksum
 *    offloading (they produce type #5).
 * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without full checksum and needs to
 *    be computed if it is sent off box.  Unfortunately on earlier kernels,
 *    this case is impossible to distinguish from #2, despite having opposite
 *    meanings.  Xen adds an extra field on earlier kernels (see #4) in order
 *    to distinguish the different states.
 * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
 *    generated locally by a Xen DomU and has a partial checksum.  If it is
 *    handled on this machine (Dom0 or DomU), then the checksum will not be
 *    computed.  If it goes off box, the checksum in the packet needs to be
 *    completed.  Calling skb_checksum_setup converts this to CHECKSUM_HW
 *    (CHECKSUM_PARTIAL) so that the checksum can be completed.  In later
 *    kernels, this combination is replaced with CHECKSUM_PARTIAL.
 * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
 *    full checksum or using a protocol without a checksum.  skb->csum is
 *    undefined.  This is common from devices with receive checksum
 *    offloading.  This is somewhat similar to CHECKSUM_NONE, except that
 *    nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
 *
 * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
 * both defined as CHECKSUM_HW.  Normally the meaning of CHECKSUM_HW is clear
 * based on whether it is on the transmit or receive path.  After the datapath
 * it will be interpreted as CHECKSUM_PARTIAL.  If the packet already has a
 * checksum, we will panic.  Since we can receive packets with checksums, we
 * assume that all CHECKSUM_HW packets have checksums and map them to
 * CHECKSUM_NONE, which has a similar meaning (it is only different if the
 * packet is processed by the local IP stack, in which case it will need to
 * be reverified).  If we receive a packet with CHECKSUM_HW that really means
 * CHECKSUM_PARTIAL, it will be sent with the wrong checksum.  However, there
 * shouldn't be any devices that do this with bridging. */
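
/* Summary of the mapping implemented below (added for reference):
 *
 *   kernel skb->ip_summed          OVS_CB(skb)->ip_summed
 *   ----------------------         ----------------------
 *   CHECKSUM_NONE             ->   OVS_CSUM_NONE
 *   CHECKSUM_UNNECESSARY      ->   OVS_CSUM_UNNECESSARY
 *   CHECKSUM_COMPLETE         ->   OVS_CSUM_COMPLETE
 *   CHECKSUM_PARTIAL          ->   OVS_CSUM_PARTIAL
 *   CHECKSUM_HW (old kernels) ->   OVS_CSUM_COMPLETE on receive,
 *                                  OVS_CSUM_PARTIAL on transmit
 *   proto_csum_blank (Xen)    ->   OVS_CSUM_PARTIAL
 */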
void compute_ip_summed(struct sk_buff *skb, bool xmit)
{
	/* For our convenience these defines change repeatedly between kernel
	 * versions, so we can't just copy them over... */
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
		break;
	case CHECKSUM_UNNECESSARY:
		OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
		break;
#ifdef CHECKSUM_HW
	/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
	 * However, on the receive side we should only get CHECKSUM_PARTIAL
	 * packets from Xen, which uses some special fields to represent this
	 * (see below).  Since we can only make one type work, pick the one
	 * that actually happens in practice.
	 *
	 * On the transmit side (basically after skb_checksum_setup()
	 * has been run or on internal dev transmit), packets with
	 * CHECKSUM_COMPLETE aren't generated, so assume CHECKSUM_PARTIAL. */
	case CHECKSUM_HW:
		if (!xmit)
			OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		else
			OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
		break;
#else
	case CHECKSUM_COMPLETE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		break;
	case CHECKSUM_PARTIAL:
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
		break;
#endif
	default:
		pr_err("unknown checksum type %d\n", skb->ip_summed);
		/* None seems the safest... */
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
	}

#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
	/* Xen has a special way of representing CHECKSUM_PARTIAL on older
	 * kernels.  It should not be set on the transmit path though. */
	if (skb->proto_csum_blank)
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;

	WARN_ON_ONCE(skb->proto_csum_blank && xmit);
#endif
}

/* This function closely resembles skb_forward_csum() used by the bridge.  It
 * is slightly different because we are only concerned with bridging and not
 * other types of forwarding and can get away with slightly more optimal
 * behavior. */
void forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
	if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
#endif
}

/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
				 int queue_no, u32 arg)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_msg *header;

		nskb = skb->next;
		skb->next = NULL;

		err = skb_cow(skb, sizeof *header);
		if (err)
			goto err_kfree_skbs;

		header = (struct odp_msg *)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}

int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		      u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
		if (nskb) {
			kfree_skb(skb);
			skb = nskb;
			if (unlikely(IS_ERR(skb))) {
				err = PTR_ERR(skb);
				goto err;
			}
		} else {
			/* XXX This case might not be possible.  It's hard to
			 * tell from the skb_gso_segment() code and comment. */
		}
	}

	err = queue_control_packets(skb, queue, queue_no, arg);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}

static int flush_flows(struct datapath *dp)
{
	struct tbl *old_table = rcu_dereference(dp->table);
	struct tbl *new_table;

	new_table = tbl_create(0);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	return 0;
}

static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];

		switch (a->type) {
		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (a->dl_tci.tci & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (a->nw_tos.nw_tos & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}

static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used) {
		struct timespec offset_ts, used, now_mono;

		ktime_get_ts(&now_mono);
		jiffies_to_timespec(jiffies - flow->used, &offset_ts);
		set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
					now_mono.tv_nsec - offset_ts.tv_nsec);

		stats->used_sec = used.tv_sec;
		stats->used_nsec = used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}

	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->reserved = 0;
	stats->tcp_flags = flow->tcp_flags;
	stats->error = 0;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = rcu_dereference(dp->table);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}

static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
		       struct odp_flow_stats *stats)
{
	struct tbl_node *flow_node;
	struct sw_flow *flow;
	struct tbl *table;
	int error;

	table = rcu_dereference(dp->table);
	flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
	if (!flow_node) {
		/* No such flow. */
		struct sw_flow_actions *acts;

		error = -ENOENT;
		if (!(uf->flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error;
			table = rcu_dereference(dp->table);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error;
		}
		flow->key = uf->flow.key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf->flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
		if (error)
			goto error_free_flow_acts;

		memset(stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;

		flow = flow_cast(flow_node);

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf->flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf->flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;
		old_acts = rcu_dereference(flow->sf_acts);
		if (old_acts->n_actions != new_acts->n_actions ||
		    memcmp(old_acts->actions, new_acts->actions,
			   sizeof(union odp_action) * old_acts->n_actions)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_bh(&flow->lock);
		get_stats(flow, stats);
		if (uf->flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_bh(&flow->lock);
	}

	return 0;

error_free_flow_acts:
	kfree(flow->sf_acts);
error_free_flow:
	flow->sf_acts = NULL;
	flow_put(flow);
error:
	return error;
}

static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		return -EFAULT;

	error = do_put_flow(dp, &uf, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int do_answer_query(struct sw_flow *flow, u32 query_flags,
			   struct odp_flow_stats __user *ustats,
			   union odp_action __user *actions,
			   u32 __user *n_actionsp)
{
	struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	u32 n_actions;

	spin_lock_bh(&flow->lock);
	get_stats(flow, &stats);
	if (query_flags & ODPFF_ZERO_TCP_FLAGS)
		flow->tcp_flags = 0;

	spin_unlock_bh(&flow->lock);

	if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
	    get_user(n_actions, n_actionsp))
		return -EFAULT;

	if (!n_actions)
		return 0;

	sf_acts = rcu_dereference(flow->sf_acts);
	if (put_user(sf_acts->n_actions, n_actionsp) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     sizeof(union odp_action) *
				     min(sf_acts->n_actions, n_actions))))
		return -EFAULT;

	return 0;
}

static int answer_query(struct sw_flow *flow, u32 query_flags,
			struct odp_flow __user *ufp)
{
	union odp_action __user *actions;

	if (get_user(actions, &ufp->actions))
		return -EFAULT;

	return do_answer_query(flow, query_flags,
			       &ufp->stats, actions, &ufp->n_actions);
}

static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
{
	struct tbl *table = rcu_dereference(dp->table);
	struct tbl_node *flow_node;
	int error;

	flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
	if (!flow_node)
		return ERR_PTR(-ENOENT);

	error = tbl_remove(table, flow_node);
	if (error)
		return ERR_PTR(error);

	/* XXX Returned flow_node's statistics might lose a few packets, since
	 * other CPUs can be using this flow.  We used to synchronize_rcu() to
	 * make sure that we get completely accurate stats, but that blows our
	 * performance, badly. */
	return flow_cast(flow_node);
}

static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof uf))
		return -EFAULT;

	flow = do_del_flow(dp, &uf.key);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = answer_query(flow, 0, ufp);
	flow_deferred_free(flow);
	return error;
}

static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct tbl *table = rcu_dereference(dp->table);
	u32 i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct tbl_node *flow_node;
		int error;

		if (copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;

		flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(flow_cast(flow_node), uf.flags, ufp);
		if (error)
			return error;
	}
	return flowvec->n_flows;
}

struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	u32 n_flows;
	u32 listed_flows;
};

static int list_flow(struct tbl_node *node, void *cbdata_)
{
	struct sw_flow *flow = flow_cast(node);
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;

	error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}

static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	retval = function(dp, &flowvec);
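	/* Convention: a negative retval is an error; retval == n_flows means
	 * every flow was handled, so report plain success; otherwise write
	 * the number actually handled back to userspace. */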
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}

static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (execute->length < ETH_HLEN || execute->length > 65535)
		goto error;

	actions = flow_actions_alloc(execute->n_actions);
	if (IS_ERR(actions)) {
		err = PTR_ERR(actions);
		goto error;
	}

	err = -EFAULT;
	if (copy_from_user(actions->actions, execute->actions,
			   execute->n_actions * sizeof *execute->actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute->length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute->length), execute->data,
			   execute->length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	err = flow_extract(skb, -1, &key, &is_frag);
	if (err)
		goto error_free_skb;

	rcu_read_lock();
	err = execute_actions(dp, skb, &key, actions->actions, actions->n_actions);
	rcu_read_unlock();

	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
{
	struct odp_execute execute;

	if (copy_from_user(&execute, executep, sizeof execute))
		return -EFAULT;

	return do_execute(dp, &execute);
}

static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct tbl *table = rcu_dereference(dp->table);
	struct odp_stats stats;
	int i;

	stats.n_flows = tbl_count(table);
	stats.cur_capacity = tbl_n_buckets(table);
	stats.max_capacity = TBL_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats.n_frags += local_stats.n_frags;
		stats.n_hit += local_stats.n_hit;
		stats.n_missed += local_stats.n_missed;
		stats.n_lost += local_stats.n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}

/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}

static int put_port(const struct vport *p, struct odp_port __user *uop)
{
	struct odp_port op;

	memset(&op, 0, sizeof op);

	rcu_read_lock();
	strncpy(op.devname, vport_get_name(p), sizeof op.devname);
	strncpy(op.type, vport_get_type(p), sizeof op.type);
	rcu_read_unlock();

	op.port = p->port_no;

	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}

static int query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;

	if (port.devname[0]) {
		struct vport *vport;
		int err = 0;

		port.devname[IFNAMSIZ - 1] = '\0';

		vport_lock();

		vport = vport_locate(port.devname);
		if (!vport) {
			err = -ENODEV;
			goto error_unlock;
		}
		if (vport->dp != dp) {
			err = -ENOENT;
			goto error_unlock;
		}

		port.port = vport->port_no;

error_unlock:
		vport_unlock();
		if (err)
			return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
	}

	return put_port(dp->ports[port.port], uport);
}

static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
			 int n_ports)
{
	struct vport *p;
	int idx = 0;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (put_port(p, &uports[idx]))
			return -EFAULT;
		if (idx++ >= n_ports)
			break;
	}

	return idx;
}

static int list_ports(struct datapath *dp, struct odp_portvec __user *upv)
{
	struct odp_portvec pv;
	int retval;

	if (copy_from_user(&pv, upv, sizeof pv))
		return -EFAULT;

	retval = do_list_ports(dp, pv.ports, pv.n_ports);
	if (retval < 0)
		return retval;

	return put_user(retval, &upv->n_ports);
}

static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}

static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_VPORT_ATTACH:
		err = attach_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_DETACH:
		err = get_user(port_no, (int __user *)argp);
		if (!err)
			err = detach_port(dp_idx, port_no);
		goto exit;

	case ODP_VPORT_MOD:
		err = vport_user_mod((struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_GET:
		err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_SET:
		err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_GET:
		err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_SET:
		err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_GET:
		err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_SET:
		err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	case ODP_VPORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_VPORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, do_query_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, do_list_flows);
		break;

	case ODP_EXECUTE:
		err = execute_packet(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}

static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}

	return 0;
}

#ifdef CONFIG_COMPAT
static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __user *upv)
{
	struct compat_odp_portvec pv;
	int retval;

	if (copy_from_user(&pv, upv, sizeof pv))
		return -EFAULT;

	retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
	if (retval < 0)
		return retval;

	return put_user(retval, &upv->n_ports);
}

static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
{
	compat_uptr_t actions;

	if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
	    __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
	    __copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
	    __get_user(actions, &compat->actions) ||
	    __get_user(flow->n_actions, &compat->n_actions) ||
	    __get_user(flow->flags, &compat->flags))
		return -EFAULT;

	flow->actions = compat_ptr(actions);
	return 0;
}

static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put fp;
	int error;

	if (compat_get_flow(&fp.flow, &ufp->flow) ||
	    get_user(fp.flags, &ufp->flags))
		return -EFAULT;

	error = do_put_flow(dp, &fp, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int compat_answer_query(struct sw_flow *flow, u32 query_flags,
			       struct compat_odp_flow __user *ufp)
{
	compat_uptr_t actions;

	if (get_user(actions, &ufp->actions))
		return -EFAULT;

	return do_answer_query(flow, query_flags, &ufp->stats,
			       compat_ptr(actions), &ufp->n_actions);
}

static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (compat_get_flow(&uf, ufp))
		return -EFAULT;

	flow = do_del_flow(dp, &uf.key);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = compat_answer_query(flow, 0, ufp);
	flow_deferred_free(flow);
	return error;
}

static int compat_query_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
{
	struct tbl *table = rcu_dereference(dp->table);
	u32 i;

	for (i = 0; i < n_flows; i++) {
		struct compat_odp_flow __user *ufp = &flows[i];
		struct odp_flow uf;
		struct tbl_node *flow_node;
		int error;

		if (compat_get_flow(&uf, ufp))
			return -EFAULT;

		flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = compat_answer_query(flow_cast(flow_node), uf.flags, ufp);
		if (error)
			return error;
	}
	return n_flows;
}

struct compat_list_flows_cbdata {
	struct compat_odp_flow __user *uflows;
	u32 n_flows;
	u32 listed_flows;
};

static int compat_list_flow(struct tbl_node *node, void *cbdata_)
{
	struct sw_flow *flow = flow_cast(node);
	struct compat_list_flows_cbdata *cbdata = cbdata_;
	struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = compat_answer_query(flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int compat_list_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
{
	struct compat_list_flows_cbdata cbdata;
	int error;

	if (!n_flows)
		return 0;

	cbdata.uflows = flows;
	cbdata.n_flows = n_flows;
	cbdata.listed_flows = 0;

	error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}

static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
				int (*function)(struct datapath *,
						struct compat_odp_flow *,
						u32 n_flows))
{
	struct compat_odp_flowvec __user *uflowvec;
	struct compat_odp_flow __user *flows;
	struct compat_odp_flowvec flowvec;
	int retval;

	uflowvec = compat_ptr(argp);
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
		return -EINVAL;

	flows = compat_ptr(flowvec.flows);
	if (!access_ok(VERIFY_WRITE, flows,
		       flowvec.n_flows * sizeof(struct compat_odp_flow)))
		return -EFAULT;

	retval = function(dp, flows, flowvec.n_flows);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}

static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
{
	struct odp_execute execute;
	compat_uptr_t actions;
	compat_uptr_t data;

	if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
	    __get_user(actions, &uexecute->actions) ||
	    __get_user(execute.n_actions, &uexecute->n_actions) ||
	    __get_user(data, &uexecute->data) ||
	    __get_user(execute.length, &uexecute->length))
		return -EFAULT;

	execute.actions = compat_ptr(actions);
	execute.data = compat_ptr(data);

	return do_execute(dp, &execute);
}

static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int err;

	switch (cmd) {
	case ODP_DP_DESTROY:
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_CREATE:
	case ODP_VPORT_ATTACH:
	case ODP_VPORT_DETACH:
	case ODP_VPORT_MOD:
	case ODP_VPORT_MTU_SET:
	case ODP_VPORT_MTU_GET:
	case ODP_VPORT_ETHER_SET:
	case ODP_VPORT_ETHER_GET:
	case ODP_VPORT_STATS_SET:
	case ODP_VPORT_STATS_GET:
	case ODP_DP_STATS:
	case ODP_GET_DROP_FRAGS:
	case ODP_SET_DROP_FRAGS:
	case ODP_SET_LISTEN_MASK:
	case ODP_GET_LISTEN_MASK:
	case ODP_SET_SFLOW_PROBABILITY:
	case ODP_GET_SFLOW_PROBABILITY:
	case ODP_VPORT_QUERY:
		/* Ioctls that just need their pointer argument extended. */
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_VPORT_LIST32:
		err = compat_list_ports(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_PUT32:
		err = compat_put_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_DEL32:
		err = compat_del_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_GET32:
		err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
		break;

	case ODP_FLOW_LIST32:
		err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
		break;

	case ODP_EXECUTE32:
		err = compat_execute(dp, compat_ptr(argp));
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
#endif /* CONFIG_COMPAT */

/* Unfortunately this function is not exported so this is a verbatim copy
 * from net/core/datagram.c in 2.6.30. */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int pos = 0;
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
							frag->page_offset +
							offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list=list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list,
							       offset - start,
							       to, copy,
							       &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
			 loff_t *ppos)
{
	/* XXX is there sufficient synchronization here? */
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	struct sk_buff *skb;
	size_t copy_bytes, tot_copy_bytes;
	int retval;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners)
		return 0;

	for (;;) {
		int i;

		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);

	retval = 0;
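
	/* A CHECKSUM_PARTIAL packet carries no finished L4 checksum yet.  If
	 * userspace reads the whole packet, compute the checksum while
	 * copying and store the folded result at csum_offset; for a partial
	 * read, fall back to skb_checksum_help() to finish it in-kernel. */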
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (copy_bytes == skb->len) {
			__wsum csum = 0;
			unsigned int csum_start, csum_offset;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
			csum_start = skb->csum_start - skb_headroom(skb);
			csum_offset = skb->csum_offset;
#else
			csum_start = skb_transport_header(skb) - skb->data;
			csum_offset = skb->csum;
#endif
			BUG_ON(csum_start >= skb_headlen(skb));
			retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
							    copy_bytes - csum_start, &csum);
			if (!retval) {
				__sum16 __user *csump;

				copy_bytes = csum_start;
				csump = (__sum16 __user *)(buf + csum_start + csum_offset);

				BUG_ON((char __user *)csump + sizeof(__sum16) > buf + nbytes);
				put_user(csum_fold(csum), csump);
			}
		} else
			retval = skb_checksum_help(skb);
	}

	if (!retval) {
		struct iovec __user iov;

		iov.iov_base = buf;
		iov.iov_len = copy_bytes;
		retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);

		if (!retval)
			retval = tot_copy_bytes;
	}
	kfree_skb(skb);

error:
	return retval;
}

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	/* XXX is there sufficient synchronization here? */
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = openvswitch_compat_ioctl,
#endif
	/* XXX .fasync = openvswitch_fasync, */
};

static int major;

static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");