/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <net/genetlink.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "loop_counter.h"
#include "odp-compat.h"
#include "vport-internal_dev.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and vport structures with just
 * dp_mutex.
 */
static struct datapath __rcu *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);

static int new_vport(struct datapath *, struct odp_port *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
				     lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);
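
/*
 * rcu_dereference_check() combines an RCU-protected load with a lockdep
 * assertion: the access is legal either inside rcu_read_lock() or while
 * holding dp_mutex, and lockdep warns if neither condition holds.  A
 * minimal sketch of the read-side calling convention, with use_datapath()
 * standing in for a hypothetical consumer:
 */
#if 0
	rcu_read_lock();
	dp = get_dp(dp_idx);
	if (dp)
		use_datapath(dp);	/* valid until rcu_read_unlock() */
	rcu_read_unlock();
#endif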
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

static struct tbl *get_table_protected(struct datapath *dp)
{
	return rcu_dereference_protected(dp->table,
					 lockdep_is_held(&dp->mutex));
}
static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
	return rcu_dereference_protected(dp->ports[port_no],
					 lockdep_is_held(&dp->mutex));
}
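
/*
 * Note the division of labor between the two RCU accessors above:
 * rcu_dereference() is for the packet-processing read side (inside
 * rcu_read_lock()), while rcu_dereference_protected() asserts via lockdep
 * that the writer-side lock (dp->mutex) is held, so no read-side memory
 * barrier is required.
 */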
/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
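
/*
 * nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload); for example
 * nla_total_size(4) is 8 bytes: a 4-byte attribute header plus a 4-byte
 * payload, already aligned.  Summing the worst case for every attribute up
 * front guarantees that an skb allocated with this size can never overflow
 * while the message is filled in, so dp_fill_ifinfo() failing with
 * -EMSGSIZE would indicate a bug here (see the WARN_ON below).
 */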
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (ifindex < 0)
		return ifindex;

	if (iflink < 0)
		return iflink;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER,
		vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
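
/*
 * The NLA_PUT*() macros used above expand to "goto nla_put_failure" when
 * the skb runs out of tailroom, which is why the function ends with the
 * nlmsg_cancel() unwind path.  On success, nlmsg_end() patches the final
 * message length into the netlink header.
 */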
static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct odp_port internal_dev_port;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
		if (retval < 0) {
			err = -EFAULT;
			goto err;
		} else if (retval >= IFNAMSIZ) {
			err = -ENAMETOOLONG;
			goto err;
		}
	} else {
		snprintf(devname, sizeof(devname), "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	mutex_lock(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
	strcpy(internal_dev_port.devname, devname);
	strcpy(internal_dev_port.type, "internal");
	err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
	if (err)
		goto err_destroy_table;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_local_port;
	}

	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	mutex_unlock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
	tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
	mutex_unlock(&dp->mutex);
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);
	int i;

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);

	tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
}
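
/*
 * destroy_dp_rcu() runs from call_rcu() context after a grace period has
 * elapsed, so no RCU reader can still hold a reference to this datapath.
 * The struct itself is freed via kobject_put() -> release_dp() once the
 * last sysfs reference is also gone.
 */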
static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	struct vport *p, *n;
	int err = -ENODEV;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (!dp)
		goto out;

	mutex_lock(&dp->mutex);

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p);

	dp_sysfs_del_dp(dp);
	rcu_assign_pointer(dps[dp->dp_idx], NULL);
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
	mutex_unlock(&dp->mutex);
	call_rcu(&dp->rcu, destroy_dp_rcu);
	module_put(THIS_MODULE);
	err = 0;

out:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}

/* Called with RTNL lock and dp->mutex. */
static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
{
	struct vport_parms parms;
	struct vport *vport;

	parms.name = odp_port->devname;
	parms.type = odp_port->type;
	parms.config = odp_port->config;
	parms.dp = dp;
	parms.port_no = port_no;

	vport_lock();
	vport = vport_add(&parms);
	vport_unlock();

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	rcu_assign_pointer(dp->ports[port_no], vport);
	list_add_rcu(&vport->node, &dp->port_list);
	dp->n_ports++;

	dp_ifinfo_notify(RTM_NEWLINK, vport);

	return 0;
}
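
/*
 * Publication order matters above: rcu_assign_pointer() issues the write
 * barrier that makes the fully initialized vport visible before readers
 * can find it through dp->ports[] or the RCU port list, so a concurrent
 * dp_process_received_packet() sees either NULL or a complete structure.
 */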
static int attach_port(int dp_idx, struct odp_port __user *portp)
{
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof(port)))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';
	port.type[VPORT_TYPE_SIZE - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EFBIG;
	goto out_unlock_dp;

got_port_no:
	err = new_vport(dp, &port, port_no);
	if (err)
		goto out_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(get_vport_protected(dp, port_no));

	err = put_user(port_no, &portp->port);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

int dp_detach_port(struct vport *p)
{
	int err;

	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	p->dp->n_ports--;
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	vport_lock();
	err = vport_del(p);
	vport_unlock();

	return err;
}

static int detach_port(int dp_idx, int port_no)
{
	struct vport *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = get_vport_protected(dp, port_no);
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_detach_port(p);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}
/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;
	int error;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			struct dp_upcall_info upcall;

			upcall.type = _ODPL_MISS_NR;
			upcall.key = &key;
			upcall.userdata = 0;
			upcall.sample_pool = 0;
			upcall.actions = NULL;
			upcall.actions_len = 0;
			dp_upcall(dp, skb, &upcall);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}
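
/*
 * The statistics update above is the seqcount writer side: the per-CPU
 * counters are plain u64s, so on 32-bit SMP a reader could otherwise
 * observe a torn, half-updated value.  write_seqcount_begin()/end() bump
 * the sequence number so the reader in get_dp_stats() can detect and retry
 * a racing read.  The offsetof() trick lets one increment site serve all
 * the counters; with n_hit as the example field it is equivalent to the
 * sketch below:
 */
#if 0
	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	u64 *counter = (u64 *)((u8 *)stats + stats_counter_off);
	(*counter)++;	/* same effect as stats->n_hit++ */
#endif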
static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
	u16 csum_start, csum_offset;
	__wsum csum;

	get_skb_csum_pointers(skb, &csum_start, &csum_offset);
	csum_start -= skb_headroom(skb);
	BUG_ON(csum_start >= skb_headlen(skb));

	skb_copy_bits(skb, 0, to, csum_start);

	csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
				      skb->len - csum_start, 0);
	*(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}

/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
				 const struct dp_upcall_info *upcall_info)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_packet *upcall;
		struct sk_buff *user_skb; /* to be queued to userspace */
		struct nlattr *nla;
		unsigned int len;

		nskb = skb->next;
		skb->next = NULL;

		len = sizeof(struct odp_packet);
		len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
		len += nla_total_size(skb->len);
		len += nla_total_size(FLOW_BUFSIZE);
		if (upcall_info->userdata)
			len += nla_total_size(8);
		if (upcall_info->sample_pool)
			len += nla_total_size(4);
		if (upcall_info->actions_len)
			len += nla_total_size(upcall_info->actions_len);

		user_skb = alloc_skb(len, GFP_ATOMIC);
		err = -ENOMEM;
		if (!user_skb)
			goto err_kfree_skbs;

		upcall = (struct odp_packet *)__skb_put(user_skb, sizeof(*upcall));
		upcall->dp_idx = dp->dp_idx;

		nla_put_u32(user_skb, ODP_PACKET_ATTR_TYPE, upcall_info->type);

		nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
		flow_to_nlattrs(upcall_info->key, user_skb);
		nla_nest_end(user_skb, nla);

		if (upcall_info->userdata)
			nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
		if (upcall_info->sample_pool)
			nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
		if (upcall_info->actions_len) {
			const struct nlattr *actions = upcall_info->actions;
			u32 actions_len = upcall_info->actions_len;

			nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
			memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
			nla_nest_end(user_skb, nla);
		}

		nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			copy_and_csum_skb(skb, nla_data(nla));
		else
			skb_copy_bits(skb, 0, nla_data(nla), skb->len);

		upcall->len = user_skb->len;
		skb_queue_tail(&dp->queues[upcall_info->type], user_skb);

		kfree_skb(skb);
		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}
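
/*
 * Each message queued above is framed as a struct odp_packet header
 * followed by netlink attributes: the upcall type, the flattened flow key,
 * optional userdata/sample-pool/actions attributes, and finally the packet
 * payload itself.  The loop exists because skb_gso_segment() in dp_upcall()
 * may have turned one oversized GSO skb into a chain of MTU-sized segments,
 * each of which gets its own upcall message.
 */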
int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(upcall_info->type >= DP_N_QUEUES);

	queue = &dp->queues[upcall_info->type];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

		kfree_skb(skb);
		skb = nskb;
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto err;
		}
	}

	err = queue_control_packets(dp, skb, upcall_info);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}

static int flush_flows(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_create(TBL_MIN_BUCKETS);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	return 0;
}
static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_attr(a, actions, actions_len, rem) {
		static const u32 action_lens[ODPAT_MAX + 1] = {
			[ODPAT_OUTPUT] = 4,
			[ODPAT_CONTROLLER] = 8,
			[ODPAT_SET_DL_TCI] = 2,
			[ODPAT_STRIP_VLAN] = 0,
			[ODPAT_SET_DL_SRC] = ETH_ALEN,
			[ODPAT_SET_DL_DST] = ETH_ALEN,
			[ODPAT_SET_NW_SRC] = 4,
			[ODPAT_SET_NW_DST] = 4,
			[ODPAT_SET_NW_TOS] = 1,
			[ODPAT_SET_TP_SRC] = 2,
			[ODPAT_SET_TP_DST] = 2,
			[ODPAT_SET_TUNNEL] = 8,
			[ODPAT_SET_PRIORITY] = 4,
			[ODPAT_POP_PRIORITY] = 0,
			[ODPAT_DROP_SPOOFED_ARP] = 0,
		};
		int type = nla_type(a);

		if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
			return -EINVAL;

		switch (type) {
		case ODPAT_UNSPEC:
			return -EINVAL;

		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (nla_get_u8(a) & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}
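
/*
 * Validation above is two-stage: the static action_lens[] table rejects
 * any attribute whose payload length does not match its type, then the
 * switch applies per-type semantic checks, e.g. an output port must be in
 * range and a VLAN TCI must not set the CFI bit.  Types without their own
 * case fall through to the -EOPNOTSUPP default.
 */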
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->actions_len);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)flow->actions,
			   flow->actions_len))
		goto error_free_actions;
	error = validate_actions(actions->actions, actions->actions_len);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}

static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used) {
		struct timespec offset_ts, used, now_mono;

		ktime_get_ts(&now_mono);
		jiffies_to_timespec(jiffies - flow->used, &offset_ts);
		set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
					now_mono.tv_nsec - offset_ts.tv_nsec);

		stats->used_sec = used.tv_sec;
		stats->used_nsec = used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}

	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;

	stats->tcp_flags = flow->tcp_flags;
	stats->error = 0;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}
static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
		       struct odp_flow_stats *stats)
{
	struct tbl_node *flow_node;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct tbl *table;
	struct sw_flow_actions *acts = NULL;
	int error;
	u32 hash;

	error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf->flow.key,
				    uf->flow.key_len);
	if (error)
		return error;

	hash = flow_hash(&key);
	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &key, hash, flow_cmp);
	if (!flow_node) {
		/* No such flow. */
		error = -ENOENT;
		if (!(uf->flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error;
			table = get_table_protected(dp);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error;
		}
		flow->key = key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf->flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, hash);
		if (error)
			goto error_free_flow_acts;

		memset(stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;

		flow = flow_cast(flow_node);

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf->flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf->flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;

		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_is_held(&dp->mutex));
		if (old_acts->actions_len != new_acts->actions_len ||
		    memcmp(old_acts->actions, new_acts->actions,
			   old_acts->actions_len)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_bh(&flow->lock);
		get_stats(flow, stats);
		if (uf->flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_bh(&flow->lock);
	}

	return 0;

error_free_flow_acts:
	kfree(acts);
error_free_flow:
	flow->sf_acts = NULL;
	flow_put(flow);
error:
	return error;
}

static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		return -EFAULT;

	error = do_put_flow(dp, &uf, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}
static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
			   u32 query_flags,
			   struct odp_flow_stats __user *ustats,
			   struct nlattr __user *actions,
			   u32 __user *actions_lenp)
{
	struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	u32 actions_len;

	spin_lock_bh(&flow->lock);
	get_stats(flow, &stats);
	if (query_flags & ODPFF_ZERO_TCP_FLAGS)
		flow->tcp_flags = 0;

	spin_unlock_bh(&flow->lock);

	if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
	    get_user(actions_len, actions_lenp))
		return -EFAULT;

	if (!actions_len)
		return 0;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_is_held(&dp->mutex));
	if (put_user(sf_acts->actions_len, actions_lenp) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     min(sf_acts->actions_len, actions_len))))
		return -EFAULT;

	return 0;
}

static int answer_query(struct datapath *dp, struct sw_flow *flow,
			u32 query_flags, struct odp_flow __user *ufp)
{
	struct nlattr __user *actions;

	if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, query_flags,
			       &ufp->stats, actions, &ufp->actions_len);
}
static struct sw_flow *do_del_flow(struct datapath *dp, const struct nlattr __user *key, u32 key_len)
{
	struct tbl *table = get_table_protected(dp);
	struct tbl_node *flow_node;
	struct sw_flow_key swkey;
	int error;

	error = flow_copy_from_user(&swkey, key, key_len);
	if (error)
		return ERR_PTR(error);

	flow_node = tbl_lookup(table, &swkey, flow_hash(&swkey), flow_cmp);
	if (!flow_node)
		return ERR_PTR(-ENOENT);

	error = tbl_remove(table, flow_node);
	if (error)
		return ERR_PTR(error);

	/* XXX Returned flow_node's statistics might lose a few packets, since
	 * other CPUs can be using this flow.  We used to synchronize_rcu() to
	 * make sure that we get completely accurate stats, but that blows our
	 * performance, badly. */
	return flow_cast(flow_node);
}

static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(uf)))
		return -EFAULT;

	flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = answer_query(dp, flow, 0, ufp);
	flow_deferred_free(flow);

	return error;
}
static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct tbl *table = get_table_protected(dp);
	u32 i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = (struct odp_flow __user __force *)&flowvec->flows[i];
		struct sw_flow_key key;
		struct odp_flow uf;
		struct tbl_node *flow_node;
		int error;

		if (copy_from_user(&uf, ufp, sizeof(uf)))
			return -EFAULT;

		error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf.key, uf.key_len);
		if (error)
			return error;

		flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(dp, flow_cast(flow_node), uf.flags, ufp);
		if (error)
			return error;
	}
	return flowvec->n_flows;
}
static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}
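
/*
 * The ternary above encodes a three-way return convention for vectored
 * flow operations: a negative value is an error, processing all
 * flowvec.n_flows entries is reported as 0, and a short count is written
 * back to userspace through n_flows so the caller can tell how many
 * entries were actually handled.
 */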
static struct sw_flow *do_dump_flow(struct datapath *dp, u32 __user *state)
{
	struct tbl *table = get_table_protected(dp);
	struct tbl_node *tbl_node;
	u32 bucket, obj;

	if (get_user(bucket, &state[0]) || get_user(obj, &state[1]))
		return ERR_PTR(-EFAULT);

	tbl_node = tbl_next(table, &bucket, &obj);

	if (put_user(bucket, &state[0]) || put_user(obj, &state[1]))
		return ERR_PTR(-EFAULT);

	return tbl_node ? flow_cast(tbl_node) : NULL;
}

static int dump_flow(struct datapath *dp, struct odp_flow_dump __user *udumpp)
{
	struct odp_flow __user *uflowp;
	struct nlattr __user *ukey;
	struct sw_flow *flow;
	int key_len;

	flow = do_dump_flow(dp, udumpp->state);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (get_user(uflowp, (struct odp_flow __user *__user *)&udumpp->flow))
		return -EFAULT;

	if (!flow)
		return put_user(ODPFF_EOF, &uflowp->flags);

	if (put_user(0, &uflowp->flags) ||
	    get_user(ukey, (struct nlattr __user * __user *)&uflowp->key) ||
	    get_user(key_len, &uflowp->key_len))
		return -EFAULT;

	key_len = flow_copy_to_user(ukey, &flow->key, key_len);
	if (key_len < 0)
		return key_len;
	if (put_user(key_len, &uflowp->key_len))
		return -EFAULT;

	return answer_query(dp, flow, 0, uflowp);
}
static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
	struct sw_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (execute->length < ETH_HLEN || execute->length > 65535)
		goto error;

	actions = flow_actions_alloc(execute->actions_len);
	if (IS_ERR(actions)) {
		err = PTR_ERR(actions);
		goto error;
	}

	err = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)execute->actions, execute->actions_len))
		goto error_free_actions;

	err = validate_actions(actions->actions, execute->actions_len);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute->length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute->length),
			   (const void __user __force *)execute->data,
			   execute->length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	err = flow_extract(skb, -1, &key, &is_frag);
	if (err)
		goto error_free_skb;

	rcu_read_lock();
	err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
	rcu_read_unlock();

	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
{
	struct odp_execute execute;

	if (copy_from_user(&execute, executep, sizeof(execute)))
		return -EFAULT;

	return do_execute(dp, &execute);
}
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct tbl *table = get_table_protected(dp);
	struct odp_stats stats;
	int i;

	stats.n_flows = tbl_count(table);
	stats.cur_capacity = tbl_n_buckets(table);
	stats.max_capacity = TBL_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
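
		/*
		 * Reader side of the seqlock protecting the per-CPU
		 * counters: if a writer on that CPU updated the stats while
		 * we copied them, read_seqcount_retry() reports a sequence
		 * mismatch and the copy is retried, so the snapshot in
		 * local_stats is always consistent.
		 */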
		stats.n_frags += local_stats.n_frags;
		stats.n_hit += local_stats.n_hit;
		stats.n_missed += local_stats.n_missed;
		stats.n_lost += local_stats.n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof(stats)) ? -EFAULT : 0;
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}
static void compose_odp_port(const struct vport *vport, struct odp_port *odp_port)
{
	memset(odp_port, 0, sizeof(*odp_port));
	strncpy(odp_port->devname, vport_get_name(vport), sizeof(odp_port->devname));
	strncpy(odp_port->type, vport_get_type(vport), sizeof(odp_port->type));
	vport_get_config(vport, odp_port->config);
	odp_port->port = vport->port_no;
	odp_port->dp_idx = vport->dp->dp_idx;
}

static int query_port(int dp_idx, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof(port)))
		return -EFAULT;

	if (port.devname[0]) {
		struct vport *vport;

		port.devname[IFNAMSIZ - 1] = '\0';

		vport_lock();
		vport = vport_locate(port.devname);
		if (vport)
			compose_odp_port(vport, &port);
		vport_unlock();

		if (!vport)
			return -ENODEV;
	} else {
		struct vport *vport;
		struct datapath *dp;

		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;

		dp = get_dp_locked(dp_idx);
		if (!dp)
			return -ENODEV;

		vport = get_vport_protected(dp, port.port);
		if (vport)
			compose_odp_port(vport, &port);
		mutex_unlock(&dp->mutex);

		if (!vport)
			return -ENOENT;
	}

	return copy_to_user(uport, &port, sizeof(struct odp_port)) ? -EFAULT : 0;
}
static int do_dump_port(struct datapath *dp, struct odp_vport_dump *dump)
{
	u16 port_no;

	for (port_no = dump->port_no; port_no < DP_MAX_PORTS; port_no++) {
		struct vport *vport = get_vport_protected(dp, port_no);
		if (vport) {
			struct odp_port odp_port;

			compose_odp_port(vport, &odp_port);
			return copy_to_user((struct odp_port __force __user *)dump->port,
					    &odp_port, sizeof(struct odp_port)) ? -EFAULT : 0;
		}
	}

	return put_user('\0', (char __force __user *)&dump->port->devname[0]);
}

static int dump_port(struct datapath *dp, struct odp_vport_dump __user *udump)
{
	struct odp_vport_dump dump;

	if (copy_from_user(&dump, udump, sizeof(dump)))
		return -EFAULT;

	return do_dump_port(dp, &dump);
}
static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}
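
/*
 * The listen mask is small enough to be stashed directly in
 * file->private_data by casting through long, avoiding a per-open
 * allocation.  A hedged sketch of a caller (ODPL_MISS and ODPL_ACTION are
 * assumed here to be among the ODPL_* listener flags declared in
 * datapath-protocol.h):
 */
#if 0
	set_listen_mask(f, ODPL_MISS | ODPL_ACTION);	/* subscribe */
	mask = get_listen_mask(f);			/* later reads */
#endif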
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_VPORT_ATTACH:
		err = attach_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_DETACH:
		err = get_user(port_no, (int __user *)argp);
		if (!err)
			err = detach_port(dp_idx, port_no);
		goto exit;

	case ODP_VPORT_QUERY:
		err = query_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_MOD:
		err = vport_user_mod((struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_GET:
		err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_SET:
		err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_GET:
		err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_SET:
		err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_GET:
		err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_SET:
		err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	case ODP_VPORT_DUMP:
		err = dump_port(dp, (struct odp_vport_dump __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, do_query_flows);
		break;

	case ODP_FLOW_DUMP:
		err = dump_flow(dp, (struct odp_flow_dump __user *)argp);
		break;

	case ODP_EXECUTE:
		err = execute_packet(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}
#ifdef CONFIG_COMPAT
static int compat_dump_port(struct datapath *dp, struct compat_odp_vport_dump __user *compat)
{
	struct odp_vport_dump dump;
	compat_uptr_t port;

	if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_vport_dump)) ||
	    __get_user(port, &compat->port) ||
	    __get_user(dump.port_no, &compat->port_no))
		return -EFAULT;

	dump.port = (struct odp_port __force *)compat_ptr(port);
	return do_dump_port(dp, &dump);
}

static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
{
	compat_uptr_t key, actions;

	if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
	    __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
	    __get_user(key, &compat->key) ||
	    __get_user(flow->key_len, &compat->key_len) ||
	    __get_user(actions, &compat->actions) ||
	    __get_user(flow->actions_len, &compat->actions_len) ||
	    __get_user(flow->flags, &compat->flags))
		return -EFAULT;

	flow->key = (struct nlattr __force *)compat_ptr(key);
	flow->actions = (struct nlattr __force *)compat_ptr(actions);
	return 0;
}
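
/*
 * compat_get_flow() is the 32-on-64 translation step: pointer fields in
 * the 32-bit struct arrive as compat_uptr_t and must be widened with
 * compat_ptr() before the kernel can use them, while fixed-width members
 * (key_len, actions_len, flags) copy across unchanged.  The other
 * compat_*() handlers below follow the same pattern.
 */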
static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put fp;
	int error;

	if (compat_get_flow(&fp.flow, &ufp->flow) ||
	    get_user(fp.flags, &ufp->flags))
		return -EFAULT;

	error = do_put_flow(dp, &fp, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
			       u32 query_flags,
			       struct compat_odp_flow __user *ufp)
{
	compat_uptr_t actions;

	if (get_user(actions, &ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, query_flags, &ufp->stats,
			       compat_ptr(actions), &ufp->actions_len);
}

static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (compat_get_flow(&uf, ufp))
		return -EFAULT;

	flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = compat_answer_query(dp, flow, 0, ufp);
	flow_deferred_free(flow);

	return error;
}

static int compat_query_flows(struct datapath *dp,
			      struct compat_odp_flow __user *flows,
			      u32 n_flows)
{
	struct tbl *table = get_table_protected(dp);
	u32 i;

	for (i = 0; i < n_flows; i++) {
		struct compat_odp_flow __user *ufp = &flows[i];
		struct odp_flow uf;
		struct tbl_node *flow_node;
		struct sw_flow_key key;
		int error;

		if (compat_get_flow(&uf, ufp))
			return -EFAULT;

		error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf.key, uf.key_len);
		if (error)
			return error;

		flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = compat_answer_query(dp, flow_cast(flow_node),
						    uf.flags, ufp);
		if (error)
			return error;
	}
	return n_flows;
}
static int compat_dump_flow(struct datapath *dp, struct compat_odp_flow_dump __user *udumpp)
{
	struct compat_odp_flow __user *uflowp;
	compat_uptr_t compat_ufp;
	struct sw_flow *flow;
	compat_uptr_t ukey;
	int key_len;

	flow = do_dump_flow(dp, udumpp->state);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (get_user(compat_ufp, &udumpp->flow))
		return -EFAULT;
	uflowp = compat_ptr(compat_ufp);

	if (!flow)
		return put_user(ODPFF_EOF, &uflowp->flags);

	if (put_user(0, &uflowp->flags) ||
	    get_user(ukey, &uflowp->key) ||
	    get_user(key_len, &uflowp->key_len))
		return -EFAULT;

	key_len = flow_copy_to_user(compat_ptr(ukey), &flow->key, key_len);
	if (key_len < 0)
		return key_len;
	if (put_user(key_len, &uflowp->key_len))
		return -EFAULT;

	return compat_answer_query(dp, flow, 0, uflowp);
}

static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
				int (*function)(struct datapath *,
						struct compat_odp_flow __user *,
						u32 n_flows))
{
	struct compat_odp_flowvec __user *uflowvec;
	struct compat_odp_flow __user *flows;
	struct compat_odp_flowvec flowvec;
	int retval;

	uflowvec = compat_ptr(argp);
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof(*uflowvec)) ||
	    copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
		return -EINVAL;

	flows = compat_ptr(flowvec.flows);
	if (!access_ok(VERIFY_WRITE, flows,
		       flowvec.n_flows * sizeof(struct compat_odp_flow)))
		return -EFAULT;

	retval = function(dp, flows, flowvec.n_flows);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}

static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
{
	struct odp_execute execute;
	compat_uptr_t actions;
	compat_uptr_t data;

	if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
	    __get_user(actions, &uexecute->actions) ||
	    __get_user(execute.actions_len, &uexecute->actions_len) ||
	    __get_user(data, &uexecute->data) ||
	    __get_user(execute.length, &uexecute->length))
		return -EFAULT;

	execute.actions = (struct nlattr __force *)compat_ptr(actions);
	execute.data = (const void __force *)compat_ptr(data);

	return do_execute(dp, &execute);
}
static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	long err;

	switch (cmd) {
	case ODP_DP_DESTROY:
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_CREATE:
	case ODP_VPORT_ATTACH:
	case ODP_VPORT_DETACH:
	case ODP_VPORT_MOD:
	case ODP_VPORT_MTU_SET:
	case ODP_VPORT_MTU_GET:
	case ODP_VPORT_ETHER_SET:
	case ODP_VPORT_ETHER_GET:
	case ODP_VPORT_STATS_SET:
	case ODP_VPORT_STATS_GET:
	case ODP_DP_STATS:
	case ODP_GET_DROP_FRAGS:
	case ODP_SET_DROP_FRAGS:
	case ODP_SET_LISTEN_MASK:
	case ODP_GET_LISTEN_MASK:
	case ODP_SET_SFLOW_PROBABILITY:
	case ODP_GET_SFLOW_PROBABILITY:
	case ODP_VPORT_QUERY:
		/* Ioctls that just need their pointer argument extended. */
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_VPORT_DUMP32:
		err = compat_dump_port(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_PUT32:
		err = compat_put_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_DEL32:
		err = compat_del_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_GET32:
		err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
		break;

	case ODP_FLOW_DUMP32:
		err = compat_dump_flow(dp, compat_ptr(argp));
		break;

	case ODP_EXECUTE32:
		err = compat_execute(dp, compat_ptr(argp));
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
#endif
static ssize_t openvswitch_read(struct file *f, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	struct sk_buff *skb;
	struct iovec iov;
	int retval;

	if (!dp)
		return -ENODEV;

	retval = 0;
	if (nbytes == 0 || !listeners)
		goto error;

	for (;;) {
		int i;

		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	mutex_unlock(&dp->mutex);

	iov.iov_base = buf;
	iov.iov_len = min_t(size_t, skb->len, nbytes);
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = iov.iov_len;

	kfree_skb(skb);
	return retval;

error:
	mutex_unlock(&dp->mutex);
	return retval;
}

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
		mutex_unlock(&dp->mutex);
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}
static struct file_operations openvswitch_fops = {
	.owner = THIS_MODULE,
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = openvswitch_compat_ioctl,
#endif
};

static int major;
static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");