/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <net/genetlink.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "checksum.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "loop_counter.h"
#include "odp-compat.h"
#include "table.h"
#include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and vport structures with just
 * dp_mutex.
 */
static struct datapath __rcu *dps[256];
static DEFINE_MUTEX(dp_mutex);

static struct vport *new_vport(const struct vport_parms *);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ARRAY_SIZE(dps))
		return NULL;

	return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
				     lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);

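/* Returns the datapath with index 'dp_idx', if it exists, with dp->mutex
 * held; otherwise returns NULL. */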
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

static struct tbl *get_table_protected(struct datapath *dp)
{
	return rcu_dereference_protected(dp->table,
					 lockdep_is_held(&dp->mutex));
}

static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
	return rcu_dereference_protected(dp->ports[port_no],
					 lockdep_is_held(&dp->mutex));
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}

static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (ifindex < 0)
		return ifindex;

	if (iflink < 0)
		return iflink;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER,
		    vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);
	int i;

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);

	tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
}

/* Caller must hold RTNL, dp_mutex, and dp->mutex. */
static void destroy_dp(struct datapath *dp)
{
	struct vport *p, *n;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p);

	dp_sysfs_del_dp(dp);
	rcu_assign_pointer(dps[dp->dp_idx], NULL);
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));

	mutex_unlock(&dp->mutex);
	call_rcu(&dp->rcu, destroy_dp_rcu);
	module_put(THIS_MODULE);
}

/* Called with RTNL lock and dp->mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;

		rcu_assign_pointer(dp->ports[parms->port_no], vport);
		list_add_rcu(&vport->node, &dp->port_list);

		dp_ifinfo_notify(RTM_NEWLINK, vport);
	}

	return vport;
}

int dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	return vport_del(p);
}

/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;
	int error;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			struct dp_upcall_info upcall;

			upcall.type = _ODPL_MISS_NR;
			upcall.key = &key;
			upcall.userdata = 0;
			upcall.sample_pool = 0;
			upcall.actions = NULL;
			upcall.actions_len = 0;
			dp_upcall(dp, skb, &upcall);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}

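/* Copies 'skb''s data into 'to', completing the partial checksum along the
 * way.  Only useful when skb->ip_summed == CHECKSUM_PARTIAL. */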
static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
	u16 csum_start, csum_offset;
	__wsum csum;

	get_skb_csum_pointers(skb, &csum_start, &csum_offset);
	csum_start -= skb_headroom(skb);
	BUG_ON(csum_start >= skb_headlen(skb));

	skb_copy_bits(skb, 0, to, csum_start);

	csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
				      skb->len - csum_start, 0);
	*(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}

/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
				 const struct dp_upcall_info *upcall_info)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_packet *upcall;
		struct sk_buff *user_skb; /* to be queued to userspace */
		struct nlattr *nla;
		unsigned int len;

		nskb = skb->next;
		skb->next = NULL;

		len = sizeof(struct odp_packet);
		len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
		len += nla_total_size(skb->len);
		len += nla_total_size(FLOW_BUFSIZE);
		if (upcall_info->userdata)
			len += nla_total_size(8);
		if (upcall_info->sample_pool)
			len += nla_total_size(4);
		if (upcall_info->actions_len)
			len += nla_total_size(upcall_info->actions_len);

		user_skb = alloc_skb(len, GFP_ATOMIC);
		if (!user_skb) {
			err = -ENOMEM;
			goto err_kfree_skbs;
		}

		upcall = (struct odp_packet *)__skb_put(user_skb, sizeof(*upcall));
		upcall->dp_idx = dp->dp_idx;

		nla_put_u32(user_skb, ODP_PACKET_ATTR_TYPE, upcall_info->type);

		nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
		flow_to_nlattrs(upcall_info->key, user_skb);
		nla_nest_end(user_skb, nla);

		if (upcall_info->userdata)
			nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
		if (upcall_info->sample_pool)
			nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
		if (upcall_info->actions_len) {
			const struct nlattr *actions = upcall_info->actions;
			u32 actions_len = upcall_info->actions_len;

			nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
			memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
			nla_nest_end(user_skb, nla);
		}

		nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			copy_and_csum_skb(skb, nla_data(nla));
		else
			skb_copy_bits(skb, 0, nla_data(nla), skb->len);

		upcall->len = user_skb->len;
		skb_queue_tail(&dp->queues[upcall_info->type], user_skb);

		kfree_skb(skb);
		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}

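/* Queues 'skb' on the userspace queue selected by 'upcall_info->type',
 * segmenting GSO packets first.  Takes ownership of 'skb' on both the
 * success and error paths. */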
int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(upcall_info->type >= DP_N_QUEUES);

	queue = &dp->queues[upcall_info->type];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

		kfree_skb(skb);
		skb = nskb;
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto err;
		}
	}

	err = queue_control_packets(dp, skb, upcall_info);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}

static int flush_flows(int dp_idx)
{
	struct tbl *old_table;
	struct tbl *new_table;
	struct datapath *dp;
	int err;

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	old_table = get_table_protected(dp);
	new_table = tbl_create(TBL_MIN_BUCKETS);
	err = -ENOMEM;
	if (!new_table)
		goto exit_unlock;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	err = 0;

exit_unlock:
	mutex_unlock(&dp->mutex);
exit:
	return err;
}

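/* Verifies that each action in the 'actions_len' bytes of netlink attributes
 * at 'actions' has a known type, the exact payload length required for that
 * type, and an argument value in range. */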
static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_attr(a, actions, actions_len, rem) {
		static const u32 action_lens[ODPAT_MAX + 1] = {
			[ODPAT_OUTPUT] = 4,
			[ODPAT_CONTROLLER] = 8,
			[ODPAT_SET_DL_TCI] = 2,
			[ODPAT_STRIP_VLAN] = 0,
			[ODPAT_SET_DL_SRC] = ETH_ALEN,
			[ODPAT_SET_DL_DST] = ETH_ALEN,
			[ODPAT_SET_NW_SRC] = 4,
			[ODPAT_SET_NW_DST] = 4,
			[ODPAT_SET_NW_TOS] = 1,
			[ODPAT_SET_TP_SRC] = 2,
			[ODPAT_SET_TP_DST] = 2,
			[ODPAT_SET_TUNNEL] = 8,
			[ODPAT_SET_PRIORITY] = 4,
			[ODPAT_POP_PRIORITY] = 0,
			[ODPAT_DROP_SPOOFED_ARP] = 0,
		};
		int type = nla_type(a);

		if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
			return -EINVAL;

		switch (type) {
		case ODPAT_UNSPEC:
			return -EINVAL;

		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (nla_get_u8(a) & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}

/* Decoded form of an odp_flow message received from userspace. */
struct dp_flowcmd {
	u32 nlmsg_flags;
	u32 dp_idx;
	u32 total_len;
	struct sw_flow_key key;
	const struct nlattr *actions;
	u32 actions_len;
	bool clear;
	u64 state;
};

static struct sw_flow_actions *get_actions(const struct dp_flowcmd *flowcmd)
{
	struct sw_flow_actions *actions;

	actions = flow_actions_alloc(flowcmd->actions_len);
	if (!IS_ERR(actions) && flowcmd->actions_len)
		memcpy(actions->actions, flowcmd->actions, flowcmd->actions_len);
	return actions;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}

static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
	struct sw_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (execute->length < ETH_HLEN || execute->length > 65535)
		goto error;

	actions = flow_actions_alloc(execute->actions_len);
	if (IS_ERR(actions)) {
		err = PTR_ERR(actions);
		goto error;
	}

	err = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)execute->actions, execute->actions_len))
		goto error_free_actions;

	err = validate_actions(actions->actions, execute->actions_len);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute->length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute->length),
			   (const void __user __force *)execute->data,
			   execute->length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	err = flow_extract(skb, -1, &key, &is_frag);
	if (err)
		goto error_free_skb;

	rcu_read_lock();
	err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
	rcu_read_unlock();

	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int execute_packet(const struct odp_execute __user *executep)
{
	struct odp_execute execute;
	struct datapath *dp;
	int error;

	if (copy_from_user(&execute, executep, sizeof(execute)))
		return -EFAULT;

	dp = get_dp_locked(execute.dp_idx);
	if (!dp)
		return -ENODEV;
	error = do_execute(dp, &execute);
	mutex_unlock(&dp->mutex);

	return error;
}

static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
{
	int i;

	stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats->n_frags += local_stats.n_frags;
		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
	}
}

/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}

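/* The set of _ODPL_* queues that a given open file listens to is stashed
 * directly in file->private_data, so each file handle carries its own
 * subscription mask. */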
static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}

static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
	[ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[ODP_FLOW_ATTR_STATE] = { .type = NLA_U64 },
};

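/* Serializes 'flow' as an odp_flow message and copies it to the 'total_len'
 * bytes of userspace memory at 'dst'.  'state' is the dump continuation
 * cookie, or 0 when not dumping. */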
static int copy_flow_to_user(struct odp_flow __user *dst, struct datapath *dp,
			     struct sw_flow *flow, u32 total_len, u64 state)
{
	const struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	struct odp_flow *odp_flow;
	struct sk_buff *skb;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_is_held(&dp->mutex));

	skb = alloc_skb(128 + FLOW_BUFSIZE + sf_acts->actions_len, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_flow = (struct odp_flow *)__skb_put(skb, sizeof(struct odp_flow));
	odp_flow->dp_idx = dp->dp_idx;
	odp_flow->total_len = total_len;

	nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;
	err = flow_to_nlattrs(&flow->key, skb);
	if (err)
		goto exit_free;
	nla_nest_end(skb, nla);

	nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
	if (!nla || skb_tailroom(skb) < sf_acts->actions_len)
		goto nla_put_failure;
	memcpy(__skb_put(skb, sf_acts->actions_len), sf_acts->actions, sf_acts->actions_len);
	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = flow->tcp_flags;
	spin_unlock_bh(&flow->lock);

	if (used)
		NLA_PUT_MSECS(skb, ODP_FLOW_ATTR_USED, used);

	if (stats.n_packets)
		NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);

	if (tcp_flags)
		NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);

	if (state)
		NLA_PUT_U64(skb, ODP_FLOW_ATTR_STATE, state);

	if (skb->len > total_len)
		goto nla_put_failure;

	odp_flow->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free;

nla_put_failure:
	err = -EMSGSIZE;
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static struct sk_buff *copy_flow_from_user(struct odp_flow __user *uodp_flow,
					   struct dp_flowcmd *flowcmd)
{
	struct nlattr *a[ODP_FLOW_ATTR_MAX + 1];
	struct odp_flow *odp_flow;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_flow->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_flow))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_flow, len))
		goto error_free_skb;

	odp_flow = (struct odp_flow *)skb->data;
	err = -EINVAL;
	if (odp_flow->len != len)
		goto error_free_skb;

	flowcmd->nlmsg_flags = odp_flow->nlmsg_flags;
	flowcmd->dp_idx = odp_flow->dp_idx;
	flowcmd->total_len = odp_flow->total_len;

	err = nla_parse(a, ODP_FLOW_ATTR_MAX,
			(struct nlattr *)(skb->data + sizeof(struct odp_flow)),
			skb->len - sizeof(struct odp_flow), flow_policy);
	if (err)
		goto error_free_skb;

	/* ODP_FLOW_ATTR_KEY. */
	if (a[ODP_FLOW_ATTR_KEY]) {
		err = flow_from_nlattrs(&flowcmd->key, a[ODP_FLOW_ATTR_KEY]);
		if (err)
			goto error_free_skb;
	} else
		memset(&flowcmd->key, 0, sizeof(struct sw_flow_key));

	/* ODP_FLOW_ATTR_ACTIONS. */
	if (a[ODP_FLOW_ATTR_ACTIONS]) {
		flowcmd->actions = nla_data(a[ODP_FLOW_ATTR_ACTIONS]);
		flowcmd->actions_len = nla_len(a[ODP_FLOW_ATTR_ACTIONS]);
		err = validate_actions(flowcmd->actions, flowcmd->actions_len);
		if (err)
			goto error_free_skb;
	} else {
		flowcmd->actions = NULL;
		flowcmd->actions_len = 0;
	}

	flowcmd->clear = a[ODP_FLOW_ATTR_CLEAR] != NULL;

	flowcmd->state = a[ODP_FLOW_ATTR_STATE] ? nla_get_u64(a[ODP_FLOW_ATTR_STATE]) : 0;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}

static int new_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	struct tbl *table;
	u32 hash;
	int error;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	error = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = get_dp_locked(flowcmd.dp_idx);
	error = -ENODEV;
	if (!dp)
		goto error_kfree_skb;

	hash = flow_hash(&flowcmd.key);
	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &flowcmd.key, hash, flow_cmp);
	if (!flow_node) {
		struct sw_flow_actions *acts;

		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (cmd == ODP_FLOW_SET)
			goto error_unlock_dp;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error_unlock_dp;
			table = get_table_protected(dp);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error_unlock_dp;
		}
		flow->key = flowcmd.key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&flowcmd);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
		if (error)
			goto error_free_flow;

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, hash);
		if (error)
			goto error_free_flow;
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed. */
		error = -EEXIST;
		if (flowcmd.nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto error_kfree_skb;

		/* Update actions. */
		flow = flow_cast(flow_node);
		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_is_held(&dp->mutex));
		if (flowcmd.actions &&
		    (old_acts->actions_len != flowcmd.actions_len ||
		     memcmp(old_acts->actions, flowcmd.actions,
			    flowcmd.actions_len))) {
			struct sw_flow_actions *new_acts;

			new_acts = get_actions(&flowcmd);
			error = PTR_ERR(new_acts);
			if (IS_ERR(new_acts))
				goto error_kfree_skb;

			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		}

		error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
		if (error)
			goto error_kfree_skb;

		/* Clear stats. */
		if (flowcmd.clear) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}

	mutex_unlock(&dp->mutex);
	kfree_skb(skb);
	return 0;

error_free_flow:
	flow_put(flow);
error_unlock_dp:
	mutex_unlock(&dp->mutex);
error_kfree_skb:
	kfree_skb(skb);
exit:
	return error;
}

static int get_or_del_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	struct tbl *table;
	int err;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = get_dp_locked(flowcmd.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_kfree_skb;

	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &flowcmd.key, flow_hash(&flowcmd.key), flow_cmp);
	err = -ENOENT;
	if (!flow_node)
		goto exit_unlock_dp;

	if (cmd == ODP_FLOW_DEL) {
		err = tbl_remove(table, flow_node);
		if (err)
			goto exit_unlock_dp;
	}

	flow = flow_cast(flow_node);
	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
	if (!err && cmd == ODP_FLOW_DEL)
		flow_deferred_free(flow);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}

static int dump_flow(struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 bucket, obj;
	int err;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = get_dp_locked(flowcmd.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_kfree_skb;

	bucket = flowcmd.state >> 32;
	obj = flowcmd.state;
	flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
	err = -ENODEV;
	if (!flow_node)
		goto exit_unlock_dp;

	flow = flow_cast(flow_node);
	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len,
				((u64)bucket << 32) | obj);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}

static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
	[ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
	[ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
};

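/* Serializes 'dp''s configuration and statistics as an odp_datapath message
 * and copies it to the 'total_len' bytes of userspace memory at 'dst'. */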
static int copy_datapath_to_user(void __user *dst, struct datapath *dp, uint32_t total_len)
{
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	struct nlattr *nla;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_datapath = (struct odp_datapath *)__skb_put(skb, sizeof(struct odp_datapath));
	odp_datapath->dp_idx = dp->dp_idx;
	odp_datapath->total_len = total_len;

	rcu_read_lock();
	err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
	if (!nla)
		goto nla_put_failure;
	get_dp_stats(dp, nla_data(nla));

	NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
		    dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);

	if (dp->sflow_probability)
		NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);

	if (skb->len > total_len)
		goto nla_put_failure;

	odp_datapath->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free_skb;

nla_put_failure:
	err = -EMSGSIZE;
exit_free_skb:
	kfree_skb(skb);
exit:
	return err;
}

static struct sk_buff *copy_datapath_from_user(struct odp_datapath __user *uodp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_datapath->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_datapath))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_datapath, len))
		goto error_free_skb;

	odp_datapath = (struct odp_datapath *)skb->data;
	err = -EINVAL;
	if (odp_datapath->len != len)
		goto error_free_skb;

	err = nla_parse(a, ODP_DP_ATTR_MAX,
			(struct nlattr *)(skb->data + sizeof(struct odp_datapath)),
			skb->len - sizeof(struct odp_datapath), datapath_policy);
	if (err)
		goto error_free_skb;

	if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
		u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);

		err = -EINVAL;
		if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
			goto error_free_skb;
	}

	err = VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}

/* Called with dp_mutex and optionally with RTNL lock also.
 * Holds the returned datapath's mutex on return.
 */
static struct datapath *lookup_datapath(struct odp_datapath *odp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	WARN_ON_ONCE(!mutex_is_locked(&dp_mutex));

	if (!a[ODP_DP_ATTR_NAME]) {
		struct datapath *dp;

		dp = get_dp(odp_datapath->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);
		mutex_lock(&dp->mutex);
		return dp;
	} else {
		struct datapath *dp;
		struct vport *vport;
		int dp_idx;

		vport_lock();
		vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
		dp_idx = vport && vport->port_no == ODPP_LOCAL ? vport->dp->dp_idx : -1;
		vport_unlock();

		if (dp_idx < 0)
			return ERR_PTR(-ENODEV);

		dp = get_dp(dp_idx);
		mutex_lock(&dp->mutex);
		return dp;
	}
}

static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	if (a[ODP_DP_ATTR_IPV4_FRAGS])
		dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
	if (a[ODP_DP_ATTR_SAMPLING])
		dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
}

static int new_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct vport_parms parms;
	struct sk_buff *skb;
	struct datapath *dp;
	struct vport *vport;
	int dp_idx;
	int err;
	int i;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto err;
	odp_datapath = (struct odp_datapath *)skb->data;

	err = -EINVAL;
	if (!a[ODP_DP_ATTR_NAME])
		goto err_free_skb;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock_dp_mutex;

	dp_idx = odp_datapath->dp_idx;
	if (dp_idx < 0) {
		err = -EFBIG;
		for (dp_idx = 0; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
			if (get_dp(dp_idx))
				continue;
			err = 0;
			break;
		}
	} else if (dp_idx < ARRAY_SIZE(dps))
		err = get_dp(dp_idx) ? -EBUSY : 0;
	else
		err = -EINVAL;
	if (err)
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	mutex_lock(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
	parms.type = ODP_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = ODPP_LOCAL;
	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_table;
	}

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_local_port;
	}

	change_datapath(dp, a);

	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	mutex_unlock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
	tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
	mutex_unlock(&dp->mutex);
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock_dp_mutex:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err_free_skb:
	kfree_skb(skb);
err:
	return err;
}

static int del_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	destroy_dp(dp);
	err = 0;

exit_free:
	kfree_skb(skb);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
exit:
	return err;
}

static int set_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	mutex_lock(&dp_mutex);
	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	change_datapath(dp, a);
	mutex_unlock(&dp->mutex);
	err = 0;

exit_free:
	kfree_skb(skb);
	mutex_unlock(&dp_mutex);
exit:
	return err;
}

static int get_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_datapath = (struct odp_datapath *)skb->data;

	mutex_lock(&dp_mutex);
	dp = lookup_datapath(odp_datapath, a);
	mutex_unlock(&dp_mutex);

	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
	mutex_unlock(&dp->mutex);

exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static int dump_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	u32 dp_idx;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_datapath = (struct odp_datapath *)skb->data;

	mutex_lock(&dp_mutex);
	for (dp_idx = odp_datapath->dp_idx; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
		struct datapath *dp = get_dp(dp_idx);
		if (!dp)
			continue;

		mutex_lock(&dp->mutex);
		mutex_unlock(&dp_mutex);
		err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
		mutex_unlock(&dp->mutex);
		goto exit_free;
	}
	mutex_unlock(&dp_mutex);
	err = -ENODEV;

exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
	[ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
	[ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
	[ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

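/* Serializes 'vport''s configuration and statistics as an odp_vport message
 * and copies it to the 'total_len' bytes of userspace memory at 'dst'. */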
static int copy_vport_to_user(void __user *dst, struct vport *vport, uint32_t total_len)
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct nlattr *nla;
	int ifindex, iflink;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_vport = (struct odp_vport *)__skb_put(skb, sizeof(struct odp_vport));
	odp_vport->dp_idx = vport->dp->dp_idx;
	odp_vport->total_len = total_len;

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
	NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
	NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));

	nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
	if (!nla)
		goto nla_put_failure;
	if (vport_get_stats(vport, nla_data(nla)))
		__skb_trim(skb, skb->len - nla->nla_len);

	NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));

	err = vport_get_options(vport, skb);

	ifindex = vport_get_ifindex(vport);
	if (ifindex > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);

	iflink = vport_get_iflink(vport);
	if (iflink > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);

	err = -EMSGSIZE;
	if (skb->len > total_len)
		goto exit_free;

	odp_vport->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free;

nla_put_failure:
	err = -EMSGSIZE;
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
					    struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_vport->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_vport))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
		goto error_free_skb;

	odp_vport = (struct odp_vport *)skb->data;
	err = -EINVAL;
	if (odp_vport->len != len)
		goto error_free_skb;

	err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
			skb->len - sizeof(struct odp_vport), vport_policy);
	if (err)
		goto error_free_skb;

	err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}

/* Called without any locks (or with RTNL lock).
 * Returns holding vport->dp->mutex.
 */
static struct vport *lookup_vport(struct odp_vport *odp_vport,
				  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[ODP_VPORT_ATTR_NAME]) {
		int dp_idx, port_no;

	retry:
		vport_lock();
		vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
		if (!vport) {
			vport_unlock();
			return ERR_PTR(-ENODEV);
		}
		dp_idx = vport->dp->dp_idx;
		port_no = vport->port_no;
		vport_unlock();

		dp = get_dp_locked(dp_idx);
		if (!dp)
			goto retry;

		vport = get_vport_protected(dp, port_no);
		if (!vport ||
		    strcmp(vport_get_name(vport), nla_data(a[ODP_VPORT_ATTR_NAME]))) {
			mutex_unlock(&dp->mutex);
			goto retry;
		}

		return vport;
	} else if (a[ODP_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EINVAL);

		dp = get_dp_locked(odp_vport->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = get_vport_protected(dp, port_no);
		if (!vport) {
			mutex_unlock(&dp->mutex);
			return ERR_PTR(-ENOENT);
		}
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}

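/* Applies the stats, Ethernet address, and MTU attributes in 'a', if any are
 * present, to 'vport', stopping at the first error. */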
static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	int err = 0;

	if (a[ODP_VPORT_ATTR_STATS])
		err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
	if (!err && a[ODP_VPORT_ATTR_ADDRESS])
		err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
	if (!err && a[ODP_VPORT_ATTR_MTU])
		err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
	return err;
}

static int attach_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport_parms parms;
	struct vport *vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	err = -EINVAL;
	if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
		goto exit_kfree_skb;

	rtnl_lock();
	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_rtnl;

	if (a[ODP_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock_dp;

		vport = get_vport_protected(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_dp;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_dp;
			}
			vport = get_vport_protected(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
	parms.options = a[ODP_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(vport);

	err = change_vport(vport, a);
	if (err) {
		dp_detach_port(vport);
		goto exit_unlock_dp;
	}

	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_unlock_rtnl:
	rtnl_unlock();
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}

static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = 0;
	if (a[ODP_VPORT_ATTR_OPTIONS])
		err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
	if (!err)
		err = change_vport(vport, a);

	mutex_unlock(&vport->dp->mutex);
exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}

static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct datapath *dp;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;
	dp = vport->dp;

	err = -EINVAL;
	if (vport->port_no == ODPP_LOCAL)
		goto exit_unlock_dp;

	err = dp_detach_port(vport);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}

static int get_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	vport = lookup_vport(odp_vport, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
	mutex_unlock(&vport->dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static int dump_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_free;

	port_no = 0;
	if (a[ODP_VPORT_ATTR_PORT_NO])
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
	for (; port_no < DP_MAX_PORTS; port_no++) {
		struct vport *vport = get_vport_protected(dp, port_no);
		if (vport) {
			err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
			goto exit_unlock_dp;
		}
	}
	err = -ENODEV;

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int listeners;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_NEW:
		err = new_datapath((struct odp_datapath __user *)argp);
		goto exit;
	case ODP_DP_GET:
		err = get_datapath((struct odp_datapath __user *)argp);
		goto exit;
	case ODP_DP_DEL:
		err = del_datapath((struct odp_datapath __user *)argp);
		goto exit;
	case ODP_DP_SET:
		err = set_datapath((struct odp_datapath __user *)argp);
		goto exit;
	case ODP_DP_DUMP:
		err = dump_datapath((struct odp_datapath __user *)argp);
		goto exit;
	case ODP_VPORT_NEW:
		err = attach_vport((struct odp_vport __user *)argp);
		goto exit;
	case ODP_VPORT_GET:
		err = get_vport((struct odp_vport __user *)argp);
		goto exit;
	case ODP_VPORT_DEL:
		err = del_vport(cmd, (struct odp_vport __user *)argp);
		goto exit;
	case ODP_VPORT_SET:
		err = set_vport(cmd, (struct odp_vport __user *)argp);
		goto exit;
	case ODP_VPORT_DUMP:
		err = dump_vport((struct odp_vport __user *)argp);
		goto exit;
	case ODP_FLOW_FLUSH:
		err = flush_flows(argp);
		goto exit;
	case ODP_FLOW_NEW:
	case ODP_FLOW_SET:
		err = new_flow(cmd, (struct odp_flow __user *)argp);
		goto exit;
	case ODP_FLOW_GET:
	case ODP_FLOW_DEL:
		err = get_or_del_flow(cmd, (struct odp_flow __user *)argp);
		goto exit;
	case ODP_FLOW_DUMP:
		err = dump_flow((struct odp_flow __user *)argp);
		goto exit;
	case ODP_EXECUTE:
		err = execute_packet((struct odp_execute __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}

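/* Returns nonzero if any of the queues selected by the 'listeners' bitmap
 * has at least one queued packet. */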
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_COMPAT
static int compat_execute(const struct compat_odp_execute __user *uexecute)
{
	struct odp_execute execute;
	compat_uptr_t actions;
	compat_uptr_t data;
	struct datapath *dp;
	int error;

	if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
	    __get_user(execute.dp_idx, &uexecute->dp_idx) ||
	    __get_user(actions, &uexecute->actions) ||
	    __get_user(execute.actions_len, &uexecute->actions_len) ||
	    __get_user(data, &uexecute->data) ||
	    __get_user(execute.length, &uexecute->length))
		return -EFAULT;

	execute.actions = (struct nlattr __force *)compat_ptr(actions);
	execute.data = (const void __force *)compat_ptr(data);

	dp = get_dp_locked(execute.dp_idx);
	if (!dp)
		return -ENODEV;
	error = do_execute(dp, &execute);
	mutex_unlock(&dp->mutex);

	return error;
}

static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	switch (cmd) {
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_NEW:
	case ODP_DP_GET:
	case ODP_DP_DEL:
	case ODP_DP_SET:
	case ODP_DP_DUMP:
	case ODP_VPORT_NEW:
	case ODP_VPORT_DEL:
	case ODP_VPORT_GET:
	case ODP_VPORT_SET:
	case ODP_VPORT_DUMP:
	case ODP_FLOW_NEW:
	case ODP_FLOW_DEL:
	case ODP_FLOW_GET:
	case ODP_FLOW_SET:
	case ODP_FLOW_DUMP:
	case ODP_SET_LISTEN_MASK:
	case ODP_GET_LISTEN_MASK:
		/* Ioctls that just need their pointer argument extended. */
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));

	case ODP_EXECUTE32:
		return compat_execute(compat_ptr(argp));

	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static ssize_t openvswitch_read(struct file *f, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	struct sk_buff *skb;
	struct iovec iov;
	int retval = 0;
	int i;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners)
		goto error;

	for (;;) {
		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	mutex_unlock(&dp->mutex);

	iov.iov_base = buf;
	iov.iov_len = min_t(size_t, skb->len, nbytes);
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = skb->len;

	kfree_skb(skb);
	return retval;

error:
	mutex_unlock(&dp->mutex);
	return retval;
}

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
		mutex_unlock(&dp->mutex);
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

static struct file_operations openvswitch_fops = {
	.owner = THIS_MODULE,
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = openvswitch_compat_ioctl,
#endif
};

static int major;

static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");