/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>

#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>

#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <net/genetlink.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "loop_counter.h"
#include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and vport structures with just
 * dp_mutex. */
static struct datapath __rcu *dps[256];
static DEFINE_MUTEX(dp_mutex);

static struct vport *new_vport(const struct vport_parms *);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ARRAY_SIZE(dps))
		return NULL;
	return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
					 lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);

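/*
 * Illustration (not from the original source): a read-side caller can rely
 * on RCU alone, e.g.
 *
 *	rcu_read_lock();
 *	dp = get_dp(dp_idx);
 *	if (dp)
 *		... read-only accesses to dp ...
 *	rcu_read_unlock();
 *
 * Writers instead serialize on dp_mutex (and then dp->mutex), which is what
 * get_dp_locked() below provides.
 */
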
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

static struct tbl *get_table_protected(struct datapath *dp)
{
	return rcu_dereference_protected(dp->table,
					 lockdep_is_held(&dp->mutex));
}

static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
	return rcu_dereference_protected(dp->ports[port_no],
					 lockdep_is_held(&dp->mutex));
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}

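/* Note: br_nlmsg_size() must account for every attribute that
 * dp_fill_ifinfo() emits; if it undercounts, nlmsg_new() allocates too
 * little and dp_fill_ifinfo() fails with -EMSGSIZE, which the caller
 * flags with WARN_ON(). */
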
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER,
		vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);

	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);
	int i;

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);

	tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
}

/* Caller must hold RTNL, dp_mutex, and dp->mutex. */
static void destroy_dp(struct datapath *dp)
{
	struct vport *p, *n;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));

	mutex_unlock(&dp->mutex);
	call_rcu(&dp->rcu, destroy_dp_rcu);
	module_put(THIS_MODULE);
}

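/* Teardown ordering: every non-local port is detached first, the datapath
 * is unpublished from dps[] so new lookups cannot find it, the local port
 * goes last, and the final free is deferred through destroy_dp_rcu() so
 * in-flight RCU readers stay safe. */
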
/* Called with RTNL lock and dp->mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;

		rcu_assign_pointer(dp->ports[parms->port_no], vport);
		list_add_rcu(&vport->node, &dp->port_list);

		dp_ifinfo_notify(RTM_NEWLINK, vport);
	}

	return vport;
}

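/* Note that new_vport() publishes the vport with rcu_assign_pointer()
 * before sending the RTM_NEWLINK notification, so by the time userspace
 * sees the event the port is already visible to the packet path. */
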
int dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	return vport_del(p);
}

/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;
	int error;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p->port_no, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			struct dp_upcall_info upcall;

			upcall.type = _ODPL_MISS_NR;
			upcall.key = &key;
			upcall.userdata = 0;
			upcall.sample_pool = 0;
			upcall.actions = NULL;
			upcall.actions_len = 0;
			dp_upcall(dp, skb, &upcall);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}

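/* The statistics above use a per-CPU seqcount: the writer brackets the
 * counter bump with write_seqcount_begin()/end() so that get_dp_stats()
 * can detect and retry torn reads without taking any lock on this hot
 * path.  The offsetof() trick lets a single code path bump whichever of
 * n_frags, n_missed, or n_hit applies. */
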
static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
	u16 csum_start, csum_offset;
	__wsum csum;

	get_skb_csum_pointers(skb, &csum_start, &csum_offset);
	csum_start -= skb_headroom(skb);
	BUG_ON(csum_start >= skb_headlen(skb));

	skb_copy_bits(skb, 0, to, csum_start);

	csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
				      skb->len - csum_start, 0);
	*(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}

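/* For CHECKSUM_PARTIAL skbs the checksum was never filled in by hardware,
 * so the copy above computes it in software: bytes before csum_start are
 * copied verbatim, the rest is copied while summing, and the folded
 * result lands at csum_start + csum_offset in the copy. */
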
/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
				 const struct dp_upcall_info *upcall_info)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_packet *upcall;
		struct sk_buff *user_skb; /* to be queued to userspace */
		struct nlattr *nla;
		unsigned int len;

		nskb = skb->next;
		skb->next = NULL;

		len = sizeof(struct odp_packet);
		len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
		len += nla_total_size(skb->len);
		len += nla_total_size(FLOW_BUFSIZE);
		if (upcall_info->userdata)
			len += nla_total_size(8);
		if (upcall_info->sample_pool)
			len += nla_total_size(4);
		if (upcall_info->actions_len)
			len += nla_total_size(upcall_info->actions_len);

		user_skb = alloc_skb(len, GFP_ATOMIC);
		err = -ENOMEM;
		if (!user_skb)
			goto err_kfree_skbs;

		upcall = (struct odp_packet *)__skb_put(user_skb, sizeof(*upcall));
		upcall->dp_idx = dp->dp_idx;

		nla_put_u32(user_skb, ODP_PACKET_ATTR_TYPE, upcall_info->type);

		nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
		flow_to_nlattrs(upcall_info->key, user_skb);
		nla_nest_end(user_skb, nla);

		if (upcall_info->userdata)
			nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
		if (upcall_info->sample_pool)
			nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
		if (upcall_info->actions_len) {
			const struct nlattr *actions = upcall_info->actions;
			u32 actions_len = upcall_info->actions_len;

			nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
			memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
			nla_nest_end(user_skb, nla);
		}

		nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			copy_and_csum_skb(skb, nla_data(nla));
		else
			skb_copy_bits(skb, 0, nla_data(nla), skb->len);

		upcall->len = user_skb->len;
		skb_queue_tail(&dp->queues[upcall_info->type], user_skb);

		kfree_skb(skb);
		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}

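/* dp_upcall() is the entry point for sending a packet to userspace: it
 * checks queue space, fixes up checksums, segments GSO packets, and then
 * queues the pieces via queue_control_packets(), counting any failure in
 * n_lost. */
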
int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(upcall_info->type >= DP_N_QUEUES);

	queue = &dp->queues[upcall_info->type];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

		kfree_skb(skb);
		skb = nskb;
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto err;
		}
	}

	err = queue_control_packets(dp, skb, upcall_info);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}

static int flush_flows(int dp_idx)
{
	struct tbl *old_table;
	struct tbl *new_table;
	struct datapath *dp;
	int err;

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	old_table = get_table_protected(dp);
	new_table = tbl_create(TBL_MIN_BUCKETS);
	err = -ENOMEM;
	if (!new_table)
		goto exit_unlock;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	err = 0;

exit_unlock:
	mutex_unlock(&dp->mutex);
exit:
	return err;
}

static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_attr(a, actions, actions_len, rem) {
		static const u32 action_lens[ODPAT_MAX + 1] = {
			[ODPAT_OUTPUT] = 4,
			[ODPAT_CONTROLLER] = 8,
			[ODPAT_SET_DL_TCI] = 2,
			[ODPAT_STRIP_VLAN] = 0,
			[ODPAT_SET_DL_SRC] = ETH_ALEN,
			[ODPAT_SET_DL_DST] = ETH_ALEN,
			[ODPAT_SET_NW_SRC] = 4,
			[ODPAT_SET_NW_DST] = 4,
			[ODPAT_SET_NW_TOS] = 1,
			[ODPAT_SET_TP_SRC] = 2,
			[ODPAT_SET_TP_DST] = 2,
			[ODPAT_SET_TUNNEL] = 8,
			[ODPAT_SET_PRIORITY] = 4,
			[ODPAT_POP_PRIORITY] = 0,
			[ODPAT_DROP_SPOOFED_ARP] = 0,
		};
		int type = nla_type(a);

		if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
			return -EINVAL;

		switch (type) {
		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (nla_get_u8(a) & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}

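/* For illustration (hypothetical buffer, not from the original source): a
 * minimal actions buffer that passes these checks is a single ODPAT_OUTPUT
 * attribute, i.e. an nlattr header followed by a 4-byte port number that
 * is less than DP_MAX_PORTS. */
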
struct dp_flowcmd {
	u32 nlmsg_flags;
	u32 dp_idx;
	u32 total_len;
	struct sw_flow_key key;
	const struct nlattr *actions;
	u32 actions_len;
	bool clear;
	u64 state;
};

static struct sw_flow_actions *get_actions(const struct dp_flowcmd *flowcmd)
{
	struct sw_flow_actions *actions;

	actions = flow_actions_alloc(flowcmd->actions_len);
	if (!IS_ERR(actions) && flowcmd->actions_len)
		memcpy(actions->actions, flowcmd->actions, flowcmd->actions_len);
	return actions;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}

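/* Classic RCU replace: the expanded copy is published with
 * rcu_assign_pointer(), and tbl_deferred_destroy() with a NULL destructor
 * frees only the old bucket array after a grace period, so concurrent
 * readers of the old table stay valid and the flows themselves remain
 * shared with the new table. */
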
static const struct nla_policy execute_policy[ODP_PACKET_ATTR_MAX + 1] = {
	[ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
	[ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static int execute_packet(const struct odp_packet __user *uodp_packet)
{
	struct nlattr *a[ODP_PACKET_ATTR_MAX + 1];
	struct odp_packet *odp_packet;
	struct sk_buff *skb, *packet;
	unsigned int actions_len;
	struct nlattr *actions;
	struct sw_flow_key key;
	struct datapath *dp;
	struct ethhdr *eth;
	bool is_frag;
	u32 len;
	int err;

	if (get_user(len, &uodp_packet->len))
		return -EFAULT;
	if (len < sizeof(struct odp_packet))
		return -EINVAL;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_packet, len))
		goto error_free_skb;

	odp_packet = (struct odp_packet *)skb->data;
	err = -EINVAL;
	if (odp_packet->len != len)
		goto error_free_skb;

	__skb_pull(skb, sizeof(struct odp_packet));
	err = nla_parse(a, ODP_PACKET_ATTR_MAX, (struct nlattr *)skb->data,
			skb->len, execute_policy);
	if (err)
		goto error_free_skb;

	err = -EINVAL;
	if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
	    nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
		goto error_free_skb;

	actions = nla_data(a[ODP_PACKET_ATTR_ACTIONS]);
	actions_len = nla_len(a[ODP_PACKET_ATTR_ACTIONS]);
	err = validate_actions(actions, actions_len);
	if (err)
		goto error_free_skb;

	err = -ENOMEM;
	packet = skb_clone(skb, GFP_KERNEL);
	if (!packet)
		goto error_free_skb;
	packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
	packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	err = flow_extract(packet, -1, &key, &is_frag);
	if (err) {
		kfree_skb(packet);
		goto error_free_skb;
	}

	rcu_read_lock();
	dp = get_dp(odp_packet->dp_idx);
	if (dp) {
		err = execute_actions(dp, packet, &key, actions, actions_len);
	} else {
		err = -ENODEV;
		kfree_skb(packet);
	}
	rcu_read_unlock();

error_free_skb:
	kfree_skb(skb);
	return err;
}

static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
{
	int i;

	stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats->n_frags += local_stats.n_frags;
		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
	}
}

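/* Reader side of the per-CPU seqcount: the snapshot is retried until a
 * full copy completes without a concurrent writer, then the per-CPU
 * copies are summed.  This pairs with the write_seqcount_begin()/end()
 * sections in dp_process_received_packet() and dp_upcall(). */
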
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}

static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}

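/* The listen mask is small enough to be stashed directly in
 * file->private_data (cast through long), so each open file handle
 * carries its own subscription to the DP_N_QUEUES queues without any
 * extra allocation. */
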
static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
	[ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[ODP_FLOW_ATTR_STATE] = { .type = NLA_U64 },
};

static int copy_flow_to_user(struct odp_flow __user *dst, struct datapath *dp,
			     struct sw_flow *flow, u32 total_len, u64 state)
{
	const struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	struct odp_flow *odp_flow;
	struct sk_buff *skb;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_is_held(&dp->mutex));

	skb = alloc_skb(128 + FLOW_BUFSIZE + sf_acts->actions_len, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_flow = (struct odp_flow *)__skb_put(skb, sizeof(struct odp_flow));
	odp_flow->dp_idx = dp->dp_idx;
	odp_flow->total_len = total_len;

	nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;
	err = flow_to_nlattrs(&flow->key, skb);
	if (err)
		goto exit_free;
	nla_nest_end(skb, nla);

	nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
	if (!nla || skb_tailroom(skb) < sf_acts->actions_len)
		goto nla_put_failure;
	memcpy(__skb_put(skb, sf_acts->actions_len), sf_acts->actions, sf_acts->actions_len);
	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = flow->tcp_flags;
	spin_unlock_bh(&flow->lock);

	if (used)
		NLA_PUT_MSECS(skb, ODP_FLOW_ATTR_USED, used);

	if (stats.n_packets)
		NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);

	if (tcp_flags)
		NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);

	if (state)
		NLA_PUT_U64(skb, ODP_FLOW_ATTR_STATE, state);

	if (skb->len > total_len)
		goto nla_put_failure;

	odp_flow->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free;

nla_put_failure:
	err = -EMSGSIZE;
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static struct sk_buff *copy_flow_from_user(struct odp_flow __user *uodp_flow,
					   struct dp_flowcmd *flowcmd)
{
	struct nlattr *a[ODP_FLOW_ATTR_MAX + 1];
	struct odp_flow *odp_flow;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_flow->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_flow))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_flow, len))
		goto error_free_skb;

	odp_flow = (struct odp_flow *)skb->data;
	err = -EINVAL;
	if (odp_flow->len != len)
		goto error_free_skb;

	flowcmd->nlmsg_flags = odp_flow->nlmsg_flags;
	flowcmd->dp_idx = odp_flow->dp_idx;
	flowcmd->total_len = odp_flow->total_len;

	err = nla_parse(a, ODP_FLOW_ATTR_MAX,
			(struct nlattr *)(skb->data + sizeof(struct odp_flow)),
			skb->len - sizeof(struct odp_flow), flow_policy);
	if (err)
		goto error_free_skb;

	/* ODP_FLOW_ATTR_KEY. */
	if (a[ODP_FLOW_ATTR_KEY]) {
		err = flow_from_nlattrs(&flowcmd->key, a[ODP_FLOW_ATTR_KEY]);
		if (err)
			goto error_free_skb;
	} else
		memset(&flowcmd->key, 0, sizeof(struct sw_flow_key));

	/* ODP_FLOW_ATTR_ACTIONS. */
	if (a[ODP_FLOW_ATTR_ACTIONS]) {
		flowcmd->actions = nla_data(a[ODP_FLOW_ATTR_ACTIONS]);
		flowcmd->actions_len = nla_len(a[ODP_FLOW_ATTR_ACTIONS]);
		err = validate_actions(flowcmd->actions, flowcmd->actions_len);
		if (err)
			goto error_free_skb;
	} else {
		flowcmd->actions = NULL;
		flowcmd->actions_len = 0;
	}

	flowcmd->clear = a[ODP_FLOW_ATTR_CLEAR] != NULL;

	flowcmd->state = a[ODP_FLOW_ATTR_STATE] ? nla_get_u64(a[ODP_FLOW_ATTR_STATE]) : 0;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}

static int new_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	struct tbl *table;
	u32 hash;
	int error;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	error = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = get_dp_locked(flowcmd.dp_idx);
	error = -ENODEV;
	if (!dp)
		goto error_kfree_skb;

	hash = flow_hash(&flowcmd.key);
	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &flowcmd.key, hash, flow_cmp);
	if (!flow_node) {
		struct sw_flow_actions *acts;

		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (cmd == ODP_FLOW_SET)
			goto error_unlock_dp;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error_unlock_dp;
			table = get_table_protected(dp);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error_unlock_dp;
		}
		flow->key = flowcmd.key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&flowcmd);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
		if (error)
			goto error_free_flow;

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, hash);
		if (error)
			goto error_free_flow;
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		error = -EEXIST;
		if (flowcmd.nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto error_unlock_dp;

		/* Update actions. */
		flow = flow_cast(flow_node);
		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_is_held(&dp->mutex));
		if (flowcmd.actions &&
		    (old_acts->actions_len != flowcmd.actions_len ||
		     memcmp(old_acts->actions, flowcmd.actions,
			    flowcmd.actions_len))) {
			struct sw_flow_actions *new_acts;

			new_acts = get_actions(&flowcmd);
			error = PTR_ERR(new_acts);
			if (IS_ERR(new_acts))
				goto error_unlock_dp;

			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		}

		error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
		if (error)
			goto error_unlock_dp;

		/* Clear stats. */
		if (flowcmd.clear) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}

	mutex_unlock(&dp->mutex);
	kfree_skb(skb);
	return 0;

error_free_flow:
	flow_put(flow);
error_unlock_dp:
	mutex_unlock(&dp->mutex);
error_kfree_skb:
	kfree_skb(skb);
exit:
	return error;
}

static int get_or_del_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	struct tbl *table;
	int err;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = get_dp_locked(flowcmd.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_kfree_skb;

	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &flowcmd.key, flow_hash(&flowcmd.key), flow_cmp);
	err = -ENOENT;
	if (!flow_node)
		goto exit_unlock_dp;

	if (cmd == ODP_FLOW_DEL) {
		err = tbl_remove(table, flow_node);
		if (err)
			goto exit_unlock_dp;
	}

	flow = flow_cast(flow_node);
	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
	if (!err && cmd == ODP_FLOW_DEL)
		flow_deferred_free(flow);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}

static int dump_flow(struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 bucket, obj;
	int err;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = get_dp_locked(flowcmd.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_kfree_skb;

	bucket = flowcmd.state >> 32;
	obj = flowcmd.state;
	flow_node = tbl_next(dp->table, &bucket, &obj);
	err = -ENOENT;
	if (!flow_node)
		goto exit_unlock_dp;

	flow = flow_cast(flow_node);
	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len,
				((u64)bucket << 32) | obj);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}

static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
	[ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
	[ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
};

static int copy_datapath_to_user(void __user *dst, struct datapath *dp, uint32_t total_len)
{
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	struct nlattr *nla;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_datapath = (struct odp_datapath *)__skb_put(skb, sizeof(struct odp_datapath));
	odp_datapath->dp_idx = dp->dp_idx;
	odp_datapath->total_len = total_len;

	err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
	if (err)
		goto nla_put_failure;

	nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
	if (!nla)
		goto nla_put_failure;
	get_dp_stats(dp, nla_data(nla));

	NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
		    dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);

	if (dp->sflow_probability)
		NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);

	if (skb->len > total_len)
		goto nla_put_failure;

	odp_datapath->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free_skb;

nla_put_failure:
	err = -EMSGSIZE;
exit_free_skb:
	kfree_skb(skb);
exit:
	return err;
}

static struct sk_buff *copy_datapath_from_user(struct odp_datapath __user *uodp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_datapath->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_datapath))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_datapath, len))
		goto error_free_skb;

	odp_datapath = (struct odp_datapath *)skb->data;
	err = -EINVAL;
	if (odp_datapath->len != len)
		goto error_free_skb;

	err = nla_parse(a, ODP_DP_ATTR_MAX,
			(struct nlattr *)(skb->data + sizeof(struct odp_datapath)),
			skb->len - sizeof(struct odp_datapath), datapath_policy);
	if (err)
		goto error_free_skb;

	if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
		u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);

		err = -EINVAL;
		if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
			goto error_free_skb;
	}

	err = VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}

/* Called with dp_mutex and optionally with RTNL lock also.
 * Holds the returned datapath's mutex on return.
 */
static struct datapath *lookup_datapath(struct odp_datapath *odp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	WARN_ON_ONCE(!mutex_is_locked(&dp_mutex));

	if (!a[ODP_DP_ATTR_NAME]) {
		struct datapath *dp;

		dp = get_dp(odp_datapath->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);
		mutex_lock(&dp->mutex);
		return dp;
	} else {
		struct datapath *dp;
		struct vport *vport;
		int dp_idx;

		rcu_read_lock();
		vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
		dp_idx = vport && vport->port_no == ODPP_LOCAL ? vport->dp->dp_idx : -1;
		rcu_read_unlock();

		if (dp_idx < 0)
			return ERR_PTR(-ENODEV);

		dp = get_dp(dp_idx);
		mutex_lock(&dp->mutex);
		return dp;
	}
}

static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	if (a[ODP_DP_ATTR_IPV4_FRAGS])
		dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
	if (a[ODP_DP_ATTR_SAMPLING])
		dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
}

static int new_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct vport_parms parms;
	struct sk_buff *skb;
	struct datapath *dp;
	struct vport *vport;
	int dp_idx;
	int err;
	int i;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto err;
	odp_datapath = (struct odp_datapath *)skb->data;

	err = -EINVAL;
	if (!a[ODP_DP_ATTR_NAME])
		goto err_free_skb;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock_dp_mutex;

	dp_idx = odp_datapath->dp_idx;
	if (dp_idx < 0) {
		err = -EFBIG;
		for (dp_idx = 0; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
			if (get_dp(dp_idx))
				continue;
			err = 0;
			break;
		}
	} else if (dp_idx < ARRAY_SIZE(dps))
		err = get_dp(dp_idx) ? -EBUSY : 0;
	else
		err = -EINVAL;
	if (err)
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	mutex_lock(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
	parms.type = ODP_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = ODPP_LOCAL;
	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto err_destroy_table;
	}

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_local_port;
	}

	change_datapath(dp, a);

	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	mutex_unlock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
	tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
	mutex_unlock(&dp->mutex);
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock_dp_mutex:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err_free_skb:
	kfree_skb(skb);
err:
	return err;
}

static int del_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	destroy_dp(dp);
	err = 0;

exit_free:
	kfree_skb(skb);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
exit:
	return err;
}

static int set_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	mutex_lock(&dp_mutex);
	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	change_datapath(dp, a);
	mutex_unlock(&dp->mutex);
	err = 0;

exit_free:
	kfree_skb(skb);
	mutex_unlock(&dp_mutex);
exit:
	return err;
}

static int get_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_datapath = (struct odp_datapath *)skb->data;

	mutex_lock(&dp_mutex);
	dp = lookup_datapath(odp_datapath, a);
	mutex_unlock(&dp_mutex);

	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
	mutex_unlock(&dp->mutex);

exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static int dump_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	u32 dp_idx;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_datapath = (struct odp_datapath *)skb->data;

	mutex_lock(&dp_mutex);
	for (dp_idx = odp_datapath->dp_idx; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
		struct datapath *dp = get_dp(dp_idx);
		if (!dp)
			continue;

		mutex_lock(&dp->mutex);
		mutex_unlock(&dp_mutex);
		err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
		mutex_unlock(&dp->mutex);
		goto exit_free;
	}
	mutex_unlock(&dp_mutex);
	err = -ENODEV;

exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
	[ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
	[ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
	[ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static int copy_vport_to_user(void __user *dst, struct vport *vport, uint32_t total_len)
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct nlattr *nla;
	int ifindex, iflink;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_vport = (struct odp_vport *)__skb_put(skb, sizeof(struct odp_vport));
	odp_vport->dp_idx = vport->dp->dp_idx;
	odp_vport->total_len = total_len;

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
	NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
	NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));

	nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
	if (!nla)
		goto nla_put_failure;
	if (vport_get_stats(vport, nla_data(nla)))
		__skb_trim(skb, skb->len - nla->nla_len);

	NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));

	err = vport_get_options(vport, skb);
	if (err)
		goto exit_kfree_skb;

	ifindex = vport_get_ifindex(vport);
	if (ifindex > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);

	iflink = vport_get_iflink(vport);
	if (iflink > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);

	err = -EMSGSIZE;
	if (skb->len > total_len)
		goto exit_kfree_skb;

	odp_vport->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_kfree_skb;

nla_put_failure:
	err = -EMSGSIZE;
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}

static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
					    struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_vport->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_vport))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
		goto error_free_skb;

	odp_vport = (struct odp_vport *)skb->data;
	err = -EINVAL;
	if (odp_vport->len != len)
		goto error_free_skb;

	err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
			skb->len - sizeof(struct odp_vport), vport_policy);
	if (err)
		goto error_free_skb;

	err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}

/* Called without any locks (or with RTNL lock).
 * Returns holding vport->dp->mutex.
 */
static struct vport *lookup_vport(struct odp_vport *odp_vport,
				  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[ODP_VPORT_ATTR_NAME]) {
		int dp_idx, port_no;

		rcu_read_lock();
		vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
		if (!vport) {
			rcu_read_unlock();
			return ERR_PTR(-ENODEV);
		}
		dp_idx = vport->dp->dp_idx;
		port_no = vport->port_no;
		rcu_read_unlock();

		dp = get_dp_locked(dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = get_vport_protected(dp, port_no);
		if (!vport ||
		    strcmp(vport_get_name(vport), nla_data(a[ODP_VPORT_ATTR_NAME]))) {
			mutex_unlock(&dp->mutex);
			return ERR_PTR(-ENODEV);
		}

		return vport;
	} else if (a[ODP_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EINVAL);

		dp = get_dp_locked(odp_vport->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = get_vport_protected(dp, port_no);
		if (!vport) {
			mutex_unlock(&dp->mutex);
			return ERR_PTR(-ENOENT);
		}
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}

static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	int err = 0;

	if (a[ODP_VPORT_ATTR_STATS])
		err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
	if (!err && a[ODP_VPORT_ATTR_ADDRESS])
		err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
	if (!err && a[ODP_VPORT_ATTR_MTU])
		err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
	return err;
}

static int attach_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport_parms parms;
	struct vport *vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	err = -EINVAL;
	if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
		goto exit_kfree_skb;

	rtnl_lock();
	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_rtnl;

	if (a[ODP_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock_dp;

		vport = get_vport_protected(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_dp;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_dp;
			}
			vport = get_vport_protected(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
	parms.options = a[ODP_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(vport);

	err = change_vport(vport, a);
	if (err) {
		dp_detach_port(vport);
		goto exit_unlock_dp;
	}

	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_unlock_rtnl:
	rtnl_unlock();
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}

static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = 0;
	if (a[ODP_VPORT_ATTR_OPTIONS])
		err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
	if (!err)
		err = change_vport(vport, a);

	mutex_unlock(&vport->dp->mutex);
exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}

static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct datapath *dp;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;
	dp = vport->dp;

	err = -EINVAL;
	if (vport->port_no == ODPP_LOCAL)
		goto exit_unlock_dp;

	err = dp_detach_port(vport);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}

static int get_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	vport = lookup_vport(odp_vport, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
	mutex_unlock(&vport->dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static int dump_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_free;

	port_no = 0;
	if (a[ODP_VPORT_ATTR_PORT_NO])
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
	for (; port_no < DP_MAX_PORTS; port_no++) {
		struct vport *vport = get_vport_protected(dp, port_no);
		if (vport) {
			err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
			goto exit_unlock_dp;
		}
	}
	err = -ENODEV;

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int listeners;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_NEW:
		err = new_datapath((struct odp_datapath __user *)argp);
		goto exit;

	case ODP_DP_GET:
		err = get_datapath((struct odp_datapath __user *)argp);
		goto exit;

	case ODP_DP_DEL:
		err = del_datapath((struct odp_datapath __user *)argp);
		goto exit;

	case ODP_DP_SET:
		err = set_datapath((struct odp_datapath __user *)argp);
		goto exit;

	case ODP_DP_DUMP:
		err = dump_datapath((struct odp_datapath __user *)argp);
		goto exit;

	case ODP_VPORT_NEW:
		err = attach_vport((struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_GET:
		err = get_vport((struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_DEL:
		err = del_vport(cmd, (struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_SET:
		err = set_vport(cmd, (struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_DUMP:
		err = dump_vport((struct odp_vport __user *)argp);
		goto exit;

	case ODP_FLOW_FLUSH:
		err = flush_flows(argp);
		goto exit;

	case ODP_FLOW_NEW:
	case ODP_FLOW_SET:
		err = new_flow(cmd, (struct odp_flow __user *)argp);
		goto exit;

	case ODP_FLOW_GET:
	case ODP_FLOW_DEL:
		err = get_or_del_flow(cmd, (struct odp_flow __user *)argp);
		goto exit;

	case ODP_FLOW_DUMP:
		err = dump_flow((struct odp_flow __user *)argp);
		goto exit;

	case ODP_EXECUTE:
		err = execute_packet((struct odp_packet __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}

static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}

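/* The 32-bit compat path below is trivial because, per the comments in the
 * handler, the ODP_* structures used here have the same layout on 32- and
 * 64-bit ABIs; only the user pointer itself needs widening via
 * compat_ptr(). */
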
#ifdef CONFIG_COMPAT
static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	switch (cmd) {
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_NEW:
	case ODP_DP_GET:
	case ODP_DP_DEL:
	case ODP_DP_SET:
	case ODP_DP_DUMP:
	case ODP_VPORT_NEW:
	case ODP_VPORT_DEL:
	case ODP_VPORT_GET:
	case ODP_VPORT_SET:
	case ODP_VPORT_DUMP:
	case ODP_FLOW_NEW:
	case ODP_FLOW_DEL:
	case ODP_FLOW_GET:
	case ODP_FLOW_SET:
	case ODP_FLOW_DUMP:
	case ODP_EXECUTE:
	case ODP_SET_LISTEN_MASK:
	case ODP_GET_LISTEN_MASK:
		/* Ioctls that just need their pointer argument extended. */
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));

	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static ssize_t openvswitch_read(struct file *f, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	struct sk_buff *skb;
	struct iovec iov;
	int retval;
	int i;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners) {
		retval = 0;
		goto error;
	}

	for (;;) {
		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}

success:
	mutex_unlock(&dp->mutex);

	iov.iov_base = buf;
	iov.iov_len = min_t(size_t, skb->len, nbytes);
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = skb->len;

	kfree_skb(skb);
	return retval;

error:
	mutex_unlock(&dp->mutex);
	return retval;
}

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
		mutex_unlock(&dp->mutex);
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

static struct file_operations openvswitch_fops = {
	.owner = THIS_MODULE,
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = openvswitch_compat_ioctl,
#endif
};

static int major;

static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");