2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
4 * Stanford Junior University
7 /* Functions for managing the dp interface/device. */
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/if_arp.h>
12 #include <linux/if_bridge.h>
13 #include <linux/if_vlan.h>
15 #include <net/genetlink.h>
17 #include <linux/delay.h>
18 #include <linux/etherdevice.h>
19 #include <linux/kernel.h>
20 #include <linux/kthread.h>
21 #include <linux/mutex.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/rcupdate.h>
24 #include <linux/version.h>
25 #include <linux/ethtool.h>
26 #include <linux/random.h>
27 #include <asm/system.h>
28 #include <linux/netfilter_bridge.h>
29 #include <linux/inetdevice.h>
30 #include <linux/list.h>
31 #include <linux/rculist.h>
32 #include <linux/workqueue.h>
34 #include "openflow-netlink.h"
45 /* Strings to describe the manufacturer, hardware, and software. This data
46 * is queryable through the switch description stats message. */
47 static char mfr_desc[DESC_STR_LEN] = "Nicira Networks";
48 static char hw_desc[DESC_STR_LEN] = "Reference Linux Kernel Module";
49 static char sw_desc[DESC_STR_LEN] = VERSION;
50 static char serial_num[SERIAL_NUM_LEN] = "None";
52 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
53 module_param_string(mfr_desc, mfr_desc, sizeof mfr_desc, 0444);
54 module_param_string(hw_desc, hw_desc, sizeof hw_desc, 0444);
55 module_param_string(sw_desc, sw_desc, sizeof sw_desc, 0444);
56 module_param_string(serial_num, serial_num, sizeof serial_num, 0444);
58 MODULE_PARM(mfr_desc, "s");
59 MODULE_PARM(hw_desc, "s");
60 MODULE_PARM(sw_desc, "s");
61 MODULE_PARM(serial_num, "s");
65 /* Number of milliseconds between runs of the maintenance thread. */
66 #define MAINT_SLEEP_MSECS 1000
68 #define UINT32_MAX 4294967295U
69 #define UINT16_MAX 65535
70 #define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
72 static struct genl_family dp_genl_family;
73 static struct genl_multicast_group mc_group;
75 /* It's hard to imagine wanting more than one datapath, but... */
78 /* Datapaths. Protected on the read side by rcu_read_lock, on the write side
79 * by dp_mutex. dp_mutex is almost completely redundant with genl_mutex
80 * maintained by the Generic Netlink code, but the timeout path needs mutual
81 * exclusion too.
83 * It is safe to access the datapath and net_bridge_port structures with just
84 * dp_mutex. */
86 static struct datapath *dps[DP_MAX];
87 DEFINE_MUTEX(dp_mutex);
88 EXPORT_SYMBOL(dp_mutex);
90 static int dp_maint_func(void *data);
91 static void init_port_status(struct net_bridge_port *p);
92 static int dp_genl_openflow_done(struct netlink_callback *);
93 static struct net_bridge_port *new_nbp(struct datapath *,
94 struct net_device *, int port_no);
96 /* nla_shrink - reduce amount of space reserved by nla_reserve
97 * @skb: socket buffer from which to recover room
98 * @nla: netlink attribute to adjust
99 * @len: new length of attribute payload
101 * Reduces amount of space reserved by a call to nla_reserve.
103 * No other attributes may be added between calling nla_reserve and this
104 * function, since it will create a hole in the message.
106 void nla_shrink(struct sk_buff *skb, struct nlattr *nla, int len)
108 int delta = nla_total_size(len) - nla_total_size(nla_len(nla));
112 nla->nla_len = nla_attr_size(len);
115 /* Puts a set of openflow headers for a message of the given 'type' into 'skb'.
116 * If 'sender' is nonnull, then it is used as the message's destination. 'dp'
117 * must specify the datapath to use.
119 * '*max_openflow_len' receives the maximum number of bytes that are available
120 * for the embedded OpenFlow message. The caller must call
121 * resize_openflow_skb() to set the actual size of the message to this number
124 * Returns the openflow header if successful, otherwise (if 'skb' is too small)
125 * an ERR_PTR. */
127 put_openflow_headers(struct datapath *dp, struct sk_buff *skb, uint8_t type,
128 const struct sender *sender, int *max_openflow_len)
130 struct ofp_header *oh;
134 /* Assemble the Generic Netlink wrapper. */
135 if (!genlmsg_put(skb,
136 sender ? sender->pid : 0,
137 sender ? sender->seq : 0,
138 &dp_genl_family, 0, DP_GENL_C_OPENFLOW))
139 return ERR_PTR(-ENOBUFS);
140 if (nla_put_u32(skb, DP_GENL_A_DP_IDX, dp->dp_idx) < 0)
141 return ERR_PTR(-ENOBUFS);
142 openflow_len = (skb_tailroom(skb) - NLA_HDRLEN) & ~(NLA_ALIGNTO - 1);
143 if (openflow_len < sizeof *oh)
144 return ERR_PTR(-ENOBUFS);
145 *max_openflow_len = openflow_len;
146 attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, openflow_len);
149 /* Fill in the header. The caller is responsible for the length. */
151 oh->version = OFP_VERSION;
153 oh->xid = sender ? sender->xid : 0;
158 /* Resizes OpenFlow header 'oh', which must be at the tail end of 'skb', to new
159 * length 'new_length' (in bytes), adjusting pointers and size values as
160 * necessary. */
162 resize_openflow_skb(struct sk_buff *skb,
163 struct ofp_header *oh, size_t new_length)
165 struct nlattr *attr = ((void *) oh) - NLA_HDRLEN;
166 nla_shrink(skb, attr, new_length);
167 oh->length = htons(new_length);
168 nlmsg_end(skb, (struct nlmsghdr *) skb->data);
171 /* Allocates a new skb to contain an OpenFlow message 'openflow_len' bytes in
172 * length. Returns a null pointer if memory is unavailable, otherwise returns
173 * the OpenFlow header and stores a pointer to the skb in '*pskb'.
175 * 'type' is the OpenFlow message type. If 'sender' is nonnull, then it is
176 * used as the message's destination. 'dp' must specify the datapath to
177 * use. */
179 alloc_openflow_skb(struct datapath *dp, size_t openflow_len, uint8_t type,
180 const struct sender *sender, struct sk_buff **pskb)
182 struct ofp_header *oh;
185 int max_openflow_len;
187 if ((openflow_len + sizeof(struct ofp_header)) > UINT16_MAX) {
189 printk("alloc_openflow_skb: openflow message too large: %zu\n",
194 genl_len = nlmsg_total_size(GENL_HDRLEN + dp_genl_family.hdrsize);
195 genl_len += nla_total_size(sizeof(uint32_t)); /* DP_GENL_A_DP_IDX */
196 genl_len += nla_total_size(openflow_len); /* DP_GENL_A_OPENFLOW */
197 skb = *pskb = genlmsg_new(genl_len, GFP_ATOMIC);
200 printk("alloc_openflow_skb: genlmsg_new failed\n");
204 oh = put_openflow_headers(dp, skb, type, sender, &max_openflow_len);
205 BUG_ON(!oh || IS_ERR(oh));
206 resize_openflow_skb(skb, oh, openflow_len);
211 /* Sends 'skb' to 'sender' if it is nonnull, otherwise multicasts 'skb' to all
212 * listeners. */
214 send_openflow_skb(struct sk_buff *skb, const struct sender *sender)
217 ? genlmsg_unicast(skb, sender->pid)
218 : genlmsg_multicast(skb, 0, mc_group.id, GFP_ATOMIC));
221 /* Generates a unique datapath id. It incorporates the datapath index
222 * and a hardware address, if available. If not, it generates a random
223 * one. */
226 uint64_t gen_datapath_id(uint16_t dp_idx)
230 struct net_device *dev;
232 /* The top 16 bits are used to identify the datapath. The lower 48 bits
233 * use an interface address. */
234 id = (uint64_t)dp_idx << 48;
235 if ((dev = dev_get_by_name(&init_net, "ctl0"))
236 || (dev = dev_get_by_name(&init_net, "eth0"))) {
237 for (i=0; i<ETH_ALEN; i++) {
238 id |= (uint64_t)dev->dev_addr[i] << (8*(ETH_ALEN-1 - i));
242 /* Randomly choose the lower 48 bits if we cannot find an
243 * address and mark the most significant bit to indicate that
244 * this was randomly generated. */
245 uint8_t rand[ETH_ALEN];
246 get_random_bytes(rand, ETH_ALEN);
247 id |= (uint64_t)1 << 63;
248 for (i=0; i<ETH_ALEN; i++) {
249 id |= (uint64_t)rand[i] << (8*(ETH_ALEN-1 - i));
256 /* Creates a new datapath numbered 'dp_idx'. Returns 0 for success or a
257 * negative error code. */
258 static int new_dp(int dp_idx)
263 if (dp_idx < 0 || dp_idx >= DP_MAX)
266 if (!try_module_get(THIS_MODULE))
269 /* Exit early if a datapath with that number already exists. */
276 dp = kzalloc(sizeof *dp, GFP_KERNEL);
280 /* Set up our "of" device */
281 err = dp_dev_setup(dp);
286 dp->id = gen_datapath_id(dp_idx);
287 dp->chain = chain_create(dp);
288 if (dp->chain == NULL)
289 goto err_destroy_dp_dev;
290 INIT_LIST_HEAD(&dp->port_list);
292 dp->local_port = new_nbp(dp, dp->netdev, OFPP_LOCAL);
293 if (IS_ERR(dp->local_port)) {
294 err = PTR_ERR(dp->local_port);
295 goto err_destroy_chain;
299 dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
301 dp->dp_task = kthread_run(dp_maint_func, dp, "dp%d", dp_idx);
302 if (IS_ERR(dp->dp_task))
303 goto err_destroy_local_port;
309 err_destroy_local_port:
310 dp_del_switch_port(dp->local_port);
312 chain_destroy(dp->chain);
318 module_put(THIS_MODULE);
322 /* Find and return a free port number under 'dp'. */
323 static int find_portno(struct datapath *dp)
326 for (i = 0; i < OFPP_MAX; i++)
327 if (dp->ports[i] == NULL)
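329 /* Attaches 'dev' to 'dp' as port number 'port_no'. The device is placed
330 * in promiscuous mode and published via RCU so readers see a fully
331 * initialized port. Returns the new port, or an ERR_PTR on failure. */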
332 static struct net_bridge_port *new_nbp(struct datapath *dp,
333 struct net_device *dev, int port_no)
335 struct net_bridge_port *p;
337 if (dev->br_port != NULL)
338 return ERR_PTR(-EBUSY);
340 p = kzalloc(sizeof(*p), GFP_KERNEL);
342 return ERR_PTR(-ENOMEM);
345 dev_set_promiscuity(dev, 1);
350 p->port_no = port_no;
351 spin_lock_init(&p->lock);
352 INIT_WORK(&p->port_task, NULL);
353 if (port_no != OFPP_LOCAL)
354 rcu_assign_pointer(dev->br_port, p);
355 if (port_no < OFPP_MAX)
356 rcu_assign_pointer(dp->ports[port_no], p);
357 list_add_rcu(&p->node, &dp->port_list);
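359 /* Adds 'dev' to 'dp' as a switch port on the first free port number and
360 * notifies the controller. Loopback and non-Ethernet devices are
361 * rejected. */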
362 int add_switch_port(struct datapath *dp, struct net_device *dev)
364 struct net_bridge_port *p;
367 if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER
371 port_no = find_portno(dp);
375 p = new_nbp(dp, dev, port_no);
381 /* Notify the ctlpath that this port has been added */
382 dp_send_port_status(p, OFPPR_ADD);
387 /* Delete 'p' from switch. */
388 int dp_del_switch_port(struct net_bridge_port *p)
390 /* First drop references to device. */
391 cancel_work_sync(&p->port_task);
393 dev_set_promiscuity(p->dev, -1);
395 list_del_rcu(&p->node);
396 if (p->port_no != OFPP_LOCAL)
397 rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
398 rcu_assign_pointer(p->dev->br_port, NULL);
400 /* Then wait until no one is still using it, and destroy it. */
403 /* Notify the ctlpath that this port no longer exists */
404 dp_send_port_status(p, OFPPR_DELETE);
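408 /* Tears down datapath 'dp': stops the maintenance thread, detaches every
409 * switch port, frees the flow table chain, and drops the module reference
410 * taken in new_dp(). */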
412 static void del_dp(struct datapath *dp)
414 struct net_bridge_port *p, *n;
416 kthread_stop(dp->dp_task);
418 /* Drop references to DP. */
419 list_for_each_entry_safe (p, n, &dp->port_list, node)
420 dp_del_switch_port(p);
421 rcu_assign_pointer(dps[dp->dp_idx], NULL);
423 /* Kill off local_port dev references from buffered packets that have
424 * associated dst entries. */
428 /* Destroy dp->netdev. (Must follow deleting switch ports since
429 * dp->local_port has a reference to it.) */
432 /* Wait until no longer in use, then destroy it. */
434 chain_destroy(dp->chain);
436 module_put(THIS_MODULE);
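438 /* Maintenance thread: periodically times out old flow table entries. */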
439 static int dp_maint_func(void *data)
441 struct datapath *dp = (struct datapath *) data;
443 while (!kthread_should_stop()) {
444 /* Time out old entries. */
445 chain_timeout(dp->chain);
446 msleep_interruptible(MAINT_SLEEP_MSECS);
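450 /* Hands a frame received on switch port 'p' to the forwarding path,
451 * first restoring the Ethernet header that the stack has pulled. */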
453 do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
455 /* Push the Ethernet header back on. */
456 skb_push(skb, ETH_HLEN);
457 fwd_port_input(p->dp->chain, skb, p);
461 * Used as br_handle_frame_hook. (Cannot run bridge at the same time, even on
462 * a different set of devices!)
464 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
465 /* Called with rcu_read_lock. */
466 static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
469 do_port_input(p, skb);
472 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
473 static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
475 do_port_input(p, *pskb);
479 /* NB: This has only been tested on 2.4.35 */
480 static void dp_frame_hook(struct sk_buff *skb)
482 struct net_bridge_port *p = skb->dev->br_port;
485 do_port_input(p, skb);
492 /* Forwarding output path.
493 * Based on net/bridge/br_forward.c. */
495 static inline unsigned packet_length(const struct sk_buff *skb)
497 int length = skb->len - ETH_HLEN;
498 if (skb->protocol == htons(ETH_P_8021Q))
503 /* Send packets out all the ports except the originating one. If the
504 * "flood" argument is set, only send along the minimum spanning tree.
507 output_all(struct datapath *dp, struct sk_buff *skb, int flood)
509 u32 disable = flood ? OFPPC_NO_FLOOD : 0;
510 struct net_bridge_port *p;
513 list_for_each_entry_rcu (p, &dp->port_list, node) {
514 if (skb->dev == p->dev || p->config & disable)
516 if (prev_port != -1) {
517 struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
522 dp_output_port(dp, clone, prev_port, 0);
524 prev_port = p->port_no;
527 dp_output_port(dp, skb, prev_port, 0);
534 /* Marks 'skb' as having originated from 'in_port' in 'dp'.
535 FIXME: how are devices reference counted? */
536 void dp_set_origin(struct datapath *dp, uint16_t in_port,
539 struct net_bridge_port *p = (in_port < OFPP_MAX ? dp->ports[in_port]
540 : in_port == OFPP_LOCAL ? dp->local_port
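546 /* Transmits 'skb' on its device, dropping it (with a log message) if it
547 * is larger than the device's MTU. */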
548 static int xmit_skb(struct sk_buff *skb)
551 if (packet_length(skb) > skb->dev->mtu) {
552 printk("dropped over-mtu packet: %d > %d\n",
553 packet_length(skb), skb->dev->mtu);
563 /* Takes ownership of 'skb' and transmits it to 'out_port' on 'dp'.
565 int dp_output_port(struct datapath *dp, struct sk_buff *skb, int out_port,
571 /* Send it out the port it came in on, which is already set in
572 * the skb. */
575 printk("skb device not set; can't forward to in_port\n");
579 return xmit_skb(skb);
582 int retval = run_flow_through_tables(dp->chain, skb,
590 return output_all(dp, skb, 1);
593 return output_all(dp, skb, 0);
595 case OFPP_CONTROLLER:
596 return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
600 struct net_device *dev = dp->netdev;
601 return dev ? dp_dev_recv(dev, skb) : -ESRCH;
604 case 0 ... OFPP_MAX-1: {
605 struct net_bridge_port *p = dp->ports[out_port];
608 if (p->dev == skb->dev) {
609 /* To send to the input port, must use OFPP_IN_PORT */
612 printk("can't directly forward to input port\n");
615 if (p->config & OFPPC_NO_FWD && !ignore_no_fwd) {
620 return xmit_skb(skb);
630 printk("can't forward to bad port %d\n", out_port);
634 /* Takes ownership of 'skb' and transmits it to 'dp''s control path. If
635 * 'buffer_id' != -1, then only the first 64 bytes of 'skb' are sent;
636 * otherwise, all of 'skb' is sent. 'reason' indicates why 'skb' is being
637 * sent. 'max_len' sets the maximum number of bytes that the caller
638 * wants to be sent; a value of 0 indicates the entire packet should be
639 * sent. */
641 dp_output_control(struct datapath *dp, struct sk_buff *skb,
642 uint32_t buffer_id, size_t max_len, int reason)
644 /* FIXME? Can we avoid creating a new skbuff in the case where we
645 * forward the whole packet? */
646 struct sk_buff *f_skb;
647 struct ofp_packet_in *opi;
648 struct net_bridge_port *p;
649 size_t fwd_len, opi_len;
653 if ((buffer_id != (uint32_t) -1) && max_len)
654 fwd_len = min(fwd_len, max_len);
656 opi_len = offsetof(struct ofp_packet_in, data) + fwd_len;
657 opi = alloc_openflow_skb(dp, opi_len, OFPT_PACKET_IN, NULL, &f_skb);
662 opi->buffer_id = htonl(buffer_id);
663 opi->total_len = htons(skb->len);
664 p = skb->dev->br_port;
665 opi->in_port = htons(p ? p->port_no : OFPP_LOCAL);
666 opi->reason = reason;
668 memcpy(opi->data, skb_mac_header(skb), fwd_len);
669 err = send_openflow_skb(f_skb, NULL);
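672 /* Fills 'desc' with a description of port 'p': name, hardware address,
673 * config and state flags, and the current/supported/advertised link
674 * features queried through ethtool when available. Multibyte fields are
675 * converted to network byte order. */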
676 static void fill_port_desc(struct net_bridge_port *p, struct ofp_phy_port *desc)
679 desc->port_no = htons(p->port_no);
680 strncpy(desc->name, p->dev->name, OFP_MAX_PORT_NAME_LEN);
681 desc->name[OFP_MAX_PORT_NAME_LEN-1] = '\0';
682 memcpy(desc->hw_addr, p->dev->dev_addr, ETH_ALEN);
685 desc->advertised = 0;
688 spin_lock_irqsave(&p->lock, flags);
689 desc->config = htonl(p->config);
690 desc->state = htonl(p->state);
691 spin_unlock_irqrestore(&p->lock, flags);
693 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,24)
694 if (p->dev->ethtool_ops && p->dev->ethtool_ops->get_settings) {
695 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
697 if (!p->dev->ethtool_ops->get_settings(p->dev, &ecmd)) {
698 /* Set the supported features */
699 if (ecmd.supported & SUPPORTED_10baseT_Half)
700 desc->supported |= OFPPF_10MB_HD;
701 if (ecmd.supported & SUPPORTED_10baseT_Full)
702 desc->supported |= OFPPF_10MB_FD;
703 if (ecmd.supported & SUPPORTED_100baseT_Half)
704 desc->supported |= OFPPF_100MB_HD;
705 if (ecmd.supported & SUPPORTED_100baseT_Full)
706 desc->supported |= OFPPF_100MB_FD;
707 if (ecmd.supported & SUPPORTED_1000baseT_Half)
708 desc->supported |= OFPPF_1GB_HD;
709 if (ecmd.supported & SUPPORTED_1000baseT_Full)
710 desc->supported |= OFPPF_1GB_FD;
711 if (ecmd.supported & SUPPORTED_10000baseT_Full)
712 desc->supported |= OFPPF_10GB_FD;
713 if (ecmd.supported & SUPPORTED_TP)
714 desc->supported |= OFPPF_COPPER;
715 if (ecmd.supported & SUPPORTED_FIBRE)
716 desc->supported |= OFPPF_FIBER;
717 if (ecmd.supported & SUPPORTED_Autoneg)
718 desc->supported |= OFPPF_AUTONEG;
719 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
720 if (ecmd.supported & SUPPORTED_Pause)
721 desc->supported |= OFPPF_PAUSE;
722 if (ecmd.supported & SUPPORTED_Asym_Pause)
723 desc->supported |= OFPPF_PAUSE_ASYM;
724 #endif /* kernel >= 2.6.14 */
726 /* Set the advertised features */
727 if (ecmd.advertising & ADVERTISED_10baseT_Half)
728 desc->advertised |= OFPPF_10MB_HD;
729 if (ecmd.advertising & ADVERTISED_10baseT_Full)
730 desc->advertised |= OFPPF_10MB_FD;
731 if (ecmd.advertising & ADVERTISED_100baseT_Half)
732 desc->advertised |= OFPPF_100MB_HD;
733 if (ecmd.advertising & ADVERTISED_100baseT_Full)
734 desc->advertised |= OFPPF_100MB_FD;
735 if (ecmd.advertising & ADVERTISED_1000baseT_Half)
736 desc->advertised |= OFPPF_1GB_HD;
737 if (ecmd.advertising & ADVERTISED_1000baseT_Full)
738 desc->advertised |= OFPPF_1GB_FD;
739 if (ecmd.advertising & ADVERTISED_10000baseT_Full)
740 desc->advertised |= OFPPF_10GB_FD;
741 if (ecmd.advertising & ADVERTISED_TP)
742 desc->advertised |= OFPPF_COPPER;
743 if (ecmd.advertising & ADVERTISED_FIBRE)
744 desc->advertised |= OFPPF_FIBER;
745 if (ecmd.advertising & ADVERTISED_Autoneg)
746 desc->advertised |= OFPPF_AUTONEG;
747 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
748 if (ecmd.advertising & ADVERTISED_Pause)
749 desc->advertised |= OFPPF_PAUSE;
750 if (ecmd.advertising & ADVERTISED_Asym_Pause)
751 desc->advertised |= OFPPF_PAUSE_ASYM;
752 #endif /* kernel >= 2.6.14 */
754 /* Set the current features */
755 if (ecmd.speed == SPEED_10)
756 desc->curr = (ecmd.duplex) ? OFPPF_10MB_FD : OFPPF_10MB_HD;
757 else if (ecmd.speed == SPEED_100)
758 desc->curr = (ecmd.duplex) ? OFPPF_100MB_FD : OFPPF_100MB_HD;
759 else if (ecmd.speed == SPEED_1000)
760 desc->curr = (ecmd.duplex) ? OFPPF_1GB_FD : OFPPF_1GB_HD;
761 else if (ecmd.speed == SPEED_10000)
762 desc->curr = OFPPF_10GB_FD;
764 if (ecmd.port == PORT_TP)
765 desc->curr |= OFPPF_COPPER;
766 else if (ecmd.port == PORT_FIBRE)
767 desc->curr |= OFPPF_FIBER;
770 desc->curr |= OFPPF_AUTONEG;
774 desc->curr = htonl(desc->curr);
775 desc->supported = htonl(desc->supported);
776 desc->advertised = htonl(desc->advertised);
777 desc->peer = htonl(desc->peer);
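779 /* Fills the body of an OFPT_FEATURES_REPLY in 'ofr' and returns the
780 * number of ports described. */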
781 fill_features_reply(struct datapath *dp, struct ofp_switch_features *ofr)
783 struct net_bridge_port *p;
786 ofr->datapath_id = cpu_to_be64(dp->id);
788 ofr->n_buffers = htonl(N_PKT_BUFFERS);
789 ofr->n_tables = dp->chain->n_tables;
790 ofr->capabilities = htonl(OFP_SUPPORTED_CAPABILITIES);
791 ofr->actions = htonl(OFP_SUPPORTED_ACTIONS);
792 memset(ofr->pad, 0, sizeof ofr->pad);
794 list_for_each_entry_rcu (p, &dp->port_list, node) {
795 fill_port_desc(p, &ofr->ports[port_count]);
803 dp_send_features_reply(struct datapath *dp, const struct sender *sender)
806 struct ofp_switch_features *ofr;
807 size_t ofr_len, port_max_len;
811 port_max_len = sizeof(struct ofp_phy_port) * OFPP_MAX;
812 ofr = alloc_openflow_skb(dp, sizeof(*ofr) + port_max_len,
813 OFPT_FEATURES_REPLY, sender, &skb);
818 port_count = fill_features_reply(dp, ofr);
821 ofr_len = sizeof(*ofr) + (sizeof(struct ofp_phy_port) * port_count);
822 resize_openflow_skb(skb, &ofr->header, ofr_len);
823 return send_openflow_skb(skb, sender);
827 dp_send_config_reply(struct datapath *dp, const struct sender *sender)
830 struct ofp_switch_config *osc;
832 osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY, sender,
837 osc->flags = htons(dp->flags);
838 osc->miss_send_len = htons(dp->miss_send_len);
840 return send_openflow_skb(skb, sender);
844 dp_send_hello(struct datapath *dp, const struct sender *sender,
845 const struct ofp_header *request)
847 if (request->version < OFP_VERSION) {
849 sprintf(err, "Only version 0x%02x supported", OFP_VERSION);
850 dp_send_error_msg(dp, sender, OFPET_HELLO_FAILED,
851 OFPHFC_INCOMPATIBLE, err, strlen(err));
855 struct ofp_header *reply;
857 reply = alloc_openflow_skb(dp, sizeof *reply,
858 OFPT_HELLO, sender, &skb);
862 return send_openflow_skb(skb, sender);
866 /* Callback function for a workqueue to disable an interface */
868 down_port_cb(struct work_struct *work)
870 struct net_bridge_port *p = container_of(work, struct net_bridge_port,
874 if (dev_change_flags(p->dev, p->dev->flags & ~IFF_UP) < 0)
876 printk("problem bringing down port %s\n", p->dev->name);
878 p->config |= OFPPC_PORT_DOWN;
881 /* Callback function for a workqueue to enable an interface */
883 up_port_cb(struct work_struct *work)
885 struct net_bridge_port *p = container_of(work, struct net_bridge_port,
889 if (dev_change_flags(p->dev, p->dev->flags | IFF_UP) < 0)
891 printk("problem bringing up port %s\n", p->dev->name);
893 p->config &= ~OFPPC_PORT_DOWN;
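894 /* Handles an OFPT_PORT_MOD request: updates the config bits selected by
895 * 'opm->mask' and schedules work to bring the interface up or down when
896 * OFPPC_PORT_DOWN changes. */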
897 dp_update_port_flags(struct datapath *dp, const struct ofp_port_mod *opm)
899 unsigned long int flags;
900 int port_no = ntohs(opm->port_no);
901 struct net_bridge_port *p = (port_no < OFPP_MAX ? dp->ports[port_no]
902 : port_no == OFPP_LOCAL ? dp->local_port
905 /* Make sure the port id hasn't changed since this was sent */
906 if (!p || memcmp(opm->hw_addr, p->dev->dev_addr, ETH_ALEN))
909 spin_lock_irqsave(&p->lock, flags);
911 uint32_t config_mask = ntohl(opm->mask);
912 p->config &= ~config_mask;
913 p->config |= ntohl(opm->config) & config_mask;
916 /* Modifying the up/down status of an interface requires taking a lock
917 * that cannot be acquired from this context. For this reason, we use a
918 * shared workqueue, which will cause the change to be executed from a
919 * safer context. */
920 if (opm->mask & htonl(OFPPC_PORT_DOWN)) {
921 if ((opm->config & htonl(OFPPC_PORT_DOWN))
922 && (p->config & OFPPC_PORT_DOWN) == 0) {
923 PREPARE_WORK(&p->port_task, down_port_cb);
924 schedule_work(&p->port_task);
925 } else if ((opm->config & htonl(OFPPC_PORT_DOWN)) == 0
926 && (p->config & OFPPC_PORT_DOWN)) {
927 PREPARE_WORK(&p->port_task, up_port_cb);
928 schedule_work(&p->port_task);
931 spin_unlock_irqrestore(&p->lock, flags);
936 /* Initialize the port status field of the bridge port. */
938 init_port_status(struct net_bridge_port *p)
940 unsigned long int flags;
942 spin_lock_irqsave(&p->lock, flags);
944 if (p->dev->flags & IFF_UP)
945 p->config &= ~OFPPC_PORT_DOWN;
947 p->config |= OFPPC_PORT_DOWN;
949 if (netif_carrier_ok(p->dev))
950 p->state &= ~OFPPS_LINK_DOWN;
952 p->state |= OFPPS_LINK_DOWN;
954 spin_unlock_irqrestore(&p->lock, flags);
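956 /* Multicasts an OFPT_PORT_STATUS message announcing the change 'status'
957 * (e.g. OFPPR_ADD or OFPPR_DELETE) to port 'p'. */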
958 dp_send_port_status(struct net_bridge_port *p, uint8_t status)
961 struct ofp_port_status *ops;
963 ops = alloc_openflow_skb(p->dp, sizeof *ops, OFPT_PORT_STATUS, NULL,
967 ops->reason = status;
968 memset(ops->pad, 0, sizeof ops->pad);
969 fill_port_desc(p, &ops->desc);
971 return send_openflow_skb(skb, NULL);
975 dp_send_flow_expired(struct datapath *dp, struct sw_flow *flow,
976 enum ofp_flow_expired_reason reason)
979 struct ofp_flow_expired *ofe;
981 if (!(dp->flags & OFPC_SEND_FLOW_EXP))
984 ofe = alloc_openflow_skb(dp, sizeof *ofe, OFPT_FLOW_EXPIRED, NULL, &skb);
988 flow_fill_match(&ofe->match, &flow->key);
990 ofe->priority = htons(flow->priority);
991 ofe->reason = reason;
992 memset(ofe->pad, 0, sizeof ofe->pad);
994 ofe->duration = htonl((jiffies - flow->init_time) / HZ);
995 memset(ofe->pad2, 0, sizeof ofe->pad2);
996 ofe->packet_count = cpu_to_be64(flow->packet_count);
997 ofe->byte_count = cpu_to_be64(flow->byte_count);
999 return send_openflow_skb(skb, NULL);
1001 EXPORT_SYMBOL(dp_send_flow_expired);
1004 dp_send_error_msg(struct datapath *dp, const struct sender *sender,
1005 uint16_t type, uint16_t code, const void *data, size_t len)
1007 struct sk_buff *skb;
1008 struct ofp_error_msg *oem;
1011 oem = alloc_openflow_skb(dp, sizeof(*oem)+len, OFPT_ERROR,
1016 oem->type = htons(type);
1017 oem->code = htons(code);
1018 memcpy(oem->data, data, len);
1020 return send_openflow_skb(skb, sender);
1024 dp_send_echo_reply(struct datapath *dp, const struct sender *sender,
1025 const struct ofp_header *rq)
1027 struct sk_buff *skb;
1028 struct ofp_header *reply;
1030 reply = alloc_openflow_skb(dp, ntohs(rq->length), OFPT_ECHO_REPLY,
1035 memcpy(reply + 1, rq + 1, ntohs(rq->length) - sizeof *rq);
1036 return send_openflow_skb(skb, sender);
1039 /* Generic Netlink interface.
1041 * See netlink(7) for an introduction to netlink. See
1042 * http://linux-net.osdl.org/index.php/Netlink for more information and
1043 * pointers on how to work with netlink and Generic Netlink in the kernel and
1044 * in userspace. */
1046 static struct genl_family dp_genl_family = {
1047 .id = GENL_ID_GENERATE,
1049 .name = DP_GENL_FAMILY_NAME,
1051 .maxattr = DP_GENL_A_MAX,
1054 /* Attribute policy: what each attribute may contain. */
1055 static struct nla_policy dp_genl_policy[DP_GENL_A_MAX + 1] = {
1056 [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
1057 [DP_GENL_A_MC_GROUP] = { .type = NLA_U32 },
1058 [DP_GENL_A_PORTNAME] = { .type = NLA_STRING }
1061 static int dp_genl_add(struct sk_buff *skb, struct genl_info *info)
1063 if (!info->attrs[DP_GENL_A_DP_IDX])
1066 return new_dp(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
1069 static struct genl_ops dp_genl_ops_add_dp = {
1070 .cmd = DP_GENL_C_ADD_DP,
1071 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1072 .policy = dp_genl_policy,
1073 .doit = dp_genl_add,
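1075 /* Returns the datapath with index 'dp_idx', or a null pointer if there is
1076 * none. Caller must hold rcu_read_lock or dp_mutex. */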
1077 struct datapath *dp_get(int dp_idx)
1079 if (dp_idx < 0 || dp_idx >= DP_MAX)
1081 return rcu_dereference(dps[dp_idx]);
1084 static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
1086 struct datapath *dp;
1089 if (!info->attrs[DP_GENL_A_DP_IDX])
1092 dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
1102 static struct genl_ops dp_genl_ops_del_dp = {
1103 .cmd = DP_GENL_C_DEL_DP,
1104 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1105 .policy = dp_genl_policy,
1106 .doit = dp_genl_del,
1110 /* Queries a datapath for related information. Currently the only relevant
1111 * information is the datapath's multicast group ID. Really we want one
1112 * multicast group per datapath, but because of locking issues[*] we can't
1113 * easily get one. Thus, every datapath will currently return the same
1114 * global multicast group ID, but in the future it would be nice to fix that.
1116 * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
1117 * mutex, and genl_register_mc_group, called to acquire a new multicast
1118 * group ID, also acquires genl_lock, so attempting it would deadlock.
1120 static int dp_genl_query(struct sk_buff *skb, struct genl_info *info)
1122 struct datapath *dp;
1123 struct sk_buff *ans_skb = NULL;
1127 if (!info->attrs[DP_GENL_A_DP_IDX])
1131 dp_idx = nla_get_u32((info->attrs[DP_GENL_A_DP_IDX]));
1132 dp = dp_get(dp_idx);
1137 ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1142 data = genlmsg_put_reply(ans_skb, info, &dp_genl_family,
1143 0, DP_GENL_C_QUERY_DP);
1148 NLA_PUT_U32(ans_skb, DP_GENL_A_DP_IDX, dp_idx);
1149 NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, mc_group.id);
1151 genlmsg_end(ans_skb, data);
1152 err = genlmsg_reply(ans_skb, info);
1164 static struct genl_ops dp_genl_ops_query_dp = {
1165 .cmd = DP_GENL_C_QUERY_DP,
1166 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1167 .policy = dp_genl_policy,
1168 .doit = dp_genl_query,
1172 static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
1174 struct datapath *dp;
1175 struct net_device *port;
1178 if (!info->attrs[DP_GENL_A_DP_IDX] || !info->attrs[DP_GENL_A_PORTNAME])
1182 dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
1188 /* Get interface to add/remove. */
1189 port = dev_get_by_name(&init_net,
1190 nla_data(info->attrs[DP_GENL_A_PORTNAME]));
1196 /* Execute operation. */
1197 if (info->genlhdr->cmd == DP_GENL_C_ADD_PORT)
1198 err = add_switch_port(dp, port);
1200 if (port->br_port == NULL || port->br_port->dp != dp) {
1204 err = dp_del_switch_port(port->br_port);
1213 static struct genl_ops dp_genl_ops_add_port = {
1214 .cmd = DP_GENL_C_ADD_PORT,
1215 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1216 .policy = dp_genl_policy,
1217 .doit = dp_genl_add_del_port,
1221 static struct genl_ops dp_genl_ops_del_port = {
1222 .cmd = DP_GENL_C_DEL_PORT,
1223 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1224 .policy = dp_genl_policy,
1225 .doit = dp_genl_add_del_port,
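1227 /* Handles a received OpenFlow message by passing it, under dp_mutex, to
1228 * the command processing path for the target datapath. */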
1229 static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
1231 struct nlattr *va = info->attrs[DP_GENL_A_OPENFLOW];
1232 struct datapath *dp;
1233 struct ofp_header *oh;
1234 struct sender sender;
1237 if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
1240 dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
1244 if (nla_len(va) < sizeof(struct ofp_header))
1248 sender.xid = oh->xid;
1249 sender.pid = info->snd_pid;
1250 sender.seq = info->snd_seq;
1252 mutex_lock(&dp_mutex);
1253 err = fwd_control_input(dp->chain, &sender,
1254 nla_data(va), nla_len(va));
1255 mutex_unlock(&dp_mutex);
1259 static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
1260 [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
1263 static int desc_stats_dump(struct datapath *dp, void *state,
1264 void *body, int *body_len)
1266 struct ofp_desc_stats *ods = body;
1267 int n_bytes = sizeof *ods;
1269 if (n_bytes > *body_len) {
1272 *body_len = n_bytes;
1274 strncpy(ods->mfr_desc, mfr_desc, sizeof ods->mfr_desc);
1275 strncpy(ods->hw_desc, hw_desc, sizeof ods->hw_desc);
1276 strncpy(ods->sw_desc, sw_desc, sizeof ods->sw_desc);
1277 strncpy(ods->serial_num, serial_num, sizeof ods->serial_num);
1282 struct flow_stats_state {
1284 struct sw_table_position position;
1285 const struct ofp_flow_stats_request *rq;
1288 int bytes_used, bytes_allocated;
1291 static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
1294 const struct ofp_flow_stats_request *fsr = body;
1295 struct flow_stats_state *s = kmalloc(sizeof *s, GFP_ATOMIC);
1298 s->table_idx = fsr->table_id == 0xff ? 0 : fsr->table_id;
1299 memset(&s->position, 0, sizeof s->position);
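1302 /* Per-flow iteration callback: appends one struct ofp_flow_stats (match,
1303 * counters, and actions) to the reply body, stopping the iteration when
1304 * the remaining space is too small. */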
1305 static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
1307 struct sw_flow_actions *sf_acts = rcu_dereference(flow->sf_acts);
1308 struct flow_stats_state *s = private;
1309 struct ofp_flow_stats *ofs;
1312 length = sizeof *ofs + sf_acts->actions_len;
1313 if (length + s->bytes_used > s->bytes_allocated)
1316 ofs = s->body + s->bytes_used;
1317 ofs->length = htons(length);
1318 ofs->table_id = s->table_idx;
1320 ofs->match.wildcards = htonl(flow->key.wildcards);
1321 ofs->match.in_port = flow->key.in_port;
1322 memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
1323 memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
1324 ofs->match.dl_vlan = flow->key.dl_vlan;
1325 ofs->match.dl_type = flow->key.dl_type;
1326 ofs->match.nw_src = flow->key.nw_src;
1327 ofs->match.nw_dst = flow->key.nw_dst;
1328 ofs->match.nw_proto = flow->key.nw_proto;
1330 ofs->match.tp_src = flow->key.tp_src;
1331 ofs->match.tp_dst = flow->key.tp_dst;
1332 ofs->duration = htonl((jiffies - flow->init_time) / HZ);
1333 ofs->priority = htons(flow->priority);
1334 ofs->idle_timeout = htons(flow->idle_timeout);
1335 ofs->hard_timeout = htons(flow->hard_timeout);
1336 memset(ofs->pad2, 0, sizeof ofs->pad2);
1337 ofs->packet_count = cpu_to_be64(flow->packet_count);
1338 ofs->byte_count = cpu_to_be64(flow->byte_count);
1339 memcpy(ofs->actions, sf_acts->actions, sf_acts->actions_len);
1341 s->bytes_used += length;
1345 static int flow_stats_dump(struct datapath *dp, void *state,
1346 void *body, int *body_len)
1348 struct flow_stats_state *s = state;
1349 struct sw_flow_key match_key;
1353 s->bytes_allocated = *body_len;
1356 flow_extract_match(&match_key, &s->rq->match);
1357 while (s->table_idx < dp->chain->n_tables
1358 && (s->rq->table_id == 0xff || s->rq->table_id == s->table_idx))
1360 struct sw_table *table = dp->chain->tables[s->table_idx];
1362 error = table->iterate(table, &match_key, &s->position,
1363 flow_stats_dump_callback, s);
1368 memset(&s->position, 0, sizeof s->position);
1370 *body_len = s->bytes_used;
1372 /* If error is 0, we're done.
1373 * Otherwise, if some bytes were used, there are more flows to come.
1374 * Otherwise, we were not able to fit even a single flow in the body,
1375 * which indicates that we have a single flow with too many actions to
1376 * fit. We won't ever make any progress at that rate, so give up. */
1377 return !error ? 0 : s->bytes_used ? 1 : -ENOMEM;
1380 static void flow_stats_done(void *state)
1385 static int aggregate_stats_init(struct datapath *dp,
1386 const void *body, int body_len,
1389 *state = (void *)body;
1393 static int aggregate_stats_dump_callback(struct sw_flow *flow, void *private)
1395 struct ofp_aggregate_stats_reply *rpy = private;
1396 rpy->packet_count += flow->packet_count;
1397 rpy->byte_count += flow->byte_count;
1402 static int aggregate_stats_dump(struct datapath *dp, void *state,
1403 void *body, int *body_len)
1405 struct ofp_aggregate_stats_request *rq = state;
1406 struct ofp_aggregate_stats_reply *rpy;
1407 struct sw_table_position position;
1408 struct sw_flow_key match_key;
1411 if (*body_len < sizeof *rpy)
1414 *body_len = sizeof *rpy;
1416 memset(rpy, 0, sizeof *rpy);
1418 flow_extract_match(&match_key, &rq->match);
1419 table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
1420 memset(&position, 0, sizeof position);
1421 while (table_idx < dp->chain->n_tables
1422 && (rq->table_id == 0xff || rq->table_id == table_idx))
1424 struct sw_table *table = dp->chain->tables[table_idx];
1427 error = table->iterate(table, &match_key, &position,
1428 aggregate_stats_dump_callback, rpy);
1433 memset(&position, 0, sizeof position);
1436 rpy->packet_count = cpu_to_be64(rpy->packet_count);
1437 rpy->byte_count = cpu_to_be64(rpy->byte_count);
1438 rpy->flow_count = htonl(rpy->flow_count);
1442 static int table_stats_dump(struct datapath *dp, void *state,
1443 void *body, int *body_len)
1445 struct ofp_table_stats *ots;
1446 int n_bytes = dp->chain->n_tables * sizeof *ots;
1448 if (n_bytes > *body_len)
1450 *body_len = n_bytes;
1451 for (i = 0, ots = body; i < dp->chain->n_tables; i++, ots++) {
1452 struct sw_table_stats stats;
1453 dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
1454 strncpy(ots->name, stats.name, sizeof ots->name);
1456 ots->wildcards = htonl(stats.wildcards);
1457 memset(ots->pad, 0, sizeof ots->pad);
1458 ots->max_entries = htonl(stats.max_flows);
1459 ots->active_count = htonl(stats.n_flows);
1460 ots->lookup_count = cpu_to_be64(stats.n_lookup);
1461 ots->matched_count = cpu_to_be64(stats.n_matched);
1466 struct port_stats_state {
1470 static int port_stats_init(struct datapath *dp, const void *body, int body_len,
1473 struct port_stats_state *s = kmalloc(sizeof *s, GFP_ATOMIC);
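1478 /* Emits one struct ofp_port_stats per switch port, resuming from the
1479 * port number saved in the iteration state. A nonzero return indicates
1480 * the body filled and the dump should be continued. */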
1481 static int port_stats_dump(struct datapath *dp, void *state,
1482 void *body, int *body_len)
1484 struct port_stats_state *s = state;
1485 struct ofp_port_stats *ops;
1486 int n_ports, max_ports;
1489 max_ports = *body_len / sizeof *ops;
1495 for (i = s->port; i < OFPP_MAX && n_ports < max_ports; i++) {
1496 struct net_bridge_port *p = dp->ports[i];
1497 struct net_device_stats *stats;
1500 stats = p->dev->get_stats(p->dev);
1501 ops->port_no = htons(p->port_no);
1502 memset(ops->pad, 0, sizeof ops->pad);
1503 ops->rx_packets = cpu_to_be64(stats->rx_packets);
1504 ops->tx_packets = cpu_to_be64(stats->tx_packets);
1505 ops->rx_bytes = cpu_to_be64(stats->rx_bytes);
1506 ops->tx_bytes = cpu_to_be64(stats->tx_bytes);
1507 ops->rx_dropped = cpu_to_be64(stats->rx_dropped);
1508 ops->tx_dropped = cpu_to_be64(stats->tx_dropped);
1509 ops->rx_errors = cpu_to_be64(stats->rx_errors);
1510 ops->tx_errors = cpu_to_be64(stats->tx_errors);
1511 ops->rx_frame_err = cpu_to_be64(stats->rx_frame_errors);
1512 ops->rx_over_err = cpu_to_be64(stats->rx_over_errors);
1513 ops->rx_crc_err = cpu_to_be64(stats->rx_crc_errors);
1514 ops->collisions = cpu_to_be64(stats->collisions);
1519 *body_len = n_ports * sizeof *ops;
1520 return n_ports >= max_ports;
1523 static void port_stats_done(void *state)
1529 /* Minimum and maximum acceptable number of bytes in body member of
1530 * struct ofp_stats_request. */
1531 size_t min_body, max_body;
1533 /* Prepares to dump some kind of statistics on 'dp'. 'body' and
1534 * 'body_len' are the 'body' member of the struct ofp_stats_request.
1535 * Returns zero if successful, otherwise a negative error code.
1536 * May initialize '*state' to state information. May be null if no
1537 * initialization is required. */
1538 int (*init)(struct datapath *dp, const void *body, int body_len,
1541 /* Dumps statistics for 'dp' into the '*body_len' bytes at 'body', and
1542 * modifies '*body_len' to reflect the number of bytes actually used.
1543 * ('body' will be transmitted as the 'body' member of struct
1544 * ofp_stats_reply.) */
1545 int (*dump)(struct datapath *dp, void *state,
1546 void *body, int *body_len);
1548 /* Cleans any state created by the init or dump functions. May be null
1549 * if no cleanup is required. */
1550 void (*done)(void *state);
1553 static const struct stats_type stats[] = {
1562 sizeof(struct ofp_flow_stats_request),
1563 sizeof(struct ofp_flow_stats_request),
1568 [OFPST_AGGREGATE] = {
1569 sizeof(struct ofp_aggregate_stats_request),
1570 sizeof(struct ofp_aggregate_stats_request),
1571 aggregate_stats_init,
1572 aggregate_stats_dump,
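1588 /* Netlink dump handler for OpenFlow stats requests. The first call
1589 * parses and validates the request and sets up per-dump state in
1590 * 'cb->args'; each later call emits one OFPT_STATS_REPLY fragment until
1591 * the stats type's dump function reports that it is finished. */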
1592 dp_genl_openflow_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1594 struct datapath *dp;
1595 struct sender sender;
1596 const struct stats_type *s;
1597 struct ofp_stats_reply *osr;
1599 int max_openflow_len, body_len;
1603 /* Set up the cleanup function for this dump. Linux 2.6.20 and later
1604 * support setting up cleanup functions via the .done member of
1605 * struct genl_ops. This kluge supports earlier versions also. */
1606 cb->done = dp_genl_openflow_done;
1608 sender.pid = NETLINK_CB(cb->skb).pid;
1609 sender.seq = cb->nlh->nlmsg_seq;
1611 struct nlattr *attrs[DP_GENL_A_MAX + 1];
1612 struct ofp_stats_request *rq;
1614 size_t len, body_len;
1617 err = nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, DP_GENL_A_MAX,
1618 dp_genl_openflow_policy);
1622 if (!attrs[DP_GENL_A_DP_IDX])
1624 dp_idx = nla_get_u32(attrs[DP_GENL_A_DP_IDX]);
1625 dp = dp_get(dp_idx);
1629 va = attrs[DP_GENL_A_OPENFLOW];
1631 if (!va || len < sizeof *rq)
1635 sender.xid = rq->header.xid;
1636 type = ntohs(rq->type);
1637 if (rq->header.version != OFP_VERSION) {
1638 dp_send_error_msg(dp, &sender, OFPET_BAD_REQUEST,
1639 OFPBRC_BAD_VERSION, rq, len);
1642 if (rq->header.type != OFPT_STATS_REQUEST
1643 || ntohs(rq->header.length) != len)
1646 if (type >= ARRAY_SIZE(stats) || !stats[type].dump) {
1647 dp_send_error_msg(dp, &sender, OFPET_BAD_REQUEST,
1648 OFPBRC_BAD_STAT, rq, len);
1653 body_len = len - offsetof(struct ofp_stats_request, body);
1654 if (body_len < s->min_body || body_len > s->max_body)
1658 cb->args[1] = dp_idx;
1660 cb->args[3] = rq->header.xid;
1663 err = s->init(dp, rq->body, body_len, &state);
1666 cb->args[4] = (long) state;
1668 } else if (cb->args[0] == 1) {
1669 sender.xid = cb->args[3];
1670 dp_idx = cb->args[1];
1671 s = &stats[cb->args[2]];
1673 dp = dp_get(dp_idx);
1680 osr = put_openflow_headers(dp, skb, OFPT_STATS_REPLY, &sender,
1683 return PTR_ERR(osr);
1684 osr->type = htons(s - stats);
1686 resize_openflow_skb(skb, &osr->header, max_openflow_len);
1688 body_len = max_openflow_len - offsetof(struct ofp_stats_reply, body);
1690 err = s->dump(dp, (void *) cb->args[4], body, &body_len);
1695 osr->flags = htons(OFPSF_REPLY_MORE);
1696 resize_openflow_skb(skb, &osr->header,
1697 (offsetof(struct ofp_stats_reply, body)
1706 dp_genl_openflow_done(struct netlink_callback *cb)
1709 const struct stats_type *s = &stats[cb->args[2]];
1711 s->done((void *) cb->args[4]);
1716 static struct genl_ops dp_genl_ops_openflow = {
1717 .cmd = DP_GENL_C_OPENFLOW,
1718 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1719 .policy = dp_genl_openflow_policy,
1720 .doit = dp_genl_openflow,
1721 .dumpit = dp_genl_openflow_dumpit,
1724 static struct genl_ops *dp_genl_all_ops[] = {
1725 /* Keep this operation first. Generic Netlink dispatching
1726 * looks up operations with linear search, so we want it at the
1727 * front. */
1728 &dp_genl_ops_openflow,
1730 &dp_genl_ops_add_dp,
1731 &dp_genl_ops_del_dp,
1732 &dp_genl_ops_query_dp,
1733 &dp_genl_ops_add_port,
1734 &dp_genl_ops_del_port,
1737 static int dp_init_netlink(void)
1742 err = genl_register_family(&dp_genl_family);
1746 for (i = 0; i < ARRAY_SIZE(dp_genl_all_ops); i++) {
1747 err = genl_register_ops(&dp_genl_family, dp_genl_all_ops[i]);
1749 goto err_unregister;
1752 strcpy(mc_group.name, "openflow");
1753 err = genl_register_mc_group(&dp_genl_family, &mc_group);
1755 goto err_unregister;
1760 genl_unregister_family(&dp_genl_family);
1764 static void dp_uninit_netlink(void)
1766 genl_unregister_family(&dp_genl_family);
1769 static int __init dp_init(void)
1773 printk("OpenFlow "VERSION", built "__DATE__" "__TIME__", "
1774 "protocol 0x%02x\n", OFP_VERSION);
1780 err = register_netdevice_notifier(&dp_device_notifier);
1782 goto error_flow_exit;
1784 err = dp_init_netlink();
1786 goto error_unreg_notifier;
1788 /* Hook into callback used by the bridge to intercept packets.
1789 * Parasites we are. */
1790 if (br_handle_frame_hook)
1791 printk("openflow: hijacking bridge hook\n");
1792 br_handle_frame_hook = dp_frame_hook;
1796 error_unreg_notifier:
1797 unregister_netdevice_notifier(&dp_device_notifier);
1801 printk(KERN_EMERG "openflow: failed to install!\n");
1805 static void dp_cleanup(void)
1808 dp_uninit_netlink();
1809 unregister_netdevice_notifier(&dp_device_notifier);
1811 br_handle_frame_hook = NULL;
1814 module_init(dp_init);
1815 module_exit(dp_cleanup);
1817 MODULE_DESCRIPTION("OpenFlow switching datapath");
1818 MODULE_AUTHOR("Copyright (c) 2007, 2008 The Board of Trustees of The Leland Stanford Junior University");
1819 MODULE_LICENSE("GPL");