-SUBDIRS =
+SUBDIRS =
if LINUX_ENABLED
SUBDIRS += linux
endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
static struct sk_buff *brc_reply; /* Reply from userspace. */
static u32 brc_seq; /* Sequence number for current op. */
-static struct sk_buff *brc_send_command(struct sk_buff *, struct nlattr **attrs);
+static struct sk_buff *brc_send_command(struct sk_buff *,
+ struct nlattr **attrs);
static int brc_send_simple_command(struct sk_buff *);
static struct sk_buff *brc_make_request(int op, const char *bridge,
int err;
switch (cmd) {
- case SIOCDEVPRIVATE:
- err = old_dev_ioctl(dev, rq, cmd);
- break;
-
- case SIOCBRADDIF:
- return brc_add_del_port(dev, rq->ifr_ifindex, 1);
- case SIOCBRDELIF:
- return brc_add_del_port(dev, rq->ifr_ifindex, 0);
-
- default:
- err = -EOPNOTSUPP;
- break;
+ case SIOCDEVPRIVATE:
+ err = old_dev_ioctl(dev, rq, cmd);
+ break;
+
+ case SIOCBRADDIF:
+ return brc_add_del_port(dev, rq->ifr_ifindex, 1);
+ case SIOCBRDELIF:
+ return brc_add_del_port(dev, rq->ifr_ifindex, 0);
+
+ default:
+ err = -EOPNOTSUPP;
+ break;
}
return err;
if (!wait_for_completion_timeout(&brc_done, BRC_TIMEOUT)) {
pr_warn("timed out waiting for userspace\n");
goto error;
- }
+ }
/* Grab reply. */
spin_lock_irqsave(&brc_lock, flags);
{
int err;
- printk("Open vSwitch Bridge Compatibility, built "__DATE__" "__TIME__"\n");
+ pr_info("Open vSwitch Bridge Compatibility, built "__DATE__" "__TIME__"\n");
/* Set the bridge ioctl handler */
brioctl_set(brc_ioctl_deviceless_stub);
/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
* However, on the receive side we should only get CHECKSUM_PARTIAL
* packets from Xen, which uses some special fields to represent this
- * (see vswitch_skb_checksum_setup()). Since we can only make one type work,
- * pick the one that actually happens in practice.
+ * (see vswitch_skb_checksum_setup()). Since we can only make one type
+ * work, pick the one that actually happens in practice.
*
* On the transmit side (basically after skb_checksum_setup()
* has been run or on internal dev transmit), packets with
}
/*
- * forward_ip_summed - map internal checksum state back onto native kernel fields
+ * forward_ip_summed - map internal checksum state back onto native
+ * kernel fields.
*
* @skb: Packet to manipulate.
- * @xmit: Whether we are about send on the transmit path the network stack. This
- * follows the same logic as the @xmit field in compute_ip_summed().
- * Generally, a given vport will have opposite values for @xmit passed to these
- * two functions.
+ * @xmit: Whether we are about to send to the network stack on the transmit
+ * path. This follows the same logic as the @xmit field in
+ * compute_ip_summed().
+ * Generally, a given vport will have opposite values for @xmit passed to
+ * these two functions.
*
* When a packet is about to egress from OVS take our internal fields (including
* any modifications we have made) and recreate the correct representation for
*/
void forward_ip_summed(struct sk_buff *skb, bool xmit)
{
- switch(get_ip_summed(skb)) {
+ switch (get_ip_summed(skb)) {
case OVS_CSUM_NONE:
skb->ip_summed = CHECKSUM_NONE;
break;
}
if (get_ip_summed(skb) == OVS_CSUM_PARTIAL)
- skb_set_transport_header(skb, OVS_CB(skb)->csum_start - skb_headroom(skb));
+ skb_set_transport_header(skb, OVS_CB(skb)->csum_start -
+ skb_headroom(skb));
}
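/*
 * Illustrative sketch, not part of this patch: one way a netdev-style vport
 * might pair these helpers with opposite @xmit values, as the comment above
 * describes.  The example_* names are hypothetical; only compute_ip_summed(),
 * forward_ip_summed() and vport_receive() come from the datapath itself.
 */
static void example_port_receive(struct vport *vport, struct sk_buff *skb)
{
	/* Frame arrived from a real device: record its checksum state in the
	 * internal OVS_CSUM_* representation (xmit == false). */
	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}
	vport_receive(vport, skb);
}

static int example_port_send(struct vport *vport, struct sk_buff *skb)
{
	/* Frame is leaving OVS towards a real device: translate the internal
	 * state back into skb->ip_summed (xmit == true). */
	forward_ip_summed(skb, true);
	return dev_queue_xmit(skb);
}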
u8 get_ip_summed(struct sk_buff *skb)
*csum_offset = skb->csum;
}
-void set_skb_csum_pointers(struct sk_buff *skb, u16 csum_start, u16 csum_offset)
+void set_skb_csum_pointers(struct sk_buff *skb, u16 csum_start,
+ u16 csum_offset)
{
OVS_CB(skb)->csum_start = csum_start;
skb->csum = csum_offset;
void set_ip_summed(struct sk_buff *skb, u8 ip_summed);
void get_skb_csum_pointers(const struct sk_buff *skb, u16 *csum_start,
u16 *csum_offset);
-void set_skb_csum_pointers(struct sk_buff *skb, u16 csum_start, u16 csum_offset);
+void set_skb_csum_pointers(struct sk_buff *skb, u16 csum_start,
+ u16 csum_offset);
#else
static inline int compute_ip_summed(struct sk_buff *skb, bool xmit)
{
update_csum_start(skb, skb_headroom(skb) - old_headroom);
- return 0;
+ return 0;
}
#define pskb_expand_head rpl_pskb_expand_head
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
-#include <asm/bug.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
return ifindex;
}
-static inline size_t br_nlmsg_size(void)
+static size_t br_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
}
/* Look up flow. */
- flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+ flow = flow_tbl_lookup(rcu_dereference(dp->table),
+ &key, key_len);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
int rem;
memset(attrs, 0, sizeof(attrs));
- nla_for_each_nested (a, attr, rem) {
+ nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
return -EINVAL;
#define ACTION(act, key) (((act) << 8) | (key))
- switch(ACTION(act_type, key_type)) {
+ switch (ACTION(act_type, key_type)) {
const struct ovs_key_ipv4 *ipv4_key;
const struct ovs_key_8021q *q_key;
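/*
 * Illustrative sketch, not part of this patch: ACTION() packs the action type
 * into the high byte and the key type into the low byte, e.g.
 * ACTION(2, 5) == (2 << 8) | 5 == 0x205, so a single switch dispatches on the
 * (action, key) pair.  The OVS_ACTION_ATTR_SET / OVS_KEY_ATTR_IPV4 names are
 * assumed from the datapath uAPI of this era.
 */
	switch (ACTION(act_type, key_type)) {
	case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_IPV4):
		/* nla_data(a) points at a struct ovs_key_ipv4 to validate. */
		break;
	default:
		return -EINVAL;
	}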
static int validate_userspace(const struct nlattr *attr)
{
- static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =
- {
+ static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
};
struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
int error;
- error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, userspace_policy);
+ error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
+ attr, userspace_policy);
if (error)
return error;
- if (!a[OVS_USERSPACE_ATTR_PID] || !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+ if (!a[OVS_USERSPACE_ATTR_PID] ||
+ !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
return -EINVAL;
return 0;
/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
- struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
+ struct sk_buff *skb, u32 pid,
+ u32 seq, u32 flags, u8 cmd)
{
const int skb_orig_len = skb->len;
const struct sw_flow_actions *sf_acts;
NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
if (stats.n_packets)
- NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats);
+ NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
+ sizeof(struct ovs_flow_stats), &stats);
if (tcp_flags)
NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
sf_acts = rcu_dereference_protected(flow->sf_acts,
lockdep_genl_is_held());
- len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */
- len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
- len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */
- len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */
- len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */
- return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL);
+ /* OVS_FLOW_ATTR_KEY */
+ len = nla_total_size(FLOW_BUFSIZE);
+ /* OVS_FLOW_ATTR_ACTIONS */
+ len += nla_total_size(sf_acts->actions_len);
+ /* OVS_FLOW_ATTR_STATS */
+ len += nla_total_size(sizeof(struct ovs_flow_stats));
+ /* OVS_FLOW_ATTR_TCP_FLAGS */
+ len += nla_total_size(1);
+ /* OVS_FLOW_ATTR_USED */
+ len += nla_total_size(8);
+
+ len += NLMSG_ALIGN(sizeof(struct ovs_header));
+
+ return genlmsg_new(len, GFP_KERNEL);
}
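/*
 * Illustrative note, not part of this patch: nla_total_size(payload) is
 * NLA_ALIGN(NLA_HDRLEN + payload), i.e. the attribute header plus the padded
 * payload.  For the 8-byte OVS_FLOW_ATTR_USED value above that is
 * NLA_ALIGN(4 + 8) == 12 bytes, and the sum of all such terms plus the
 * aligned struct ovs_header is what genlmsg_new() is asked to allocate.
 */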
-static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
+static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
+ struct datapath *dp,
u32 pid, u32 seq, u8 cmd)
{
struct sk_buff *skb;
flow_tbl_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
- info->snd_seq, OVS_FLOW_CMD_NEW);
+ info->snd_seq,
+ OVS_FLOW_CMD_NEW);
} else {
/* We found a matching flow. */
struct sw_flow_actions *old_acts;
+ struct nlattr *acts_attrs;
/* Bail out if we're not allowed to modify an existing flow.
* We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
/* Update actions. */
old_acts = rcu_dereference_protected(flow->sf_acts,
lockdep_genl_is_held());
- if (a[OVS_FLOW_ATTR_ACTIONS] &&
- (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) ||
- memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]),
- old_acts->actions_len))) {
+ acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
+ if (acts_attrs &&
+ (old_acts->actions_len != nla_len(acts_attrs) ||
+ memcmp(old_acts->actions, nla_data(acts_attrs),
+ old_acts->actions_len))) {
struct sw_flow_actions *new_acts;
- new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+ new_acts = flow_actions_alloc(acts_attrs);
error = PTR_ERR(new_acts);
if (IS_ERR(new_acts))
goto error;
}
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
- info->snd_seq, OVS_FLOW_CMD_NEW);
+ info->snd_seq, OVS_FLOW_CMD_NEW);
/* Clear stats. */
if (a[OVS_FLOW_ATTR_CLEAR]) {
if (!IS_ERR(reply))
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
else
netlink_set_err(INIT_NET_GENL_SOCK, 0,
dp_flow_multicast_group.id, PTR_ERR(reply));
if (!flow)
return -ENOENT;
- reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
+ reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+ info->snd_seq, OVS_FLOW_CMD_NEW);
if (IS_ERR(reply))
return PTR_ERR(reply);
dp = get_dp(ovs_header->dp_ifindex);
if (!dp)
- return -ENODEV;
+ return -ENODEV;
table = get_table_protected(dp);
flow = flow_tbl_lookup(table, &key, key_len);
if (!flow)
break;
- if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
+ if (ovs_flow_cmd_fill_info(flow, dp, skb,
+ NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
OVS_FLOW_CMD_NEW) < 0)
break;
}
/* Called with genl_mutex and optionally with RTNL lock also. */
-static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
+static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+ struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
struct datapath *dp;
goto err_destroy_percpu;
}
- reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
err = PTR_ERR(reply);
if (IS_ERR(reply))
goto err_destroy_local_port;
if (IS_ERR(dp))
goto exit_unlock;
- reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL);
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_DEL);
err = PTR_ERR(reply);
if (IS_ERR(reply))
goto exit_unlock;
- list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
+ list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
if (vport->port_no != OVSP_LOCAL)
dp_detach_port(vport);
if (IS_ERR(dp))
return PTR_ERR(dp);
- reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
netlink_set_err(INIT_NET_GENL_SOCK, 0,
if (IS_ERR(dp))
return PTR_ERR(dp);
- reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
if (IS_ERR(reply))
return PTR_ERR(reply);
int skip = cb->args[0];
int i = 0;
- list_for_each_entry (dp, &dps, list_node) {
+ list_for_each_entry(dp, &dps, list_node) {
if (i < skip)
continue;
if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
- nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
+ nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS,
+ sizeof(struct ovs_vport_stats));
if (!nla)
goto nla_put_failure;
}
/* Called with RTNL lock. */
-static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
+static int change_vport(struct vport *vport,
+ struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
int err = 0;
if (IS_ERR(vport))
goto exit_unlock;
- dp_sysfs_add_if(vport);
+ dp_sysfs_add_if(vport);
err = change_vport(vport, a);
if (!err) {
reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
- info->snd_seq, OVS_VPORT_CMD_NEW);
+ info->snd_seq,
+ OVS_VPORT_CMD_NEW);
if (IS_ERR(reply))
err = PTR_ERR(reply);
}
goto exit_unlock;
err = 0;
- if (a[OVS_VPORT_ATTR_TYPE] && nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport_get_type(vport))
+ if (a[OVS_VPORT_ATTR_TYPE] &&
+ nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport_get_type(vport))
err = -EINVAL;
+
if (!err && a[OVS_VPORT_ATTR_OPTIONS])
err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
if (!err)
BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
- printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
+ pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n",
+ VERSION BUILDNR);
err = tnl_init();
if (err)
void dp_process_received_packet(struct vport *, struct sk_buff *);
void dp_detach_port(struct vport *);
-int dp_upcall(struct datapath *, struct sk_buff *, const struct dp_upcall_info *);
+int dp_upcall(struct datapath *, struct sk_buff *,
+ const struct dp_upcall_info *);
struct datapath *get_dp(int dp_idx);
const char *dp_name(const struct datapath *dp);
int dp_sysfs_del_if(struct vport *p);
#ifdef CONFIG_SYSFS
-extern struct sysfs_ops brport_sysfs_ops;
+extern const struct sysfs_ops brport_sysfs_ops;
#endif
#endif /* dp_sysfs.h */
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
- printk("%s: xxx writing dp parms not supported yet!\n",
+ pr_warning("%s: xxx writing dp parms not supported yet!\n",
dp_name(dp));
else
result = -ENODEV;
static void set_forward_delay(struct datapath *dp, unsigned long val)
{
- printk("%s: xxx attempt to set_forward_delay()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_forward_delay()\n", dp_name(dp));
}
static ssize_t store_forward_delay(DEVICE_PARAMS,
static void set_hello_time(struct datapath *dp, unsigned long val)
{
- printk("%s: xxx attempt to set_hello_time()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_hello_time()\n", dp_name(dp));
}
static ssize_t store_hello_time(DEVICE_PARAMS,
static void set_max_age(struct datapath *dp, unsigned long val)
{
- printk("%s: xxx attempt to set_max_age()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_max_age()\n", dp_name(dp));
}
static ssize_t store_max_age(DEVICE_PARAMS,
static void set_ageing_time(struct datapath *dp, unsigned long val)
{
- printk("%s: xxx attempt to set_ageing_time()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_ageing_time()\n", dp_name(dp));
}
static ssize_t store_ageing_time(DEVICE_PARAMS,
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
- printk("%s: xxx attempt to set_stp_state()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_stp_state()\n", dp_name(dp));
else
result = -ENODEV;
static void set_priority(struct datapath *dp, unsigned long val)
{
- printk("%s: xxx attempt to set_priority()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_priority()\n", dp_name(dp));
}
static ssize_t store_priority(DEVICE_PARAMS,
addr = vport_get_addr(vport);
result = sprintf(buf, "%.2x%.2x.%.2x%.2x%.2x%.2x%.2x%.2x\n",
- 0, 0, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ 0, 0, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5]);
} else
result = -ENODEV;
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
- printk("%s: xxx attempt to store_group_addr()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to store_group_addr()\n",
+ dp_name(dp));
else
result = -ENODEV;
err = kobject_add(&dp->ifobj, kobj, SYSFS_BRIDGE_PORT_SUBDIR);
if (err) {
pr_info("%s: can't add kobject (directory) %s/%s\n",
- __FUNCTION__, dp_name(dp), kobject_name(&dp->ifobj));
+ __func__, dp_name(dp), kobject_name(&dp->ifobj));
goto out2;
}
kobject_uevent(&dp->ifobj, KOBJ_ADD);
* This has been shamelessly copied from the kernel sources.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
};
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
-#define BRPORT_ATTR(_name,_mode,_show,_store) \
+#define BRPORT_ATTR(_name, _mode, _show, _store) \
struct brport_attribute brport_attr_##_name = { \
.attr = {.name = __stringify(_name), \
.mode = _mode }, \
.store = _store, \
};
#else
-#define BRPORT_ATTR(_name,_mode,_show,_store) \
-struct brport_attribute brport_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode, \
+#define BRPORT_ATTR(_name, _mode, _show, _store) \
+struct brport_attribute brport_attr_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode, \
.owner = THIS_MODULE, }, \
.show = _show, \
.store = _store, \
}
static BRPORT_ATTR(state, S_IRUGO, show_port_state, NULL);
-static ssize_t show_message_age_timer(struct vport *p,
- char *buf)
+static ssize_t show_message_age_timer(struct vport *p, char *buf)
{
return sprintf(buf, "%d\n", 0);
}
static BRPORT_ATTR(message_age_timer, S_IRUGO, show_message_age_timer, NULL);
-static ssize_t show_forward_delay_timer(struct vport *p,
- char *buf)
+static ssize_t show_forward_delay_timer(struct vport *p, char *buf)
{
return sprintf(buf, "%d\n", 0);
}
static BRPORT_ATTR(forward_delay_timer, S_IRUGO, show_forward_delay_timer, NULL);
-static ssize_t show_hold_timer(struct vport *p,
- char *buf)
+static ssize_t show_hold_timer(struct vport *p, char *buf)
{
return sprintf(buf, "%d\n", 0);
}
#define to_vport_attr(_at) container_of(_at, struct brport_attribute, attr)
#define to_vport(obj) container_of(obj, struct vport, kobj)
-static ssize_t brport_show(struct kobject * kobj,
- struct attribute * attr, char * buf)
+static ssize_t brport_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
- struct brport_attribute * brport_attr = to_vport_attr(attr);
- struct vport * p = to_vport(kobj);
+ struct brport_attribute *brport_attr = to_vport_attr(attr);
+ struct vport *p = to_vport(kobj);
return brport_attr->show(p, buf);
}
-static ssize_t brport_store(struct kobject * kobj,
- struct attribute * attr,
- const char * buf, size_t count)
+static ssize_t brport_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
{
- struct vport * p = to_vport(kobj);
+ struct vport *p = to_vport(kobj);
ssize_t ret = -EINVAL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- printk("%s: xxx writing port parms not supported yet!\n",
- dp_name(p->dp));
+ pr_warning("%s: xxx writing port parms not supported yet!\n",
+ dp_name(p->dp));
return ret;
}
-struct sysfs_ops brport_sysfs_ops = {
+const struct sysfs_ops brport_sysfs_ops = {
.show = brport_show,
.store = brport_store,
};
#include "flow.h"
#include "datapath.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
return 0;
}
-static inline bool arphdr_ok(struct sk_buff *skb)
+static bool arphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_network_offset(skb) +
sizeof(struct arp_eth_header));
}
-static inline int check_iphdr(struct sk_buff *skb)
+static int check_iphdr(struct sk_buff *skb)
{
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int ip_len;
return 0;
}
-static inline bool tcphdr_ok(struct sk_buff *skb)
+static bool tcphdr_ok(struct sk_buff *skb)
{
int th_ofs = skb_transport_offset(skb);
int tcp_len;
return true;
}
-static inline bool udphdr_ok(struct sk_buff *skb)
+static bool udphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct udphdr));
}
-static inline bool icmphdr_ok(struct sk_buff *skb)
+static bool icmphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
sizeof(struct icmphdr));
}
#define SW_FLOW_KEY_OFFSET(field) \
- offsetof(struct sw_flow_key, field) + \
- FIELD_SIZEOF(struct sw_flow_key, field)
+ (offsetof(struct sw_flow_key, field) + \
+ FIELD_SIZEOF(struct sw_flow_key, field))
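/*
 * Illustrative note, not part of this patch: SW_FLOW_KEY_OFFSET(field)
 * evaluates to the offset of the first byte after 'field', i.e. the length of
 * the sw_flow_key prefix that is meaningful once 'field' has been filled in.
 * If, say, ipv4.addr ended 40 bytes into struct sw_flow_key (hypothetical
 * layout), then
 *
 *	key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
 *
 * would set key_len to 40, and only that prefix is hashed and compared by the
 * flow table lookup.
 */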
/**
* skip_exthdr - skip any IPv6 extension headers
ipv6_addr_copy(&key->ipv6.addr.src, &nh->saddr);
ipv6_addr_copy(&key->ipv6.addr.dst, &nh->daddr);
- payload_ofs = skip_exthdr(skb, payload_ofs, &nexthdr, &key->ip.tos_frag);
+ payload_ofs = skip_exthdr(skb, payload_ofs,
+ &nexthdr, &key->ip.tos_frag);
if (unlikely(payload_ofs < 0))
return -EINVAL;
static struct flex_array __rcu *alloc_buckets(unsigned int n_buckets)
{
- struct flex_array __rcu * buckets;
+ struct flex_array __rcu *buckets;
int i, err;
buckets = flex_array_alloc(sizeof(struct hlist_head *),
return buckets;
}
-static void free_buckets(struct flex_array * buckets)
+static void free_buckets(struct flex_array *buckets)
{
flex_array_free(buckets);
}
void flow_tbl_deferred_destroy(struct flow_table *table)
{
- if (!table)
- return;
+ if (!table)
+ return;
- call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+ call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
icmp_len -= sizeof(*nd);
offset = 0;
while (icmp_len >= 8) {
- struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd->opt + offset);
+ struct nd_opt_hdr *nd_opt =
+ (struct nd_opt_hdr *)(nd->opt + offset);
int opt_len = nd_opt->nd_opt_len * 8;
if (unlikely(!opt_len || opt_len > icmp_len))
if (icmphdr_ok(skb)) {
struct icmphdr *icmp = icmp_hdr(skb);
/* The ICMP type and code fields use the 16-bit
- * transport port fields, so we need to store them
- * in 16-bit network byte order. */
+ * transport port fields, so we need to store
+ * them in 16-bit network byte order. */
key->ipv4.tp.src = htons(icmp->type);
key->ipv4.tp.dst = htons(icmp->code);
}
u32 flow_hash(const struct sw_flow_key *key, int key_len)
{
- return jhash2((u32*)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
+ return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
}
-struct sw_flow * flow_tbl_lookup(struct flow_table *table,
+struct sw_flow *flow_tbl_lookup(struct flow_table *table,
struct sw_flow_key *key, int key_len)
{
struct sw_flow *flow;
const struct ovs_key_arp *arp_key;
const struct ovs_key_nd *nd_key;
- int type = nla_type(nla);
+ int type = nla_type(nla);
- if (type > OVS_KEY_ATTR_MAX || nla_len(nla) != ovs_key_lens[type])
+ if (type > OVS_KEY_ATTR_MAX ||
+ nla_len(nla) != ovs_key_lens[type])
goto invalid;
#define TRANSITION(PREV_TYPE, TYPE) (((PREV_TYPE) << 16) | (TYPE))
prev_type = OVS_KEY_ATTR_UNSPEC;
nla_for_each_nested(nla, attr, rem) {
- int type = nla_type(nla);
+ int type = nla_type(nla);
if (type > OVS_KEY_ATTR_MAX || nla_len(nla) != ovs_key_lens[type])
return -EINVAL;
u8 tcp_flags; /* Union of seen TCP flags. */
};
-struct arp_eth_header
-{
+struct arp_eth_header {
__be16 ar_hrd; /* format of hardware address */
__be16 ar_pro; /* format of protocol address */
unsigned char ar_hln; /* length of hardware address */
#define TBL_MIN_BUCKETS 1024
struct flow_table {
- struct flex_array *buckets;
- unsigned int count, n_buckets;
- struct rcu_head rcu;
+ struct flex_array *buckets;
+ unsigned int count, n_buckets;
+ struct rcu_head rcu;
};
static inline int flow_tbl_count(struct flow_table *table)
include $(srcdir)/Modules.mk
EXTRA_CFLAGS := -DVERSION=\"$(VERSION)\"
-EXTRA_CFLAGS += -I$(srcdir)/..
+EXTRA_CFLAGS += -I$(srcdir)/..
EXTRA_CFLAGS += -I$(builddir)/..
ifeq '$(BUILDNR)' '0'
EXTRA_CFLAGS += -DBUILDNR=\"\"
linux/compat/skbuff-openvswitch.c \
linux/compat/time.c
openvswitch_headers += \
- linux/compat/include/asm-generic/bug.h \
linux/compat/include/linux/compiler.h \
linux/compat/include/linux/compiler-gcc.h \
linux/compat/include/linux/cpumask.h \
static inline unsigned ipv6_addr_scope2type(unsigned scope)
{
- switch(scope) {
+ switch (scope) {
case IPV6_ADDR_SCOPE_NODELOCAL:
return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
IPV6_ADDR_LOOPBACK);
* page pointers that we can fit in the base structure or (using
* integer math):
*
- * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
+ * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
*
* Here's a table showing example capacities. Note that the maximum
 * index that the get/put() functions can handle is just nr_objects-1. This
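/*
 * Worked example, not part of this patch: with 4096-byte pages, 8-byte
 * pointers and 4-byte elements the formula above gives
 *
 *	(4096 / 4) * (4096 - 8) / 8 = 1024 * 511 = 523264
 *
 * objects reachable through the page pointers embedded in the base structure.
 */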
* flex_array_put - copy data into the array at @element_nr
* @fa: the flex array to copy data into
* @element_nr: index of the position in which to insert
- * the new element.
+ * the new element.
* @src: address of data to copy into the array
* @flags: page allocation flags to use for array expansion
*
/**
* flex_array_prealloc - guarantee that array space exists
* @fa: the flex array for which to preallocate parts
- * @start: index of first array element for which space is allocated
+ * @start: index of first array element for which space is
+ * allocated
* @nr_elements: number of elements for which space is allocated
* @flags: page allocation flags
*
+++ /dev/null
-#ifndef __ASM_GENERIC_BUG_WRAPPER_H
-#define __ASM_GENERIC_BUG_WRAPPER_H
-
-#include_next <asm-generic/bug.h>
-
-#ifndef WARN_ON_ONCE
-#define WARN_ON_ONCE(condition) ({ \
- static int __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once) && !__warned) { \
- WARN_ON(1); \
- __warned = 1; \
- } \
- unlikely(__ret_warn_once); \
-})
-#endif
-
-#endif
#include <linux/list.h>
enum dmi_field {
- DMI_NONE,
- DMI_BIOS_VENDOR,
- DMI_BIOS_VERSION,
- DMI_BIOS_DATE,
- DMI_SYS_VENDOR,
- DMI_PRODUCT_NAME,
- DMI_PRODUCT_VERSION,
- DMI_PRODUCT_SERIAL,
- DMI_PRODUCT_UUID,
- DMI_BOARD_VENDOR,
- DMI_BOARD_NAME,
- DMI_BOARD_VERSION,
- DMI_BOARD_SERIAL,
- DMI_BOARD_ASSET_TAG,
- DMI_CHASSIS_VENDOR,
- DMI_CHASSIS_TYPE,
- DMI_CHASSIS_VERSION,
- DMI_CHASSIS_SERIAL,
- DMI_CHASSIS_ASSET_TAG,
- DMI_STRING_MAX,
+ DMI_NONE,
+ DMI_BIOS_VENDOR,
+ DMI_BIOS_VERSION,
+ DMI_BIOS_DATE,
+ DMI_SYS_VENDOR,
+ DMI_PRODUCT_NAME,
+ DMI_PRODUCT_VERSION,
+ DMI_PRODUCT_SERIAL,
+ DMI_PRODUCT_UUID,
+ DMI_BOARD_VENDOR,
+ DMI_BOARD_NAME,
+ DMI_BOARD_VERSION,
+ DMI_BOARD_SERIAL,
+ DMI_BOARD_ASSET_TAG,
+ DMI_CHASSIS_VENDOR,
+ DMI_CHASSIS_TYPE,
+ DMI_CHASSIS_VERSION,
+ DMI_CHASSIS_SERIAL,
+ DMI_CHASSIS_ASSET_TAG,
+ DMI_STRING_MAX,
};
enum dmi_device_type {
- DMI_DEV_TYPE_ANY = 0,
- DMI_DEV_TYPE_OTHER,
- DMI_DEV_TYPE_UNKNOWN,
- DMI_DEV_TYPE_VIDEO,
- DMI_DEV_TYPE_SCSI,
- DMI_DEV_TYPE_ETHERNET,
- DMI_DEV_TYPE_TOKENRING,
- DMI_DEV_TYPE_SOUND,
- DMI_DEV_TYPE_IPMI = -1,
- DMI_DEV_TYPE_OEM_STRING = -2
+ DMI_DEV_TYPE_ANY = 0,
+ DMI_DEV_TYPE_OTHER,
+ DMI_DEV_TYPE_UNKNOWN,
+ DMI_DEV_TYPE_VIDEO,
+ DMI_DEV_TYPE_SCSI,
+ DMI_DEV_TYPE_ETHERNET,
+ DMI_DEV_TYPE_TOKENRING,
+ DMI_DEV_TYPE_SOUND,
+ DMI_DEV_TYPE_IPMI = -1,
+ DMI_DEV_TYPE_OEM_STRING = -2
};
struct dmi_header {
- u8 type;
- u8 length;
- u16 handle;
+ u8 type;
+ u8 length;
+ u16 handle;
};
/*
* DMI callbacks for problem boards
*/
struct dmi_strmatch {
- u8 slot;
- char *substr;
+ u8 slot;
+ char *substr;
};
struct dmi_system_id {
- int (*callback)(struct dmi_system_id *);
- const char *ident;
- struct dmi_strmatch matches[4];
- void *driver_data;
+ int (*callback)(struct dmi_system_id *);
+ const char *ident;
+ struct dmi_strmatch matches[4];
+ void *driver_data;
};
#define DMI_MATCH(a, b) { a, b }
struct dmi_device {
- struct list_head list;
- int type;
- const char *name;
- void *device_data; /* Type specific data */
+ struct list_head list;
+ int type;
+ const char *name;
+ void *device_data; /* Type specific data */
};
/* No CONFIG_DMI before 2.6.16 */
#if defined(CONFIG_DMI) || defined(CONFIG_X86_32)
extern int dmi_check_system(struct dmi_system_id *list);
-extern char * dmi_get_system_info(int field);
-extern struct dmi_device * dmi_find_device(int type, const char *name,
- struct dmi_device *from);
+extern char *dmi_get_system_info(int field);
+extern struct dmi_device *dmi_find_device(int type, const char *name,
+ struct dmi_device *from);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
extern void dmi_scan_machine(void);
#endif
#else
static inline int dmi_check_system(struct dmi_system_id *list) { return 0; }
-static inline char * dmi_get_system_info(int field) { return NULL; }
-static inline struct dmi_device * dmi_find_device(int type, const char *name,
- struct dmi_device *from) { return NULL; }
+static inline char *dmi_get_system_info(int field) { return NULL; }
+static inline struct dmi_device *dmi_find_device(int type, const char *name,
+ struct dmi_device *from) { return NULL; }
static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(char *s) { return 0; }
#ifndef HAVE_SKBUFF_HEADER_HELPERS
static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
- return (struct icmphdr *)skb_transport_header(skb);
+ return (struct icmphdr *)skb_transport_header(skb);
}
#endif
#ifndef HAVE_ICMP6_HDR
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
{
- return (struct icmp6hdr *)skb_transport_header(skb);
+ return (struct icmp6hdr *)skb_transport_header(skb);
}
#endif
/* Same as above, but does so with platform independent 64bit types.
* These must be used when utilizing jiffies_64 (i.e. return value of
* get_jiffies_64() */
-#define time_after64(a,b) \
- (typecheck(__u64, a) && \
- typecheck(__u64, b) && \
- ((__s64)(b) - (__s64)(a) < 0))
-#define time_before64(a,b) time_after64(b,a)
+#define time_after64(a, b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)(b) - (__s64)(a) < 0))
+#define time_before64(a, b) time_after64(b, a)
-#define time_after_eq64(a,b) \
- (typecheck(__u64, a) && \
- typecheck(__u64, b) && \
- ((__s64)(a) - (__s64)(b) >= 0))
-#define time_before_eq64(a,b) time_after_eq64(b,a)
+#define time_after_eq64(a, b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)(a) - (__s64)(b) >= 0))
+#define time_before_eq64(a, b) time_after_eq64(b, a)
#endif /* linux kernel < 2.6.19 */
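/*
 * Illustrative usage, not part of this patch: a hypothetical deadline tracked
 * with the 64-bit jiffies counter, e.g. one set as get_jiffies_64() + 5 * HZ.
 */
static inline bool example_deadline_passed(u64 deadline)
{
	/* True once jiffies_64 has advanced past 'deadline'. */
	return time_after64(get_jiffies_64(), deadline);
}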
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#undef pr_emerg
#define pr_emerg(fmt, ...) \
- printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
#undef pr_alert
#define pr_alert(fmt, ...) \
- printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
#undef pr_crit
#define pr_crit(fmt, ...) \
- printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
#undef pr_err
#define pr_err(fmt, ...) \
- printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
#undef pr_warning
#define pr_warning(fmt, ...) \
- printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#undef pr_notice
#define pr_notice(fmt, ...) \
- printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#undef pr_info
#define pr_info(fmt, ...) \
- printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
#undef pr_cont
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
#endif
#ifndef DIV_ROUND_UP
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif
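/*
 * Worked example, not part of this patch: DIV_ROUND_UP(10, 4) expands to
 * (10 + 4 - 1) / 4 == 13 / 4 == 3, i.e. integer division rounded up.  The
 * flow code relies on this as DIV_ROUND_UP(key_len, sizeof(u32)) so that a
 * trailing partial u32 of the flow key is still hashed.
 */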
#endif /* linux/kernel.h */
/*
* Lock-class usage-state bits:
*/
-enum lock_usage_bit
-{
+enum lock_usage_bit {
LOCK_USED = 0,
LOCK_USED_IN_HARDIRQ,
LOCK_USED_IN_SOFTIRQ,
struct lockdep_map *instance;
#ifdef CONFIG_LOCK_STAT
- u64 waittime_stamp;
+ u64 waittime_stamp;
u64 holdtime_stamp;
#endif
/*
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-#include <asm/semaphore.h>
+#include <asm/semaphore.h>	/* <linux/semaphore.h> only exists in 2.6.26+ */
struct mutex {
struct semaphore sema;
#define mutex_destroy(mutex) do { } while (0)
#define __MUTEX_INITIALIZER(name) \
- __SEMAPHORE_INITIALIZER(name,1)
+ __SEMAPHORE_INITIALIZER(name, 1)
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = { __MUTEX_INITIALIZER(mutexname.sema) }
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
/* Linux 2.6.24 added a network namespace pointer to the macro. */
#undef for_each_netdev
-#define for_each_netdev(net,d) list_for_each_entry(d, &dev_base_head, dev_list)
+#define for_each_netdev(net, d) list_for_each_entry(d, &dev_base_head, dev_list)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
+#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
#undef SET_ETHTOOL_OPS
#define SET_ETHTOOL_OPS(netdev, ops) \
- ( (netdev)->ethtool_ops = (struct ethtool_ops *)(ops) )
+ ((netdev)->ethtool_ops = (struct ethtool_ops *)(ops))
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
#define netif_needs_gso rpl_netif_needs_gso
static inline int rpl_netif_needs_gso(struct sk_buff *skb, int features)
{
- return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
- unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
+ return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+ unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
#endif
#ifndef NLA_TYPE_MASK
#define NLA_F_NESTED (1 << 15)
#define NLA_F_NET_BYTEORDER (1 << 14)
-#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)
+#define NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER))
#endif
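/*
 * Worked example, not part of this patch: NLA_F_NESTED is 0x8000 and
 * NLA_F_NET_BYTEORDER is 0x4000, so NLA_TYPE_MASK is ~0xc000.  The outer
 * parentheses keep the macro a single expression; nla_type() below uses it as
 * nla->nla_type & NLA_TYPE_MASK to strip both flag bits.
 */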
#include <net/netlink.h>
#define skb_headroom rpl_skb_headroom
static inline unsigned int rpl_skb_headroom(const struct sk_buff *skb)
{
- return skb->data - skb->head;
+ return skb->data - skb->head;
}
#endif
#ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
- const int offset, void *to,
- const unsigned int len)
+ const int offset, void *to,
+ const unsigned int len)
{
memcpy(to, skb->data + offset, len);
}
static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
- const int offset,
- const void *from,
- const unsigned int len)
+ const int offset,
+ const void *from,
+ const unsigned int len)
{
memcpy(skb->data + offset, from, len);
}
#ifndef HAVE_SKB_COW_HEAD
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
- int cloned)
+ int cloned)
{
int delta = 0;
static inline int skb_transport_offset(const struct sk_buff *skb)
{
- return skb_transport_header(skb) - skb->data;
+ return skb_transport_header(skb) - skb->data;
}
static inline int skb_network_offset(const struct sk_buff *skb)
static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
- return tcp_hdr(skb)->doff * 4;
+ return tcp_hdr(skb)->doff * 4;
}
#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
#include <linux/version.h>
#ifndef RHEL_RELEASE_VERSION
-#define RHEL_RELEASE_VERSION(X,Y) ( 0 )
+#define RHEL_RELEASE_VERSION(X, Y) (0)
#endif
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
- (!defined(RHEL_RELEASE_CODE) || \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,1))))
+ (!defined(RHEL_RELEASE_CODE) || \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 1))))
extern unsigned long volatile jiffies;
*/
static inline unsigned long round_jiffies(unsigned long j)
{
- return __round_jiffies(j, 0); // FIXME
+ return __round_jiffies(j, 0); /* FIXME */
}
#endif /* linux kernel < 2.6.20 */
* @list: list entry for linking
* @family: pointer to family, need not be set before registering
*/
-struct genl_multicast_group
-{
+struct genl_multicast_group {
struct genl_family *family; /* private */
struct list_head list; /* private */
char name[GENL_NAMSIZ];
#ifndef NLA_PUT_BE16
#define NLA_PUT_BE16(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be16, attrtype, value)
+ NLA_PUT_TYPE(skb, __be16, attrtype, value)
#endif /* !NLA_PUT_BE16 */
#ifndef NLA_PUT_BE32
#define NLA_PUT_BE32(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be32, attrtype, value)
+ NLA_PUT_TYPE(skb, __be32, attrtype, value)
#endif /* !NLA_PUT_BE32 */
#ifndef NLA_PUT_BE64
#define NLA_PUT_BE64(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be64, attrtype, value)
+ NLA_PUT_TYPE(skb, __be64, attrtype, value)
#endif /* !NLA_PUT_BE64 */
#ifndef HAVE_NLA_GET_BE16
*/
static inline __be16 nla_get_be16(const struct nlattr *nla)
{
- return *(__be16 *) nla_data(nla);
+ return *(__be16 *) nla_data(nla);
}
#endif /* !HAVE_NLA_GET_BE16 */
*/
static inline __be32 nla_get_be32(const struct nlattr *nla)
{
- return *(__be32 *) nla_data(nla);
+ return *(__be32 *) nla_data(nla);
}
#endif
#define nla_get_be64 rpl_nla_get_be64
static inline __be64 nla_get_be64(const struct nlattr *nla)
{
- __be64 tmp;
+ __be64 tmp;
/* The additional cast is necessary because */
- nla_memcpy(&tmp, (struct nlattr *) nla, sizeof(tmp));
+ nla_memcpy(&tmp, (struct nlattr *) nla, sizeof(tmp));
- return tmp;
+ return tmp;
}
#endif
*/
static inline int nla_type(const struct nlattr *nla)
{
- return nla->nla_type & NLA_TYPE_MASK;
+ return nla->nla_type & NLA_TYPE_MASK;
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
#define nla_parse_nested(tb, maxtype, nla, policy) \
- nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), (struct nla_policy *)(policy))
+ nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), \
+ (struct nla_policy *)(policy))
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
#define nla_parse_nested(tb, maxtype, nla, policy) \
nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), policy)
* For negative values only the tv_sec field is negative !
*/
void set_normalized_timespec(struct timespec *ts,
- time_t sec, long nsec)
+ time_t sec, long nsec)
{
while (nsec >= NSEC_PER_SEC) {
nsec -= NSEC_PER_SEC;
#define rt_hh(rt) (rt_dst(rt).hh)
#endif
-static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
+static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
return vport_from_priv(tnl_vport);
}
/* This is analogous to rtnl_dereference for the tunnel cache. It checks that
* cache_lock is held, so it is only for update side code.
*/
-static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
+static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
return rcu_dereference_protected(tnl_vport->cache,
- lockdep_is_held(&tnl_vport->cache_lock));
+ lockdep_is_held(&tnl_vport->cache_lock));
}
-static inline void schedule_cache_cleaner(void)
+static void schedule_cache_cleaner(void)
{
schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}
static u32 port_hash(const struct port_lookup_key *key)
{
- return jhash2((u32*)key, (PORT_KEY_LEN / sizeof(u32)), 0);
+ return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}
-static inline struct hlist_head *find_bucket(u32 hash)
+static struct hlist_head *find_bucket(u32 hash)
{
return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}
struct hlist_node *n;
struct hlist_head *bucket;
u32 hash = port_hash(key);
- struct tnl_vport * tnl_vport;
+ struct tnl_vport *tnl_vport;
bucket = find_bucket(hash);
}
#endif /* IPv6 */
-bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
+bool tnl_frag_needed(struct vport *vport,
+ const struct tnl_mutable_config *mutable,
struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}
-static inline void *get_cached_header(const struct tnl_cache *cache)
+static void *get_cached_header(const struct tnl_cache *cache)
{
return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}
-static inline bool check_cache_valid(const struct tnl_cache *cache,
- const struct tnl_mutable_config *mutable)
+static bool check_cache_valid(const struct tnl_cache *cache,
+ const struct tnl_mutable_config *mutable)
{
struct hh_cache *hh;
rcu_read_unlock();
}
-static inline void create_eth_hdr(struct tnl_cache *cache,
- struct hh_cache *hh)
+static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
{
void *cache_data = get_cached_header(cache);
int hh_off;
u8 ipproto, u8 tos)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
- struct flowi fl = { .nl_u = { .ip4_u =
- { .daddr = mutable->key.daddr,
+ struct flowi fl = { .nl_u = { .ip4_u = {
+ .daddr = mutable->key.daddr,
.saddr = mutable->key.saddr,
.tos = tos } },
.proto = ipproto };
*cache = NULL;
tos = RT_TOS(tos);
- if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
+ if (likely(tos == mutable->tos &&
+ check_cache_valid(cur_cache, mutable))) {
*cache = cur_cache;
return cur_cache->rt;
} else {
}
}
-static inline bool need_linearize(const struct sk_buff *skb)
+static bool need_linearize(const struct sk_buff *skb)
{
int i;
iph->frag_off = frag_off;
ip_select_ident(iph, &rt_dst(rt), NULL);
- skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
+ skb = tnl_vport->tnl_ops->update_header(vport, mutable,
+ &rt_dst(rt), skb);
if (unlikely(!skb))
goto next;
if (likely(cache)) {
int orig_len = skb->len - cache->len;
- struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
+ struct vport *cache_vport;
+ cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
skb->protocol = htons(ETH_P_IP);
iph = ip_hdr(skb);
iph->tot_len = htons(skb->len - skb_network_offset(skb));
[OVS_TUNNEL_ATTR_TTL] = { .type = NLA_U8 },
};
-/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
+/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
+ * zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
const struct vport *cur_vport,
struct tnl_mutable_config *mutable)
int i;
for (i = 0; i < PORT_TABLE_SIZE; i++) {
- struct tnl_vport * tnl_vport;
+ struct tnl_vport *tnl_vport;
struct hlist_head *hash_head;
struct hlist_node *n;
#define TNL_T_KEY_MATCH (1 << 11)
/* Private flags not exposed to userspace in this form. */
-#define TNL_F_IN_KEY_MATCH (1 << 16) /* Store the key in tun_id to match in flow table. */
-#define TNL_F_OUT_KEY_ACTION (1 << 17) /* Get the key from a SET_TUNNEL action. */
+#define TNL_F_IN_KEY_MATCH (1 << 16) /* Store the key in tun_id to
+ * match in flow table. */
+#define TNL_F_OUT_KEY_ACTION (1 << 17) /* Get the key from a SET_TUNNEL
+ * action. */
/* All public tunnel flags. */
#define TNL_F_PUBLIC (TNL_F_CSUM | TNL_F_TOS_INHERIT | TNL_F_TTL_INHERIT | \
u32 tunnel_type;
};
-#define PORT_KEY_LEN (offsetof(struct port_lookup_key, tunnel_type) + \
+#define PORT_KEY_LEN (offsetof(struct port_lookup_key, tunnel_type) + \
FIELD_SIZEOF(struct port_lookup_key, tunnel_type))
/**
int len; /* Length of data to be memcpy'd from cache. */
int hh_len; /* Hardware hdr length, cached from hh_cache. */
- /* Sequence number of mutable->seq from which this cache was generated. */
+ /* Sequence number of mutable->seq from which this cache was
+ * generated. */
unsigned mutable_seq;
#ifdef HAVE_HH_SEQ
atomic_t frag_id;
spinlock_t cache_lock;
- struct tnl_cache __rcu *cache; /* Protected by RCU/cache_lock. */
+ struct tnl_cache __rcu *cache; /* Protected by RCU/cache_lock. */
#ifdef NEED_CACHE_TIMEOUT
/*
#define CAPWAP_FRAG_TIMEOUT (30 * HZ)
#define CAPWAP_FRAG_MAX_MEM (256 * 1024)
-#define CAPWAP_FRAG_PRUNE_MEM (192 *1024)
+#define CAPWAP_FRAG_PRUNE_MEM (192 * 1024)
#define CAPWAP_FRAG_SECRET_INTERVAL (10 * 60 * HZ)
/*
/* Flag indicating a 64bit key is stored in WSI data field */
#define CAPWAP_WSI_F_KEY64 0x80
-static inline struct capwaphdr *capwap_hdr(const struct sk_buff *skb)
+static struct capwaphdr *capwap_hdr(const struct sk_buff *skb)
{
return (struct capwaphdr *)(udp_hdr(skb) + 1);
}
if (mutable->flags & TNL_F_CSUM)
return -EINVAL;
- /* if keys are specified, then add WSI field */
+ /* if keys are specified, then add WSI field */
if (mutable->out_key || (mutable->flags & TNL_F_OUT_KEY_ACTION)) {
size += sizeof(struct capwaphdr_wsi) +
sizeof(struct capwaphdr_wsi_key);
return 0;
}
-static inline struct sk_buff *process_capwap_proto(struct sk_buff *skb,
- __be64 *key)
+static struct sk_buff *process_capwap_proto(struct sk_buff *skb, __be64 *key)
{
struct capwaphdr *cwh = capwap_hdr(skb);
int hdr_len = sizeof(struct udphdr);
/* All of the following functions relate to fragmentation reassembly. */
-static inline struct frag_queue *ifq_cast(struct inet_frag_queue *ifq)
+static struct frag_queue *ifq_cast(struct inet_frag_queue *ifq)
{
return container_of(ifq, struct frag_queue, ifq);
}
#endif
};
-static inline struct internal_dev *internal_dev_priv(struct net_device *netdev)
+static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
return netdev_priv(netdev);
}
return 0;
}
-static int internal_dev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int internal_dev_do_ioctl(struct net_device *dev,
+ struct ifreq *ifr, int cmd)
{
if (dp_ioctl_hook)
return dp_ioctl_hook(dev, ifr, cmd);
struct internal_dev *internal_dev;
int err;
- vport = vport_alloc(sizeof(struct netdev_vport), &internal_vport_ops, parms);
+ vport = vport_alloc(sizeof(struct netdev_vport),
+ &internal_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
netdev_vport = netdev_vport_priv(vport);
- netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev), parms->name, do_setup);
+ netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
+ parms->name, do_setup);
if (!netdev_vport->dev) {
err = -ENOMEM;
goto error_free_vport;
#include "vport-netdev.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
- !defined(HAVE_VLAN_BUG_WORKAROUND)
+ !defined(HAVE_VLAN_BUG_WORKAROUND)
#include <linux/module.h>
-static int vlan_tso __read_mostly = 0;
+static int vlan_tso __read_mostly;
module_param(vlan_tso, int, 0644);
MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
#else
struct netdev_vport *netdev_vport;
int err;
- vport = vport_alloc(sizeof(struct netdev_vport), &netdev_vport_ops, parms);
+ vport = vport_alloc(sizeof(struct netdev_vport),
+ &netdev_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
vport_receive(vport, skb);
}
-static inline unsigned packet_length(const struct sk_buff *skb)
+static unsigned packet_length(const struct sk_buff *skb)
{
unsigned length = skb->len - ETH_HLEN;
#if IFF_BRIDGE_PORT != IFF_OVS_DATAPATH
if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
#else
- if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
+ if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
#endif
return (struct vport *)rcu_dereference_rtnl(dev->rx_handler_data);
else
static void update_peers(const char *name, struct vport *);
-static inline struct patch_vport *patch_vport_priv(const struct vport *vport)
+static struct patch_vport *patch_vport_priv(const struct vport *vport)
{
return vport_priv(vport);
}
struct patch_config *patchconf;
int err;
- vport = vport_alloc(sizeof(struct patch_vport), &patch_vport_ops, parms);
+ vport = vport_alloc(sizeof(struct patch_vport),
+ &patch_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
* vport_priv(). vports that are no longer needed should be released with
* vport_free().
*/
-struct vport *vport_alloc(int priv_size, const struct vport_ops *ops, const struct vport_parms *parms)
+struct vport *vport_alloc(int priv_size, const struct vport_ops *ops,
+ const struct vport_parms *parms)
{
struct vport *vport;
size_t alloc_size;
VPORT_E_TX_ERROR,
};
-struct vport *vport_alloc(int priv_size, const struct vport_ops *, const struct vport_parms *);
+struct vport *vport_alloc(int priv_size, const struct vport_ops *,
+ const struct vport_parms *);
void vport_free(struct vport *);
#define VPORT_ALIGN 8
#define OVS_TUNNEL_ATTR_MAX (__OVS_TUNNEL_ATTR_MAX - 1)
#define TNL_F_CSUM (1 << 0) /* Checksum packets. */
-#define TNL_F_TOS_INHERIT (1 << 1) /* Inherit the ToS from the inner packet. */
-#define TNL_F_TTL_INHERIT (1 << 2) /* Inherit the TTL from the inner packet. */
-#define TNL_F_DF_INHERIT (1 << 3) /* Inherit the DF bit from the inner packet. */
-#define TNL_F_DF_DEFAULT (1 << 4) /* Set the DF bit if inherit off or not IP. */
+#define TNL_F_TOS_INHERIT (1 << 1) /* Inherit ToS from inner packet. */
+#define TNL_F_TTL_INHERIT (1 << 2) /* Inherit TTL from inner packet. */
+#define TNL_F_DF_INHERIT (1 << 3) /* Inherit DF bit from inner packet. */
+#define TNL_F_DF_DEFAULT (1 << 4) /* Set DF bit if inherit off or
+ * not IP. */
#define TNL_F_PMTUD (1 << 5) /* Enable path MTU discovery. */
#define TNL_F_HDR_CACHE (1 << 6) /* Enable tunnel header caching. */
#define TNL_F_IPSEC (1 << 7) /* Traffic is IPsec encrypted. */