return -ENODEV;
}
- vport_send(vport, skb);
+ ovs_vport_send(vport, skb);
return 0;
}
}
}
- return dp_upcall(dp, skb, &upcall);
+ return ovs_dp_upcall(dp, skb, &upcall);
}
static int sample(struct datapath *dp, struct sk_buff *skb,
{
if (net_ratelimit())
pr_warn("%s: flow looped %d times, dropping\n",
- dp_name(dp), MAX_LOOPS);
+ ovs_dp_name(dp), MAX_LOOPS);
actions->actions_len = 0;
return -ELOOP;
}
/* Execute a list of actions against 'skb'. */
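+/* Runs under rcu_read_lock(): both ovs_dp_process_received_packet() and the
+ * packet execute path call this with RCU read-side protection held. */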
-int execute_actions(struct datapath *dp, struct sk_buff *skb)
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
struct loop_counter *loop;
brioctl_set(brc_ioctl_deviceless_stub);
/* Set the openvswitch_mod device ioctl handler */
- dp_ioctl_hook = brc_dev_ioctl;
+ ovs_dp_ioctl_hook = brc_dev_ioctl;
/* Randomize the initial sequence number. This is not a security
* feature; it only helps avoid crossed wires between userspace and
static void brc_cleanup(void)
{
/* Unregister ioctl hooks */
- dp_ioctl_hook = NULL;
+ ovs_dp_ioctl_hook = NULL;
brioctl_set(NULL);
genl_unregister_family(&brc_genl_family);
#error Kernels before 2.6.18 or after 3.2 are not supported by this version of Open vSwitch.
#endif
-int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
-EXPORT_SYMBOL(dp_ioctl_hook);
+int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
+EXPORT_SYMBOL(ovs_dp_ioctl_hook);
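+/* Assigned by the bridge-compat module at init time, e.g.:
+ *	ovs_dp_ioctl_hook = brc_dev_ioctl;
+ * and consulted from internal_dev_do_ioctl() in vport-internal_dev.c. */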
/**
* DOC: Locking:
rcu_read_lock();
dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
if (dev) {
- struct vport *vport = internal_dev_get_vport(dev);
+ struct vport *vport = ovs_internal_dev_get_vport(dev);
if (vport)
dp = vport->dp;
}
}
/* Must be called with rcu_read_lock or RTNL lock. */
-const char *dp_name(const struct datapath *dp)
+const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
return vport->ops->get_name(vport);
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- flow_tbl_destroy((__force struct flow_table *)dp->table);
+ ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
}
{
struct vport *vport;
- vport = vport_add(parms);
+ vport = ovs_vport_add(parms);
if (!IS_ERR(vport)) {
struct datapath *dp = parms->dp;
}
/* Called with RTNL lock. */
-void dp_detach_port(struct vport *p)
+void ovs_dp_detach_port(struct vport *p)
{
ASSERT_RTNL();
if (p->port_no != OVSP_LOCAL)
- dp_sysfs_del_if(p);
+ ovs_dp_sysfs_del_if(p);
dp_ifinfo_notify(RTM_DELLINK, p);
/* First drop references to device. */
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
/* Then destroy it. */
- vport_del(p);
+ ovs_vport_del(p);
}
/* Must be called with rcu_read_lock. */
-void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
+void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct sw_flow *flow;
int key_len;
/* Extract flow from 'skb' into 'key'. */
- error = flow_extract(skb, p->port_no, &key, &key_len);
+ error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
if (unlikely(error)) {
kfree_skb(skb);
return;
}
/* Look up flow. */
- flow = flow_tbl_lookup(rcu_dereference(dp->table),
- &key, key_len);
+ flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
+ &key, key_len);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
upcall.key = &key;
upcall.userdata = NULL;
upcall.pid = p->upcall_pid;
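+ /* Flow table miss: hand the packet to userspace; the original skb
+ * is consumed just below. */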
- dp_upcall(dp, skb, &upcall);
+ ovs_dp_upcall(dp, skb, &upcall);
consume_skb(skb);
stats_counter = &stats->n_missed;
goto out;
}
stats_counter = &stats->n_hit;
- flow_used(OVS_CB(skb)->flow, skb);
- execute_actions(dp, skb);
+ ovs_flow_used(OVS_CB(skb)->flow, skb);
+ ovs_execute_actions(dp, skb);
out:
/* Update datapath statistics. */
.maxattr = OVS_PACKET_ATTR_MAX
};
-int dp_upcall(struct datapath *dp, struct sk_buff *skb,
- const struct dp_upcall_info *upcall_info)
+int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info)
{
struct dp_stats_percpu *stats;
int dp_ifindex;
upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- flow_to_nlattrs(upcall_info->key, user_skb);
+ ovs_flow_to_nlattrs(upcall_info->key, user_skb);
nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
return -ENODEV;
old_table = genl_dereference(dp->table);
- new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
+ new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
rcu_assign_pointer(dp->table, new_table);
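+ /* Readers may still be traversing the old table under RCU, so free
+ * it only after a grace period has elapsed. */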
- flow_tbl_deferred_destroy(old_table);
+ ovs_flow_tbl_deferred_destroy(old_table);
return 0;
}
packet->protocol = htons(ETH_P_802_2);
/* Build an sw_flow for sending this packet. */
- flow = flow_alloc();
+ flow = ovs_flow_alloc();
err = PTR_ERR(flow);
if (IS_ERR(flow))
goto err_kfree_skb;
- err = flow_extract(packet, -1, &flow->key, &key_len);
+ err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
if (err)
goto err_flow_put;
- err = flow_metadata_from_nlattrs(&flow->key.phy.priority,
- &flow->key.phy.in_port,
- &flow->key.phy.tun_id,
- a[OVS_PACKET_ATTR_KEY]);
+ err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
+ &flow->key.phy.in_port,
+ &flow->key.phy.tun_id,
+ a[OVS_PACKET_ATTR_KEY]);
if (err)
goto err_flow_put;
if (err)
goto err_flow_put;
- flow->hash = flow_hash(&flow->key, key_len);
+ flow->hash = ovs_flow_hash(&flow->key, key_len);
- acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+ acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
err = PTR_ERR(acts);
if (IS_ERR(acts))
goto err_flow_put;
goto err_unlock;
local_bh_disable();
- err = execute_actions(dp, packet);
+ err = ovs_execute_actions(dp, packet);
local_bh_enable();
rcu_read_unlock();
- flow_put(flow);
+ ovs_flow_put(flow);
return err;
err_unlock:
rcu_read_unlock();
err_flow_put:
- flow_put(flow);
+ ovs_flow_put(flow);
err_kfree_skb:
kfree_skb(packet);
err:
int i;
struct flow_table *table = genl_dereference(dp->table);
- stats->n_flows = flow_tbl_count(table);
+ stats->n_flows = ovs_flow_tbl_count(table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
for_each_possible_cpu(i) {
.maxattr = OVS_FLOW_ATTR_MAX
};
-static struct genl_multicast_group dp_flow_multicast_group = {
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
.name = OVS_FLOW_MCGROUP
};
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
goto nla_put_failure;
- err = flow_to_nlattrs(&flow->key, skb);
+ err = ovs_flow_to_nlattrs(&flow->key, skb);
if (err)
goto error;
nla_nest_end(skb, nla);
spin_unlock_bh(&flow->lock);
if (used)
- NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
+ NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
if (stats.n_packets)
NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
error = -EINVAL;
if (!a[OVS_FLOW_ATTR_KEY])
goto error;
- error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (error)
goto error;
goto error;
table = genl_dereference(dp->table);
- flow = flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow) {
struct sw_flow_actions *acts;
goto error;
/* Expand table, if necessary, to make room. */
- if (flow_tbl_need_to_expand(table)) {
+ if (ovs_flow_tbl_need_to_expand(table)) {
struct flow_table *new_table;
- new_table = flow_tbl_expand(table);
+ new_table = ovs_flow_tbl_expand(table);
if (!IS_ERR(new_table)) {
rcu_assign_pointer(dp->table, new_table);
- flow_tbl_deferred_destroy(table);
+ ovs_flow_tbl_deferred_destroy(table);
table = genl_dereference(dp->table);
}
}
/* Allocate flow. */
- flow = flow_alloc();
+ flow = ovs_flow_alloc();
if (IS_ERR(flow)) {
error = PTR_ERR(flow);
goto error;
clear_stats(flow);
/* Obtain actions. */
- acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+ acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
error = PTR_ERR(acts);
if (IS_ERR(acts))
goto error_free_flow;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- flow->hash = flow_hash(&key, key_len);
- flow_tbl_insert(table, flow);
+ flow->hash = ovs_flow_hash(&key, key_len);
+ ovs_flow_tbl_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
info->snd_seq,
old_acts->actions_len))) {
struct sw_flow_actions *new_acts;
- new_acts = flow_actions_alloc(acts_attrs);
+ new_acts = ovs_flow_actions_alloc(acts_attrs);
error = PTR_ERR(new_acts);
if (IS_ERR(new_acts))
goto error;
rcu_assign_pointer(flow->sf_acts, new_acts);
- flow_deferred_free_acts(old_acts);
+ ovs_flow_deferred_free_acts(old_acts);
}
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
if (!IS_ERR(reply))
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_flow_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
else
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_flow_multicast_group.id, PTR_ERR(reply));
+ ovs_dp_flow_multicast_group.id,
+ PTR_ERR(reply));
return 0;
error_free_flow:
- flow_put(flow);
+ ovs_flow_put(flow);
error:
return error;
}
if (!a[OVS_FLOW_ATTR_KEY])
return -EINVAL;
- err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (err)
return err;
return -ENODEV;
table = genl_dereference(dp->table);
- flow = flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow)
return -ENOENT;
if (!a[OVS_FLOW_ATTR_KEY])
return flush_flows(ovs_header->dp_ifindex);
- err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (err)
return err;
return -ENODEV;
table = genl_dereference(dp->table);
- flow = flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow)
return -ENOENT;
if (!reply)
return -ENOMEM;
- flow_tbl_remove(table, flow);
+ ovs_flow_tbl_remove(table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
BUG_ON(err < 0);
- flow_deferred_free(flow);
+ ovs_flow_deferred_free(flow);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
return 0;
}
bucket = cb->args[0];
obj = cb->args[1];
- flow = flow_tbl_next(table, &bucket, &obj);
+ flow = ovs_flow_tbl_next(table, &bucket, &obj);
if (!flow)
break;
.maxattr = OVS_DP_ATTR_MAX
};
-static struct genl_multicast_group dp_datapath_multicast_group = {
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
.name = OVS_DATAPATH_MCGROUP
};
ovs_header->dp_ifindex = get_dpifindex(dp);
rcu_read_lock();
- err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
+ err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
rcu_read_unlock();
if (err)
goto nla_put_failure;
struct vport *vport;
rcu_read_lock();
- vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+ vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
rcu_read_unlock();
}
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
+ rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
if (!dp->table)
goto err_free_dp;
goto err_destroy_local_port;
list_add_tail(&dp->list_node, &dps);
- dp_sysfs_add_dp(dp);
+ ovs_dp_sysfs_add_dp(dp);
rtnl_unlock();
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
return 0;
err_destroy_local_port:
- dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+ ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- flow_tbl_destroy(genl_dereference(dp->table));
+ ovs_flow_tbl_destroy(genl_dereference(dp->table));
err_free_dp:
kfree(dp);
err_put_module:
list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
if (vport->port_no != OVSP_LOCAL)
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
- dp_sysfs_del_dp(dp);
+ ovs_dp_sysfs_del_dp(dp);
list_del(&dp->list_node);
- dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+ ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
/* rtnl_unlock() will wait until all the references to devices that
* are pending unregistration have been dropped. We do it here to
module_put(THIS_MODULE);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
return 0;
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_datapath_multicast_group.id, err);
+ ovs_dp_datapath_multicast_group.id, err);
return 0;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
+
return 0;
}
.maxattr = OVS_VPORT_ATTR_MAX
};
-struct genl_multicast_group dp_vport_multicast_group = {
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
.name = OVS_VPORT_MCGROUP
};
NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
- vport_get_stats(vport, &vport_stats);
+ ovs_vport_get_stats(vport, &vport_stats);
NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
&vport_stats);
NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN,
vport->ops->get_addr(vport));
- err = vport_get_options(vport, skb);
+ err = ovs_vport_get_options(vport, skb);
if (err == -EMSGSIZE)
goto error;
struct vport *vport;
if (a[OVS_VPORT_ATTR_NAME]) {
- vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+ vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
if (!vport)
return ERR_PTR(-ENODEV);
return vport;
int err = 0;
if (a[OVS_VPORT_ATTR_STATS])
- vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
+ ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
if (a[OVS_VPORT_ATTR_ADDRESS])
- err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
+ err = ovs_vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
return err;
}
if (IS_ERR(vport))
goto exit_unlock;
- dp_sysfs_add_if(vport);
+ ovs_dp_sysfs_add_if(vport);
err = change_vport(vport, a);
if (!err) {
err = PTR_ERR(reply);
}
if (err) {
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
goto exit_unlock;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
err = -EINVAL;
if (!err && a[OVS_VPORT_ATTR_OPTIONS])
- err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
+ err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
if (!err)
err = change_vport(vport, a);
if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_vport_multicast_group.id, err);
+ ovs_dp_vport_multicast_group.id, err);
return 0;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
rtnl_unlock();
if (IS_ERR(reply))
goto exit_unlock;
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
rtnl_unlock();
static const struct genl_family_and_ops dp_genl_families[] = {
{ &dp_datapath_genl_family,
dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
- &dp_datapath_multicast_group },
+ &ovs_dp_datapath_multicast_group },
{ &dp_vport_genl_family,
dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
- &dp_vport_multicast_group },
+ &ovs_dp_vport_multicast_group },
{ &dp_flow_genl_family,
dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
- &dp_flow_multicast_group },
+ &ovs_dp_flow_multicast_group },
{ &dp_packet_genl_family,
dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
NULL },
pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
VERSION BUILDNR);
- err = tnl_init();
+ err = ovs_tnl_init();
if (err)
goto error;
- err = flow_init();
+ err = ovs_flow_init();
if (err)
goto error_tnl_exit;
- err = vport_init();
+ err = ovs_vport_init();
if (err)
goto error_flow_exit;
- err = register_netdevice_notifier(&dp_device_notifier);
+ err = register_netdevice_notifier(&ovs_dp_device_notifier);
if (err)
goto error_vport_exit;
return 0;
error_unreg_notifier:
- unregister_netdevice_notifier(&dp_device_notifier);
+ unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_vport_exit:
- vport_exit();
+ ovs_vport_exit();
error_flow_exit:
- flow_exit();
+ ovs_flow_exit();
error_tnl_exit:
- tnl_exit();
+ ovs_tnl_exit();
error:
return err;
}
{
rcu_barrier();
dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
- unregister_netdevice_notifier(&dp_device_notifier);
- vport_exit();
- flow_exit();
- tnl_exit();
+ unregister_netdevice_notifier(&ovs_dp_device_notifier);
+ ovs_vport_exit();
+ ovs_flow_exit();
+ ovs_tnl_exit();
}
module_init(dp_init);
u32 pid;
};
-extern struct notifier_block dp_device_notifier;
-extern struct genl_multicast_group dp_vport_multicast_group;
-extern int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
+extern struct notifier_block ovs_dp_device_notifier;
+extern struct genl_multicast_group ovs_dp_vport_multicast_group;
+extern int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
-void dp_process_received_packet(struct vport *, struct sk_buff *);
-void dp_detach_port(struct vport *);
-int dp_upcall(struct datapath *, struct sk_buff *,
- const struct dp_upcall_info *);
+void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
+void ovs_dp_detach_port(struct vport *);
+int ovs_dp_upcall(struct datapath *, struct sk_buff *,
+ const struct dp_upcall_info *);
-const char *dp_name(const struct datapath *dp);
+const char *ovs_dp_name(const struct datapath *dp);
struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
u8 cmd);
-int execute_actions(struct datapath *dp, struct sk_buff *skb);
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
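+
+/* Receive-path sketch (see ovs_dp_process_received_packet() for the real
+ * thing):
+ *
+ *	ovs_flow_extract(skb, p->port_no, &key, &key_len);
+ *	flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+ *	if (flow) {
+ *		OVS_CB(skb)->flow = flow;
+ *		ovs_execute_actions(dp, skb);
+ *	} else {
+ *		ovs_dp_upcall(dp, skb, &upcall);
+ *	}
+ */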
#endif /* datapath.h */
struct net_device *dev = ptr;
struct vport *vport;
- if (is_internal_dev(dev))
- vport = internal_dev_get_vport(dev);
+ if (ovs_is_internal_dev(dev))
+ vport = ovs_internal_dev_get_vport(dev);
else
- vport = netdev_get_vport(dev);
+ vport = ovs_netdev_get_vport(dev);
if (!vport)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
- if (!is_internal_dev(dev)) {
+ if (!ovs_is_internal_dev(dev)) {
struct sk_buff *notify;
notify = ovs_vport_cmd_build_info(vport, 0, 0,
OVS_VPORT_CMD_DEL);
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
if (IS_ERR(notify)) {
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_vport_multicast_group.id,
+ ovs_dp_vport_multicast_group.id,
PTR_ERR(notify));
break;
}
- genlmsg_multicast(notify, 0, dp_vport_multicast_group.id,
+ genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
GFP_KERNEL);
}
break;
case NETDEV_CHANGENAME:
if (vport->port_no != OVSP_LOCAL) {
- dp_sysfs_del_if(vport);
- dp_sysfs_add_if(vport);
+ ovs_dp_sysfs_del_if(vport);
+ ovs_dp_sysfs_add_if(vport);
}
break;
}
return NOTIFY_DONE;
}
-struct notifier_block dp_device_notifier = {
+struct notifier_block ovs_dp_device_notifier = {
.notifier_call = dp_device_event
};
struct vport;
/* dp_sysfs_dp.c */
-int dp_sysfs_add_dp(struct datapath *dp);
-int dp_sysfs_del_dp(struct datapath *dp);
+int ovs_dp_sysfs_add_dp(struct datapath *dp);
+int ovs_dp_sysfs_del_dp(struct datapath *dp);
/* dp_sysfs_if.c */
-int dp_sysfs_add_if(struct vport *p);
-int dp_sysfs_del_if(struct vport *p);
+int ovs_dp_sysfs_add_if(struct vport *p);
+int ovs_dp_sysfs_del_if(struct vport *p);
#ifdef CONFIG_SYSFS
-extern struct sysfs_ops brport_sysfs_ops;
+extern struct sysfs_ops ovs_brport_sysfs_ops;
#endif
#endif /* dp_sysfs.h */
static struct datapath *sysfs_get_dp(struct net_device *netdev)
{
- struct vport *vport = internal_dev_get_vport(netdev);
+ struct vport *vport = ovs_internal_dev_get_vport(netdev);
return vport ? vport->dp : NULL;
}
/*
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
pr_warning("%s: xxx writing dp parms not supported yet!\n",
- dp_name(dp));
+ ovs_dp_name(dp));
else
result = -ENODEV;
static void set_forward_delay(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_forward_delay()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_forward_delay()\n", ovs_dp_name(dp));
}
static ssize_t store_forward_delay(DEVICE_PARAMS,
static void set_hello_time(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_hello_time()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_hello_time()\n", ovs_dp_name(dp));
}
static ssize_t store_hello_time(DEVICE_PARAMS,
static void set_max_age(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_max_age()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_max_age()\n", ovs_dp_name(dp));
}
static ssize_t store_max_age(DEVICE_PARAMS,
static void set_ageing_time(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_ageing_time()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_ageing_time()\n", ovs_dp_name(dp));
}
static ssize_t store_ageing_time(DEVICE_PARAMS,
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
- pr_info("%s: xxx attempt to set_stp_state()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_stp_state()\n", ovs_dp_name(dp));
else
result = -ENODEV;
static void set_priority(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_priority()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_priority()\n", ovs_dp_name(dp));
}
static ssize_t store_priority(DEVICE_PARAMS,
rcu_read_lock();
- vport = internal_dev_get_vport(to_net_dev(d));
+ vport = ovs_internal_dev_get_vport(to_net_dev(d));
if (vport) {
const unsigned char *addr;
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
pr_info("%s: xxx attempt to store_group_addr()\n",
- dp_name(dp));
+ ovs_dp_name(dp));
else
result = -ENODEV;
* to hold links. The ifobj exists in the same data structure
* as its parent the bridge so reference counting works.
*/
-int dp_sysfs_add_dp(struct datapath *dp)
+int ovs_dp_sysfs_add_dp(struct datapath *dp)
{
struct vport *vport = rtnl_dereference(dp->ports[OVSP_LOCAL]);
struct kobject *kobj = vport->ops->get_kobj(vport);
err = sysfs_create_group(kobj, &bridge_group);
if (err) {
pr_info("%s: can't create group %s/%s\n",
- __func__, dp_name(dp), bridge_group.name);
+ __func__, ovs_dp_name(dp), bridge_group.name);
goto out1;
}
err = kobject_add(&dp->ifobj, kobj, SYSFS_BRIDGE_PORT_SUBDIR);
if (err) {
pr_info("%s: can't add kobject (directory) %s/%s\n",
- __func__, dp_name(dp), kobject_name(&dp->ifobj));
+ __func__, ovs_dp_name(dp), kobject_name(&dp->ifobj));
goto out2;
}
kobject_uevent(&dp->ifobj, KOBJ_ADD);
return err;
}
-int dp_sysfs_del_dp(struct datapath *dp)
+int ovs_dp_sysfs_del_dp(struct datapath *dp)
{
struct vport *vport = rtnl_dereference(dp->ports[OVSP_LOCAL]);
struct kobject *kobj = vport->ops->get_kobj(vport);
return 0;
}
#else /* !CONFIG_SYSFS */
-int dp_sysfs_add_dp(struct datapath *dp) { return 0; }
-int dp_sysfs_del_dp(struct datapath *dp) { return 0; }
+int ovs_dp_sysfs_add_dp(struct datapath *dp) { return 0; }
+int ovs_dp_sysfs_del_dp(struct datapath *dp) { return 0; }
-int dp_sysfs_add_if(struct vport *p) { return 0; }
-int dp_sysfs_del_if(struct vport *p) { return 0; }
+int ovs_dp_sysfs_add_if(struct vport *p) { return 0; }
+int ovs_dp_sysfs_del_if(struct vport *p) { return 0; }
#endif /* !CONFIG_SYSFS */
return -EPERM;
pr_warning("%s: xxx writing port parms not supported yet!\n",
- dp_name(p->dp));
+ ovs_dp_name(p->dp));
return ret;
}
-struct sysfs_ops brport_sysfs_ops = {
+struct sysfs_ops ovs_brport_sysfs_ops = {
.show = brport_show,
.store = brport_store,
};
* Creates a brport subdirectory with bridge attributes.
* Puts symlink in bridge's brport subdirectory
*/
-int dp_sysfs_add_if(struct vport *p)
+int ovs_dp_sysfs_add_if(struct vport *p)
{
struct datapath *dp = p->dp;
struct vport *local_port = rtnl_dereference(dp->ports[OVSP_LOCAL]);
return err;
}
-int dp_sysfs_del_if(struct vport *p)
+int ovs_dp_sysfs_del_if(struct vport *p)
{
if (p->linkname[0]) {
sysfs_remove_link(&p->dp->ifobj, p->linkname);
sizeof(struct icmphdr));
}
-u64 flow_used_time(unsigned long flow_jiffies)
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
struct timespec cur_ts;
u64 cur_ms, idle_ms;
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
-void flow_used(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
u8 tcp_flags = 0;
spin_unlock(&flow->lock);
}
-struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
int actions_len = nla_len(actions);
struct sw_flow_actions *sfa;
return sfa;
}
-struct sw_flow *flow_alloc(void)
+struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
flex_array_free(buckets);
}
-struct flow_table *flow_tbl_alloc(int new_size)
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
static void flow_free(struct sw_flow *flow)
{
flow->dead = true;
- flow_put(flow);
+ ovs_flow_put(flow);
}
-void flow_tbl_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table)
{
int i;
{
struct flow_table *table = container_of(rcu, struct flow_table, rcu);
- flow_tbl_destroy(table);
+ ovs_flow_tbl_destroy(table);
}
-void flow_tbl_deferred_destroy(struct flow_table *table)
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
if (!table)
return;
call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
-struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
struct sw_flow *flow;
struct hlist_head *head;
return NULL;
}
-struct flow_table *flow_tbl_expand(struct flow_table *table)
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
struct flow_table *new_table;
int n_buckets = table->n_buckets * 2;
int i;
- new_table = flow_tbl_alloc(n_buckets);
+ new_table = ovs_flow_tbl_alloc(n_buckets);
if (!new_table)
return ERR_PTR(-ENOMEM);
hlist_for_each_entry_safe(flow, n, pos, head, hash_node) {
hlist_del_init_rcu(&flow->hash_node);
- flow_tbl_insert(new_table, flow);
+ ovs_flow_tbl_insert(new_table, flow);
}
}
return new_table;
}
-/* RCU callback used by flow_deferred_free. */
+/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
flow->dead = true;
- flow_put(flow);
+ ovs_flow_put(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
-void flow_deferred_free(struct sw_flow *flow)
+void ovs_flow_deferred_free(struct sw_flow *flow)
{
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
-void flow_hold(struct sw_flow *flow)
+void ovs_flow_hold(struct sw_flow *flow)
{
atomic_inc(&flow->refcnt);
}
-void flow_put(struct sw_flow *flow)
+void ovs_flow_put(struct sw_flow *flow)
{
if (unlikely(!flow))
return;
}
}
-/* RCU callback used by flow_deferred_free_acts. */
+/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
struct sw_flow_actions *sf_acts = container_of(rcu,
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
-void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
}
/**
- * flow_extract - extracts a flow key from an Ethernet frame.
+ * ovs_flow_extract - extracts a flow key from an Ethernet frame.
* @skb: sk_buff that contains the frame, with skb->data pointing to the
* Ethernet header
* @in_port: port number on which @skb was received.
* of a correct length, otherwise the same as skb->network_header.
* For other key->dl_type values it is left untouched.
*/
-int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
int *key_lenp)
{
int error = 0;
return error;
}
-u32 flow_hash(const struct sw_flow_key *key, int key_len)
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
}
-struct sw_flow *flow_tbl_lookup(struct flow_table *table,
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
struct sw_flow_key *key, int key_len)
{
struct sw_flow *flow;
struct hlist_head *head;
u32 hash;
- hash = flow_hash(key, key_len);
+ hash = ovs_flow_hash(key, key_len);
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, n, head, hash_node) {
return NULL;
}
-void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
struct hlist_head *head;
table->count++;
}
-void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
if (!hlist_unhashed(&flow->hash_node)) {
hlist_del_init_rcu(&flow->hash_node);
}
/**
- * flow_from_nlattrs - parses Netlink attributes into a flow key.
+ * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
* @swkey: receives the extracted flow key.
* @key_lenp: number of bytes used in @swkey.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
*/
-int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
const struct nlattr *attr)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
}
/**
- * flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
+ * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
* @in_port: receives the extracted input port.
* @tun_id: receives the extracted tunnel ID.
* @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* get the metadata, that is, the parts of the flow key that cannot be
* extracted from the packet itself.
*/
-int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
- const struct nlattr *attr)
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
+ const struct nlattr *attr)
{
const struct nlattr *nla;
int rem;
return 0;
}
-int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
/* Initializes the flow module.
* Returns zero if successful or a negative error code. */
-int flow_init(void)
+int ovs_flow_init(void)
{
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
0, NULL);
}
/* Uninitializes the flow module. */
-void flow_exit(void)
+void ovs_flow_exit(void)
{
kmem_cache_destroy(flow_cache);
}
unsigned char ar_tip[4]; /* target IP address */
} __packed;
-int flow_init(void);
-void flow_exit(void);
+int ovs_flow_init(void);
+void ovs_flow_exit(void);
-struct sw_flow *flow_alloc(void);
-void flow_deferred_free(struct sw_flow *);
+struct sw_flow *ovs_flow_alloc(void);
+void ovs_flow_deferred_free(struct sw_flow *);
-struct sw_flow_actions *flow_actions_alloc(const struct nlattr *);
-void flow_deferred_free_acts(struct sw_flow_actions *);
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
-void flow_hold(struct sw_flow *);
-void flow_put(struct sw_flow *);
+void ovs_flow_hold(struct sw_flow *);
+void ovs_flow_put(struct sw_flow *);
-int flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
- int *key_lenp);
-void flow_used(struct sw_flow *, struct sk_buff *);
-u64 flow_used_time(unsigned long flow_jiffies);
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
+ int *key_lenp);
+void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+u64 ovs_flow_used_time(unsigned long flow_jiffies);
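+
+/* ovs_flow_extract() fills in only what can be derived from the packet
+ * itself; the metadata fields (priority, in_port, tun_id) are supplied
+ * separately, e.g. via ovs_flow_metadata_from_nlattrs(). */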
/* Upper bound on the length of a nlattr-formatted flow key. The longest
* nlattr-formatted flow key would be:
*/
#define FLOW_BUFSIZE 144
-int flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
-int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
const struct nlattr *);
-int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
- const struct nlattr *);
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
+ const struct nlattr *);
#define TBL_MIN_BUCKETS 1024
struct rcu_head rcu;
};
-static inline int flow_tbl_count(struct flow_table *table)
+static inline int ovs_flow_tbl_count(struct flow_table *table)
{
return table->count;
}
-static inline int flow_tbl_need_to_expand(struct flow_table *table)
+static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
{
return (table->count > table->n_buckets);
}
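+
+/* Expansion doubles n_buckets and rehashes every flow; the flow-setup
+ * path uses it as follows (sketch):
+ *
+ *	if (ovs_flow_tbl_need_to_expand(table)) {
+ *		new_table = ovs_flow_tbl_expand(table);
+ *		if (!IS_ERR(new_table)) {
+ *			rcu_assign_pointer(dp->table, new_table);
+ *			ovs_flow_tbl_deferred_destroy(table);
+ *		}
+ *	}
+ */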
-struct sw_flow *flow_tbl_lookup(struct flow_table *table,
- struct sw_flow_key *key, int len);
-void flow_tbl_destroy(struct flow_table *table);
-void flow_tbl_deferred_destroy(struct flow_table *table);
-struct flow_table *flow_tbl_alloc(int new_size);
-struct flow_table *flow_tbl_expand(struct flow_table *table);
-void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
-void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
-u32 flow_hash(const struct sw_flow_key *key, int key_len);
-
-struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+ struct sw_flow_key *key, int len);
+void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_alloc(int new_size);
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
+
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
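+
+/* Iteration sketch, as used by the flow dump path:
+ *
+ *	u32 bucket = 0, obj = 0;
+ *
+ *	while ((flow = ovs_flow_tbl_next(table, &bucket, &obj)) != NULL)
+ *		...;
+ */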
extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
#endif /* flow.h */
if (!cache)
return;
- flow_put(cache->flow);
+ ovs_flow_put(cache->flow);
ip_rt_put(cache->rt);
kfree(cache);
}
return NULL;
}
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
- int tunnel_type,
- const struct tnl_mutable_config **mutable)
+struct vport *ovs_tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
+ int tunnel_type,
+ const struct tnl_mutable_config **mutable)
{
struct port_lookup_key lookup;
struct vport *vport;
}
/**
- * tnl_rcv - ingress point for generic tunnel code
+ * ovs_tnl_rcv - ingress point for generic tunnel code
*
* @vport: port this packet was received on
* @skb: received packet
* - skb->csum does not include the inner Ethernet header.
* - The layer pointers are undefined.
*/
-void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
+void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
{
struct ethhdr *eh;
return;
}
- vport_receive(vport, skb);
+ ovs_vport_receive(vport, skb);
}
static bool check_ipv4_address(__be32 addr)
}
#endif /* IPv6 */
-bool tnl_frag_needed(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
+bool ovs_tnl_frag_needed(struct vport *vport,
+ const struct tnl_mutable_config *mutable,
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
unsigned int total_length = 0, header_length = 0, payload_length;
return false;
}
- vport_receive(vport, nskb);
+ ovs_vport_receive(vport, nskb);
return true;
}
mtu = max(mtu, IP_MIN_MTU);
if (packet_length > mtu &&
- tnl_frag_needed(vport, mutable, skb, mtu,
- OVS_CB(skb)->tun_id))
+ ovs_tnl_frag_needed(vport, mutable, skb, mtu,
+ OVS_CB(skb)->tun_id))
return false;
}
}
mtu = max(mtu, IPV6_MIN_MTU);
if (packet_length > mtu &&
- tnl_frag_needed(vport, mutable, skb, mtu,
- OVS_CB(skb)->tun_id))
+ ovs_tnl_frag_needed(vport, mutable, skb, mtu,
+ OVS_CB(skb)->tun_id))
return false;
}
}
hh->hh_lock.sequence == cache->hh_seq &&
#endif
mutable->seq == cache->mutable_seq &&
- (!is_internal_dev(rt_dst(cache->rt).dev) ||
+ (!ovs_is_internal_dev(rt_dst(cache->rt).dev) ||
(cache->flow && !cache->flow->dead));
}
cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif
- if (is_internal_dev(rt_dst(rt).dev)) {
+ if (ovs_is_internal_dev(rt_dst(rt).dev)) {
struct sw_flow_key flow_key;
struct vport *dst_vport;
struct sk_buff *skb;
int flow_key_len;
struct sw_flow *flow;
- dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
+ dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
if (!dst_vport)
goto done;
__skb_put(skb, cache->len);
memcpy(skb->data, get_cached_header(cache), cache->len);
- err = flow_extract(skb, dst_vport->port_no, &flow_key,
- &flow_key_len);
+ err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key,
+ &flow_key_len);
consume_skb(skb);
if (err)
goto done;
- flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
- &flow_key, flow_key_len);
+ flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
+ &flow_key, flow_key_len);
if (flow) {
cache->flow = flow;
- flow_hold(flow);
+ ovs_flow_hold(flow);
}
}
* dropped so just free the rest. This may help improve the congestion
* that caused the first packet to be dropped.
*/
- tnl_free_linked_skbs(skb);
+ ovs_tnl_free_linked_skbs(skb);
return sent_len;
}
-int tnl_send(struct vport *vport, struct sk_buff *skb)
+int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
int orig_len = skb->len - cache->len;
struct vport *cache_vport;
- cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
+ cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
skb->protocol = htons(ETH_P_IP);
iph = ip_hdr(skb);
iph->tot_len = htons(skb->len - skb_network_offset(skb));
}
OVS_CB(skb)->flow = cache->flow;
- vport_receive(cache_vport, skb);
+ ovs_vport_receive(cache_vport, skb);
sent_len += orig_len;
} else {
int xmit_err;
}
if (unlikely(sent_len == 0))
- vport_record_error(vport, VPORT_E_TX_DROPPED);
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
goto out;
error_free:
- tnl_free_linked_skbs(skb);
+ ovs_tnl_free_linked_skbs(skb);
error:
- vport_record_error(vport, err);
+ ovs_vport_record_error(vport, err);
out:
dst_release(unattached_dst);
return sent_len;
return 0;
}
-struct vport *tnl_create(const struct vport_parms *parms,
- const struct vport_ops *vport_ops,
- const struct tnl_ops *tnl_ops)
+struct vport *ovs_tnl_create(const struct vport_parms *parms,
+ const struct vport_ops *vport_ops,
+ const struct tnl_ops *tnl_ops)
{
struct vport *vport;
struct tnl_vport *tnl_vport;
int initial_frag_id;
int err;
- vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
free_mutable_rtnl(mutable);
kfree(mutable);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
-int tnl_set_options(struct vport *vport, struct nlattr *options)
+int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
const struct tnl_mutable_config *old_mutable;
return err;
}
-int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
+int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
free_cache((struct tnl_cache __force *)tnl_vport->cache);
kfree((struct tnl_mutable __force *)tnl_vport->mutable);
- vport_free(tnl_vport_to_vport(tnl_vport));
+ ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}
-void tnl_destroy(struct vport *vport)
+void ovs_tnl_destroy(struct vport *vport)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *mutable;
call_rcu(&tnl_vport->rcu, free_port_rcu);
}
-int tnl_set_addr(struct vport *vport, const unsigned char *addr)
+int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *old_mutable, *mutable;
return 0;
}
-const char *tnl_get_name(const struct vport *vport)
+const char *ovs_tnl_get_name(const struct vport *vport)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
return tnl_vport->name;
}
-const unsigned char *tnl_get_addr(const struct vport *vport)
+const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}
-void tnl_free_linked_skbs(struct sk_buff *skb)
+void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
{
while (skb) {
struct sk_buff *next = skb->next;
}
}
-int tnl_init(void)
+int ovs_tnl_init(void)
{
int i;
return 0;
}
-void tnl_exit(void)
+void ovs_tnl_exit(void)
{
int i;
#endif
};
-struct vport *tnl_create(const struct vport_parms *, const struct vport_ops *,
- const struct tnl_ops *);
-void tnl_destroy(struct vport *);
-
-int tnl_set_options(struct vport *, struct nlattr *);
-int tnl_get_options(const struct vport *, struct sk_buff *);
-
-int tnl_set_addr(struct vport *vport, const unsigned char *addr);
-const char *tnl_get_name(const struct vport *vport);
-const unsigned char *tnl_get_addr(const struct vport *vport);
-int tnl_send(struct vport *vport, struct sk_buff *skb);
-void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos);
-
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
- int tunnel_type,
- const struct tnl_mutable_config **mutable);
-bool tnl_frag_needed(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be64 flow_key);
-void tnl_free_linked_skbs(struct sk_buff *skb);
-
-int tnl_init(void);
-void tnl_exit(void);
+struct vport *ovs_tnl_create(const struct vport_parms *, const struct vport_ops *,
+ const struct tnl_ops *);
+void ovs_tnl_destroy(struct vport *);
+
+int ovs_tnl_set_options(struct vport *, struct nlattr *);
+int ovs_tnl_get_options(const struct vport *, struct sk_buff *);
+
+int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr);
+const char *ovs_tnl_get_name(const struct vport *vport);
+const unsigned char *ovs_tnl_get_addr(const struct vport *vport);
+int ovs_tnl_send(struct vport *vport, struct sk_buff *skb);
+void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos);
+
+struct vport *ovs_tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
+ int tunnel_type,
+ const struct tnl_mutable_config **mutable);
+bool ovs_tnl_frag_needed(struct vport *vport,
+ const struct tnl_mutable_config *mutable,
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key);
+void ovs_tnl_free_linked_skbs(struct sk_buff *skb);
+
+int ovs_tnl_init(void);
+void ovs_tnl_exit(void);
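+
+/* These helpers implement the behaviour common to all tunnel vports;
+ * both ovs_gre_vport_ops and ovs_capwap_vport_ops point .send, .destroy,
+ * .get_options, etc. directly at the ovs_tnl_* functions above. */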
static inline struct tnl_vport *tnl_vport_priv(const struct vport *vport)
{
return vport_priv(vport);
}
-
#endif /* tunnel.h */
goto out;
iph = ip_hdr(skb);
- vport = tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_CAPWAP,
- &mutable);
+ vport = ovs_tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_CAPWAP,
+ &mutable);
if (unlikely(!vport)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
goto error;
else
OVS_CB(skb)->tun_id = 0;
- tnl_rcv(vport, skb, iph->tos);
+ ovs_tnl_rcv(vport, skb, iph->tos);
goto out;
error:
static struct vport *capwap_create(const struct vport_parms *parms)
{
- return tnl_create(parms, &capwap_vport_ops, &capwap_tnl_ops);
+ return ovs_tnl_create(parms, &ovs_capwap_vport_ops, &capwap_tnl_ops);
}
/* Random value. Irrelevant as long as it's not 0 since we set the handler. */
return result;
error:
- tnl_free_linked_skbs(result);
+ ovs_tnl_free_linked_skbs(result);
kfree_skb(skb);
return NULL;
}
inet_frag_put(&fq->ifq, &frag_state);
}
-const struct vport_ops capwap_vport_ops = {
+const struct vport_ops ovs_capwap_vport_ops = {
.type = OVS_VPORT_TYPE_CAPWAP,
.flags = VPORT_F_TUN_ID,
.init = capwap_init,
.exit = capwap_exit,
.create = capwap_create,
- .destroy = tnl_destroy,
- .set_addr = tnl_set_addr,
- .get_name = tnl_get_name,
- .get_addr = tnl_get_addr,
- .get_options = tnl_get_options,
- .set_options = tnl_set_options,
- .get_dev_flags = vport_gen_get_dev_flags,
- .is_running = vport_gen_is_running,
- .get_operstate = vport_gen_get_operstate,
- .send = tnl_send,
+ .destroy = ovs_tnl_destroy,
+ .set_addr = ovs_tnl_set_addr,
+ .get_name = ovs_tnl_get_name,
+ .get_addr = ovs_tnl_get_addr,
+ .get_options = ovs_tnl_get_options,
+ .set_options = ovs_tnl_set_options,
+ .get_dev_flags = ovs_vport_gen_get_dev_flags,
+ .is_running = ovs_vport_gen_is_running,
+ .get_operstate = ovs_vport_gen_get_operstate,
+ .send = ovs_tnl_send,
};
#else
#warning CAPWAP tunneling will not be available on kernels before 2.6.26
#include "vport-generic.h"
-unsigned vport_gen_get_dev_flags(const struct vport *vport)
+unsigned ovs_vport_gen_get_dev_flags(const struct vport *vport)
{
return IFF_UP | IFF_RUNNING | IFF_LOWER_UP;
}
-int vport_gen_is_running(const struct vport *vport)
+int ovs_vport_gen_is_running(const struct vport *vport)
{
return 1;
}
-unsigned char vport_gen_get_operstate(const struct vport *vport)
+unsigned char ovs_vport_gen_get_operstate(const struct vport *vport)
{
return IF_OPER_UP;
}
#include "vport.h"
-unsigned vport_gen_get_dev_flags(const struct vport *);
-int vport_gen_is_running(const struct vport *);
-unsigned char vport_gen_get_operstate(const struct vport *);
+unsigned ovs_vport_gen_get_dev_flags(const struct vport *);
+int ovs_vport_gen_is_running(const struct vport *);
+unsigned char ovs_vport_gen_get_operstate(const struct vport *);
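+
+/* Default implementations for vports that have no backing net_device;
+ * used by the tunnel and patch vport_ops tables. */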
#endif /* vport-generic.h */
if (tunnel_hdr_len < 0)
return;
- vport = tnl_find_port(iph->saddr, iph->daddr, key, TNL_T_PROTO_GRE,
- &mutable);
+ vport = ovs_tnl_find_port(iph->saddr, iph->daddr, key, TNL_T_PROTO_GRE,
+ &mutable);
if (!vport)
return;
#endif
__skb_pull(skb, tunnel_hdr_len);
- tnl_frag_needed(vport, mutable, skb, mtu, key);
+ ovs_tnl_frag_needed(vport, mutable, skb, mtu, key);
__skb_push(skb, tunnel_hdr_len);
out:
goto error;
iph = ip_hdr(skb);
- vport = tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_GRE,
- &mutable);
+ vport = ovs_tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_GRE,
+ &mutable);
if (unlikely(!vport)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
goto error;
__skb_pull(skb, hdr_len);
skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);
- tnl_rcv(vport, skb, iph->tos);
+ ovs_tnl_rcv(vport, skb, iph->tos);
return 0;
error:
static struct vport *gre_create(const struct vport_parms *parms)
{
- return tnl_create(parms, &gre_vport_ops, &gre_tnl_ops);
+ return ovs_tnl_create(parms, &ovs_gre_vport_ops, &gre_tnl_ops);
}
static const struct net_protocol gre_protocol_handlers = {
inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}
-const struct vport_ops gre_vport_ops = {
+const struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.flags = VPORT_F_TUN_ID,
.init = gre_init,
.exit = gre_exit,
.create = gre_create,
- .destroy = tnl_destroy,
- .set_addr = tnl_set_addr,
- .get_name = tnl_get_name,
- .get_addr = tnl_get_addr,
- .get_options = tnl_get_options,
- .set_options = tnl_set_options,
- .get_dev_flags = vport_gen_get_dev_flags,
- .is_running = vport_gen_is_running,
- .get_operstate = vport_gen_get_operstate,
- .send = tnl_send,
+ .destroy = ovs_tnl_destroy,
+ .set_addr = ovs_tnl_set_addr,
+ .get_name = ovs_tnl_get_name,
+ .get_addr = ovs_tnl_get_addr,
+ .get_options = ovs_tnl_get_options,
+ .set_options = ovs_tnl_set_options,
+ .get_dev_flags = ovs_vport_gen_get_dev_flags,
+ .is_running = ovs_vport_gen_is_running,
+ .get_operstate = ovs_vport_gen_get_operstate,
+ .send = ovs_tnl_send,
};
struct net_device_stats *stats = &netdev->stats;
#endif
#endif
- struct vport *vport = internal_dev_get_vport(netdev);
+ struct vport *vport = ovs_internal_dev_get_vport(netdev);
struct ovs_vport_stats vport_stats;
- vport_get_stats(vport, &vport_stats);
+ ovs_vport_get_stats(vport, &vport_stats);
/* The tx and rx stats need to be swapped because the
* switch and host OS have opposite perspectives. */
OVS_CB(skb)->flow = NULL;
rcu_read_lock();
- vport_receive(internal_dev_priv(netdev)->vport, skb);
+ ovs_vport_receive(internal_dev_priv(netdev)->vport, skb);
rcu_read_unlock();
return 0;
}
static int internal_dev_do_ioctl(struct net_device *dev,
struct ifreq *ifr, int cmd)
{
- if (dp_ioctl_hook)
- return dp_ioctl_hook(dev, ifr, cmd);
+ if (ovs_dp_ioctl_hook)
+ return ovs_dp_ioctl_hook(dev, ifr, cmd);
return -EOPNOTSUPP;
}
static void internal_dev_destructor(struct net_device *dev)
{
- struct vport *vport = internal_dev_get_vport(dev);
+ struct vport *vport = ovs_internal_dev_get_vport(dev);
- vport_free(vport);
+ ovs_vport_free(vport);
free_netdev(dev);
}
struct internal_dev *internal_dev;
int err;
- vport = vport_alloc(sizeof(struct netdev_vport),
- &internal_vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+ &ovs_internal_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
error_free_netdev:
free_netdev(netdev_vport->dev);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
return len;
}
-const struct vport_ops internal_vport_ops = {
+const struct vport_ops ovs_internal_vport_ops = {
.type = OVS_VPORT_TYPE_INTERNAL,
.flags = VPORT_F_REQUIRED | VPORT_F_FLOW,
.create = internal_dev_create,
.destroy = internal_dev_destroy,
- .set_addr = netdev_set_addr,
- .get_name = netdev_get_name,
- .get_addr = netdev_get_addr,
- .get_kobj = netdev_get_kobj,
- .get_dev_flags = netdev_get_dev_flags,
- .is_running = netdev_is_running,
- .get_operstate = netdev_get_operstate,
- .get_ifindex = netdev_get_ifindex,
- .get_mtu = netdev_get_mtu,
+ .set_addr = ovs_netdev_set_addr,
+ .get_name = ovs_netdev_get_name,
+ .get_addr = ovs_netdev_get_addr,
+ .get_kobj = ovs_netdev_get_kobj,
+ .get_dev_flags = ovs_netdev_get_dev_flags,
+ .is_running = ovs_netdev_is_running,
+ .get_operstate = ovs_netdev_get_operstate,
+ .get_ifindex = ovs_netdev_get_ifindex,
+ .get_mtu = ovs_netdev_get_mtu,
.send = internal_dev_recv,
};
-int is_internal_dev(const struct net_device *netdev)
+int ovs_is_internal_dev(const struct net_device *netdev)
{
#ifdef HAVE_NET_DEVICE_OPS
return netdev->netdev_ops == &internal_dev_netdev_ops;
#endif
}
-struct vport *internal_dev_get_vport(struct net_device *netdev)
+struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
{
- if (!is_internal_dev(netdev))
+ if (!ovs_is_internal_dev(netdev))
return NULL;
return internal_dev_priv(netdev)->vport;
#include "datapath.h"
#include "vport.h"
-int is_internal_dev(const struct net_device *);
-struct vport *internal_dev_get_vport(struct net_device *);
+int ovs_is_internal_dev(const struct net_device *);
+struct vport *ovs_internal_dev_get_vport(struct net_device *);
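+
+/* ovs_internal_dev_get_vport() returns NULL when 'netdev' is not an
+ * internal device, so callers can use it as a combined test-and-lookup. */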
#endif /* vport-internal_dev.h */
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
- vport = netdev_get_vport(skb->dev);
+ vport = ovs_netdev_get_vport(skb->dev);
netdev_port_receive(vport, skb);
struct netdev_vport *netdev_vport;
int err;
- vport = vport_alloc(sizeof(struct netdev_vport),
- &netdev_vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+ &ovs_netdev_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
if (netdev_vport->dev->flags & IFF_LOOPBACK ||
netdev_vport->dev->type != ARPHRD_ETHER ||
- is_internal_dev(netdev_vport->dev)) {
+ ovs_is_internal_dev(netdev_vport->dev)) {
err = -EINVAL;
goto error_put;
}
error_put:
dev_put(netdev_vport->dev);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
synchronize_rcu();
dev_put(netdev_vport->dev);
- vport_free(vport);
+ ovs_vport_free(vport);
}
-int netdev_set_addr(struct vport *vport, const unsigned char *addr)
+int ovs_netdev_set_addr(struct vport *vport, const unsigned char *addr)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
struct sockaddr sa;
return dev_set_mac_address(netdev_vport->dev, &sa);
}
-const char *netdev_get_name(const struct vport *vport)
+const char *ovs_netdev_get_name(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->name;
}
-const unsigned char *netdev_get_addr(const struct vport *vport)
+const unsigned char *ovs_netdev_get_addr(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->dev_addr;
}
-struct kobject *netdev_get_kobj(const struct vport *vport)
+struct kobject *ovs_netdev_get_kobj(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return &netdev_vport->dev->NETDEV_DEV_MEMBER.kobj;
}
-unsigned netdev_get_dev_flags(const struct vport *vport)
+unsigned ovs_netdev_get_dev_flags(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return dev_get_flags(netdev_vport->dev);
}
-int netdev_is_running(const struct vport *vport)
+int ovs_netdev_is_running(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netif_running(netdev_vport->dev);
}
-unsigned char netdev_get_operstate(const struct vport *vport)
+unsigned char ovs_netdev_get_operstate(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->operstate;
}
-int netdev_get_ifindex(const struct vport *vport)
+int ovs_netdev_get_ifindex(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->ifindex;
}
-int netdev_get_mtu(const struct vport *vport)
+int ovs_netdev_get_mtu(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->mtu;
}
vlan_copy_skb_tci(skb);
- vport_receive(vport, skb);
+ ovs_vport_receive(vport, skb);
}
static unsigned packet_length(const struct sk_buff *skb)
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
if (net_ratelimit())
pr_warn("%s: dropped over-mtu packet: %d > %d\n",
- dp_name(vport->dp), packet_length(skb), mtu);
+ ovs_dp_name(vport->dp), packet_length(skb), mtu);
goto error;
}
error:
kfree_skb(skb);
- vport_record_error(vport, VPORT_E_TX_DROPPED);
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
return 0;
}
/* Returns null if this device is not attached to a datapath. */
-struct vport *netdev_get_vport(struct net_device *dev)
+struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#if IFF_BRIDGE_PORT != IFF_OVS_DATAPATH
#endif
}
-const struct vport_ops netdev_vport_ops = {
+const struct vport_ops ovs_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_NETDEV,
.flags = VPORT_F_REQUIRED,
.init = netdev_init,
.exit = netdev_exit,
.create = netdev_create,
.destroy = netdev_destroy,
- .set_addr = netdev_set_addr,
- .get_name = netdev_get_name,
- .get_addr = netdev_get_addr,
- .get_kobj = netdev_get_kobj,
- .get_dev_flags = netdev_get_dev_flags,
- .is_running = netdev_is_running,
- .get_operstate = netdev_get_operstate,
- .get_ifindex = netdev_get_ifindex,
- .get_mtu = netdev_get_mtu,
+ .set_addr = ovs_netdev_set_addr,
+ .get_name = ovs_netdev_get_name,
+ .get_addr = ovs_netdev_get_addr,
+ .get_kobj = ovs_netdev_get_kobj,
+ .get_dev_flags = ovs_netdev_get_dev_flags,
+ .is_running = ovs_netdev_is_running,
+ .get_operstate = ovs_netdev_get_operstate,
+ .get_ifindex = ovs_netdev_get_ifindex,
+ .get_mtu = ovs_netdev_get_mtu,
.send = netdev_send,
};
#include "vport.h"
-struct vport *netdev_get_vport(struct net_device *dev);
+struct vport *ovs_netdev_get_vport(struct net_device *dev);
struct netdev_vport {
struct net_device *dev;
return vport_priv(vport);
}
-int netdev_set_addr(struct vport *, const unsigned char *addr);
-const char *netdev_get_name(const struct vport *);
-const unsigned char *netdev_get_addr(const struct vport *);
-const char *netdev_get_config(const struct vport *);
-struct kobject *netdev_get_kobj(const struct vport *);
-unsigned netdev_get_dev_flags(const struct vport *);
-int netdev_is_running(const struct vport *);
-unsigned char netdev_get_operstate(const struct vport *);
-int netdev_get_ifindex(const struct vport *);
-int netdev_get_mtu(const struct vport *);
+int ovs_netdev_set_addr(struct vport *, const unsigned char *addr);
+const char *ovs_netdev_get_name(const struct vport *);
+const unsigned char *ovs_netdev_get_addr(const struct vport *);
+const char *ovs_netdev_get_config(const struct vport *);
+struct kobject *ovs_netdev_get_kobj(const struct vport *);
+unsigned ovs_netdev_get_dev_flags(const struct vport *);
+int ovs_netdev_is_running(const struct vport *);
+unsigned char ovs_netdev_get_operstate(const struct vport *);
+int ovs_netdev_get_ifindex(const struct vport *);
+int ovs_netdev_get_mtu(const struct vport *);
#endif /* vport_netdev.h */
struct patch_config *patchconf;
int err;
- vport = vport_alloc(sizeof(struct patch_vport),
- &patch_vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct patch_vport),
+ &ovs_patch_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
peer_name = patchconf->peer_name;
hlist_add_head(&patch_vport->hash_node, hash_bucket(peer_name));
- rcu_assign_pointer(patch_vport->peer, vport_locate(peer_name));
+ rcu_assign_pointer(patch_vport->peer, ovs_vport_locate(peer_name));
update_peers(patch_vport->name, vport);
return vport;
error_free_patchconf:
kfree(patchconf);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
struct patch_vport, rcu);
kfree((struct patch_config __force *)patch_vport->patchconf);
- vport_free(vport_from_priv(patch_vport));
+ ovs_vport_free(vport_from_priv(patch_vport));
}
static void patch_destroy(struct vport *vport)
hlist_del(&patch_vport->hash_node);
- rcu_assign_pointer(patch_vport->peer, vport_locate(patchconf->peer_name));
+ rcu_assign_pointer(patch_vport->peer, ovs_vport_locate(patchconf->peer_name));
hlist_add_head(&patch_vport->hash_node, hash_bucket(patchconf->peer_name));
return 0;
if (!peer) {
kfree_skb(skb);
- vport_record_error(vport, VPORT_E_TX_DROPPED);
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
return 0;
}
- vport_receive(peer, skb);
+ ovs_vport_receive(peer, skb);
return skb_len;
}
-const struct vport_ops patch_vport_ops = {
+const struct vport_ops ovs_patch_vport_ops = {
.type = OVS_VPORT_TYPE_PATCH,
.init = patch_init,
.exit = patch_exit,
.get_addr = patch_get_addr,
.get_options = patch_get_options,
.set_options = patch_set_options,
- .get_dev_flags = vport_gen_get_dev_flags,
- .is_running = vport_gen_is_running,
- .get_operstate = vport_gen_get_operstate,
+ .get_dev_flags = ovs_vport_gen_get_dev_flags,
+ .is_running = ovs_vport_gen_is_running,
+ .get_operstate = ovs_vport_gen_get_operstate,
.send = patch_send,
};
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the bottom of vport.h. */
static const struct vport_ops *base_vport_ops_list[] = {
- &netdev_vport_ops,
- &internal_vport_ops,
- &patch_vport_ops,
- &gre_vport_ops,
+ &ovs_netdev_vport_ops,
+ &ovs_internal_vport_ops,
+ &ovs_patch_vport_ops,
+ &ovs_gre_vport_ops,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
- &capwap_vport_ops,
+ &ovs_capwap_vport_ops,
#endif
};
#define VPORT_HASH_BUCKETS 1024
/**
- * vport_init - initialize vport subsystem
+ * ovs_vport_init - initialize vport subsystem
*
* Called at module load time to initialize the vport subsystem and any
* compiled in vport types.
*/
-int vport_init(void)
+int ovs_vport_init(void)
{
int err;
int i;
if (!err)
vport_ops_list[n_vport_types++] = new_ops;
else if (new_ops->flags & VPORT_F_REQUIRED) {
- vport_exit();
+ ovs_vport_exit();
goto error;
}
}
}
/**
- * vport_exit - shutdown vport subsystem
+ * ovs_vport_exit - shutdown vport subsystem
*
* Called at module exit time to shutdown the vport subsystem and any
* initialized vport types.
*/
-void vport_exit(void)
+void ovs_vport_exit(void)
{
int i;
}
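Taken together, the two entry points bracket the module's lifetime. A minimal sketch of the intended call order, assuming an init/exit pair like the one in datapath.c (error handling abbreviated):

static int __init example_module_init(void)
{
	int err;

	err = ovs_vport_init();	/* registers the compiled-in vport types */
	if (err)
		return err;
	/* ... set up datapaths, genl families, etc. ... */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* ... tear down datapaths first ... */
	ovs_vport_exit();	/* unwinds everything ovs_vport_init() did */
}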
/**
- * vport_locate - find a port that has already been created
+ * ovs_vport_locate - find a port that has already been created
*
* @name: name of port to find
*
* Must be called with RTNL or RCU read lock.
*/
-struct vport *vport_locate(const char *name)
+struct vport *ovs_vport_locate(const char *name)
{
struct hlist_head *bucket = hash_bucket(name);
struct vport *vport;
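A hedged usage sketch for the lookup, honoring the locking rule stated in the comment above (the helper name is hypothetical):

#include <linux/rcupdate.h>
#include "vport.h"

static bool example_port_exists(const char *name)
{
	struct vport *vport;
	bool exists;

	rcu_read_lock();
	vport = ovs_vport_locate(name);
	exists = vport != NULL;		/* only test it under the lock */
	rcu_read_unlock();

	return exists;
}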
static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
- .sysfs_ops = &brport_sysfs_ops,
+ .sysfs_ops = &ovs_brport_sysfs_ops,
#endif
.release = release_vport
};
/**
- * vport_alloc - allocate and initialize new vport
+ * ovs_vport_alloc - allocate and initialize new vport
*
* @priv_size: Size of private data area to allocate.
* @ops: vport device ops
* Allocate and initialize a new vport defined by @ops. The vport will contain
* a private data area of size @priv_size that can be accessed using
* vport_priv(). vports that are no longer needed should be released with
- * vport_free().
+ * ovs_vport_free().
*/
-struct vport *vport_alloc(int priv_size, const struct vport_ops *ops,
- const struct vport_parms *parms)
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
+ const struct vport_parms *parms)
{
struct vport *vport;
size_t alloc_size;
}
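A sketch of how an implementation's create() callback is expected to use this, modeled on the patch_create() hunk above; struct example_vport and example_vport_ops are hypothetical:

#include <linux/if.h>
#include <linux/string.h>
#include "vport.h"

struct example_vport {
	char peer_name[IFNAMSIZ];
};

static const struct vport_ops example_vport_ops;	/* forward decl for the sketch */

static struct vport *example_create(const struct vport_parms *parms)
{
	struct example_vport *priv;
	struct vport *vport;

	vport = ovs_vport_alloc(sizeof(struct example_vport),
				&example_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;	/* already an ERR_PTR() value */

	priv = vport_priv(vport);	/* the @priv_size area allocated above */
	strlcpy(priv->peer_name, parms->name, sizeof(priv->peer_name));
	return vport;
}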
/**
- * vport_free - uninitialize and free vport
+ * ovs_vport_free - uninitialize and free vport
*
* @vport: vport to free
*
- * Frees a vport allocated with vport_alloc() when it is no longer needed.
+ * Frees a vport allocated with ovs_vport_alloc() when it is no longer needed.
*
* The caller must ensure that an RCU grace period has passed since the last
* time @vport was in a datapath.
*/
-void vport_free(struct vport *vport)
+void ovs_vport_free(struct vport *vport)
{
free_percpu(vport->percpu_stats);
}
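The matching teardown defers the free past an RCU grace period, mirroring the free_rcu()/patch_destroy() shape in the hunks above; the struct and callbacks are again hypothetical:

#include <linux/rcupdate.h>
#include "vport.h"

struct example_rcu_vport {
	struct rcu_head rcu;	/* assumed: embedded for call_rcu() */
};

static void example_free_rcu(struct rcu_head *rcu)
{
	struct example_rcu_vport *priv =
		container_of(rcu, struct example_rcu_vport, rcu);

	ovs_vport_free(vport_from_priv(priv));
}

static void example_destroy(struct vport *vport)
{
	struct example_rcu_vport *priv = vport_priv(vport);

	call_rcu(&priv->rcu, example_free_rcu);
}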
/**
- * vport_add - add vport device (for kernel callers)
+ * ovs_vport_add - add vport device (for kernel callers)
*
* @parms: Information about new vport.
*
* Creates a new vport with the specified configuration (which is dependent on
* device type). RTNL lock must be held.
*/
-struct vport *vport_add(const struct vport_parms *parms)
+struct vport *ovs_vport_add(const struct vport_parms *parms)
{
struct vport *vport;
int err = 0;
}
/**
- * vport_set_options - modify existing vport device (for kernel callers)
+ * ovs_vport_set_options - modify existing vport device (for kernel callers)
*
* @vport: vport to modify.
* @port: New configuration.
* Modifies an existing device with the specified configuration (which is
* dependent on device type). RTNL lock must be held.
*/
-int vport_set_options(struct vport *vport, struct nlattr *options)
+int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
ASSERT_RTNL();
}
/**
- * vport_del - delete existing vport device
+ * ovs_vport_del - delete existing vport device
*
* @vport: vport to delete.
*
* Detaches @vport from its datapath and destroys it. It is possible to fail
* for reasons such as lack of memory. RTNL lock must be held.
*/
-void vport_del(struct vport *vport)
+void ovs_vport_del(struct vport *vport)
{
ASSERT_RTNL();
}
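A hedged lifecycle sketch for kernel callers: both calls require the RTNL lock, per the comments above. The parms values here are illustrative:

#include <linux/rtnetlink.h>
#include "vport.h"

static int example_add_then_del(struct datapath *dp)
{
	struct vport_parms parms = {
		.name       = "ex0",	/* hypothetical port name */
		.type       = OVS_VPORT_TYPE_NETDEV,
		.options    = NULL,
		.dp         = dp,
		.port_no    = 1,
		.upcall_pid = 0,
	};
	struct vport *vport;

	rtnl_lock();
	vport = ovs_vport_add(&parms);
	if (IS_ERR(vport)) {
		rtnl_unlock();
		return PTR_ERR(vport);
	}
	ovs_vport_del(vport);
	rtnl_unlock();
	return 0;
}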
/**
- * vport_set_addr - set device Ethernet address (for kernel callers)
+ * ovs_vport_set_addr - set device Ethernet address (for kernel callers)
*
* @vport: vport on which to set Ethernet address.
* @addr: New address.
* setting the Ethernet address, in which case the result will always be
* -EOPNOTSUPP. RTNL lock must be held.
*/
-int vport_set_addr(struct vport *vport, const unsigned char *addr)
+int ovs_vport_set_addr(struct vport *vport, const unsigned char *addr)
{
ASSERT_RTNL();
}
/**
- * vport_set_stats - sets offset device stats
+ * ovs_vport_set_stats - sets offset device stats
*
* @vport: vport on which to set stats
* @stats: stats to set
*
* Must be called with RTNL lock.
*/
-void vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats)
+void ovs_vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
ASSERT_RTNL();
}
/**
- * vport_get_stats - retrieve device stats
+ * ovs_vport_get_stats - retrieve device stats
*
* @vport: vport from which to retrieve the stats
* @stats: location to store stats
*
* Must be called with RTNL lock or rcu_read_lock.
*/
-void vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
+void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
int i;
}
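The two calls pair up: the offsets stored by ovs_vport_set_stats() become the base against which ovs_vport_get_stats() reports. A hedged sketch of rebasing a port's counters, assuming zeroed offsets effectively restart the visible totals:

static void example_rebase_stats(struct vport *vport)
{
	struct ovs_vport_stats zero = { 0 };
	struct ovs_vport_stats cur;

	ASSERT_RTNL();			/* both calls require the RTNL lock */
	ovs_vport_set_stats(vport, &zero);
	ovs_vport_get_stats(vport, &cur);	/* totals now relative to zero */
}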
/**
- * vport_get_options - retrieve device options
+ * ovs_vport_get_options - retrieve device options
*
* @vport: vport from which to retrieve the options.
* @skb: sk_buff where options should be appended.
*
* Must be called with RTNL lock or rcu_read_lock.
*/
-int vport_get_options(const struct vport *vport, struct sk_buff *skb)
+int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
struct nlattr *nla;
}
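On the implementation side, the get_options() callback simply appends its attributes to the buffer. A sketch shaped like the patch port's options; treating OVS_PATCH_ATTR_PEER as the attribute and "peer0" as the value are assumptions here:

#include <net/netlink.h>
#include "vport.h"

static int example_get_options(const struct vport *vport, struct sk_buff *skb)
{
	/* Append one string attribute; a nonzero return means the skb ran
	 * out of tail room. */
	if (nla_put_string(skb, OVS_PATCH_ATTR_PEER, "peer0"))
		return -EMSGSIZE;
	return 0;
}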
/**
- * vport_receive - pass up received packet to the datapath for processing
+ * ovs_vport_receive - pass up received packet to the datapath for processing
*
* @vport: vport that received the packet
* @skb: skb that was received
* skb->data should point to the Ethernet header. The caller must have already
* called compute_ip_summed() to initialize the checksumming fields.
*/
-void vport_receive(struct vport *vport, struct sk_buff *skb)
+void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
{
struct vport_percpu_stats *stats;
if (!(vport->ops->flags & VPORT_F_TUN_ID))
OVS_CB(skb)->tun_id = 0;
- dp_process_received_packet(vport, skb);
+ ovs_dp_process_received_packet(vport, skb);
}
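A sketch of a receive handler honoring that contract, assuming the compute_ip_summed(skb, false) signature used elsewhere in this tree; the rx wrapper name is illustrative:

static void example_rx(struct vport *vport, struct sk_buff *skb)
{
	/* Initialize checksum bookkeeping before handing the skb up. */
	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}
	ovs_vport_receive(vport, skb);	/* consumes the skb */
}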
/**
- * vport_send - send a packet on a device
+ * ovs_vport_send - send a packet on a device
*
* @vport: vport on which to send the packet
* @skb: skb to send
* Sends the given packet and returns the length of data sent. Either RTNL
* lock or rcu_read_lock must be held.
*/
-int vport_send(struct vport *vport, struct sk_buff *skb)
+int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
int sent = vport->ops->send(vport, skb);
}
/**
- * vport_record_error - indicate device error to generic stats layer
+ * ovs_vport_record_error - indicate device error to generic stats layer
*
* @vport: vport that encountered the error
* @err_type: one of enum vport_err_type types to indicate the error type
* If using the vport generic stats layer, indicate that an error of the given
* type has occurred.
*/
-void vport_record_error(struct vport *vport, enum vport_err_type err_type)
+void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
{
spin_lock(&vport->stats_lock);
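On the implementation side, a send() callback pairs kfree_skb() with this call on its drop paths, as the netdev and patch hunks above do. A hedged sketch (the readiness check is purely illustrative):

static int example_send(struct vport *vport, struct sk_buff *skb)
{
	int len = skb->len;

	if (unlikely(!vport->ops->is_running(vport))) {	/* illustrative check */
		kfree_skb(skb);
		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
		return 0;	/* a drop reports zero bytes sent */
	}
	/* ... queue the skb on the underlying device ... */
	return len;
}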
/* The following definitions are for users of the vport subsystem: */
-int vport_init(void);
-void vport_exit(void);
+int ovs_vport_init(void);
+void ovs_vport_exit(void);
-struct vport *vport_add(const struct vport_parms *);
-void vport_del(struct vport *);
+struct vport *ovs_vport_add(const struct vport_parms *);
+void ovs_vport_del(struct vport *);
-struct vport *vport_locate(const char *name);
+struct vport *ovs_vport_locate(const char *name);
-int vport_set_addr(struct vport *, const unsigned char *);
-void vport_set_stats(struct vport *, struct ovs_vport_stats *);
-void vport_get_stats(struct vport *, struct ovs_vport_stats *);
+int ovs_vport_set_addr(struct vport *, const unsigned char *);
+void ovs_vport_set_stats(struct vport *, struct ovs_vport_stats *);
+void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
-int vport_set_options(struct vport *, struct nlattr *options);
-int vport_get_options(const struct vport *, struct sk_buff *);
+int ovs_vport_set_options(struct vport *, struct nlattr *options);
+int ovs_vport_get_options(const struct vport *, struct sk_buff *);
-int vport_send(struct vport *, struct sk_buff *);
+int ovs_vport_send(struct vport *, struct sk_buff *);
/* The following definitions are for implementers of vport devices: */
enum ovs_vport_type type;
struct nlattr *options;
- /* For vport_alloc(). */
+ /* For ovs_vport_alloc(). */
struct datapath *dp;
u16 port_no;
u32 upcall_pid;
* not set and initialization fails, then no vports of this type can be created.
* @exit: Called at module unload.
* @create: Create a new vport configured as specified. On success returns
- * a new vport allocated with vport_alloc(), otherwise an ERR_PTR() value.
+ * a new vport allocated with ovs_vport_alloc(), otherwise an ERR_PTR() value.
- * @destroy: Destroys a vport. Must call vport_free() on the vport but not
+ * @destroy: Destroys a vport. Must call ovs_vport_free() on the vport but not
* before an RCU grace period has elapsed.
* @set_options: Modify the configuration of an existing vport. May be %NULL
VPORT_E_TX_ERROR,
};
-struct vport *vport_alloc(int priv_size, const struct vport_ops *,
- const struct vport_parms *);
-void vport_free(struct vport *);
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
+ const struct vport_parms *);
+void ovs_vport_free(struct vport *);
#define VPORT_ALIGN 8
return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
}
-void vport_receive(struct vport *, struct sk_buff *);
-void vport_record_error(struct vport *, enum vport_err_type err_type);
+void ovs_vport_receive(struct vport *, struct sk_buff *);
+void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the top of vport.c. */
-extern const struct vport_ops netdev_vport_ops;
-extern const struct vport_ops internal_vport_ops;
-extern const struct vport_ops patch_vport_ops;
-extern const struct vport_ops gre_vport_ops;
-extern const struct vport_ops capwap_vport_ops;
+extern const struct vport_ops ovs_netdev_vport_ops;
+extern const struct vport_ops ovs_internal_vport_ops;
+extern const struct vport_ops ovs_patch_vport_ops;
+extern const struct vport_ops ovs_gre_vport_ops;
+extern const struct vport_ops ovs_capwap_vport_ops;
#endif /* vport.h */