X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Fdatapath.c;h=4d40ac3bde253c1d6d34c2778c96fa4e7e0a82f7;hb=0f4d9dce8150fced85070149e0820707d55ee252;hp=6abd6918571b31487da6d91a191e701ef7487589;hpb=3544358aa5960b148bc31435a0062e9392530ec2;p=openvswitch

diff --git a/datapath/datapath.c b/datapath/datapath.c
index 6abd6918..4d40ac3b 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -84,7 +84,7 @@ EXPORT_SYMBOL(dp_ioctl_hook);
 static LIST_HEAD(dps);
 
 static struct vport *new_vport(const struct vport_parms *);
-static int queue_userspace_packets(struct datapath *, struct sk_buff *,
+static int queue_userspace_packets(struct datapath *, u32 pid, struct sk_buff *,
 				   const struct dp_upcall_info *);
 
 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
@@ -124,6 +124,24 @@ const char *dp_name(const struct datapath *dp)
 	return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
 }
 
+static int get_dpifindex(struct datapath *dp)
+{
+	struct vport *local;
+	int ifindex;
+
+	rcu_read_lock();
+
+	local = get_vport_protected(dp, OVSP_LOCAL);
+	if (local)
+		ifindex = vport_get_ifindex(local);
+	else
+		ifindex = 0;
+
+	rcu_read_unlock();
+
+	return ifindex;
+}
+
 static inline size_t br_nlmsg_size(void)
 {
 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -160,8 +178,7 @@ static int dp_fill_ifinfo(struct sk_buff *skb,
 	hdr->ifi_change = 0;
 
 	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
-	NLA_PUT_U32(skb, IFLA_MASTER,
-		    vport_get_ifindex(get_vport_protected(dp, OVSP_LOCAL)));
+	NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
 	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
 #ifdef IFLA_OPERSTATE
 	NLA_PUT_U8(skb, IFLA_OPERSTATE,
@@ -344,51 +361,22 @@ static struct genl_family dp_packet_genl_family = {
 	.maxattr = OVS_PACKET_ATTR_MAX
 };
 
-/* Generic Netlink multicast groups for upcalls.
- *
- * We really want three unique multicast groups per datapath, but we can't even
- * get one, because genl_register_mc_group() takes genl_lock, which is also
- * held during Generic Netlink message processing, so trying to acquire
- * multicast groups during OVS_DP_NEW processing deadlocks. Instead, we
- * preallocate a few groups and use them round-robin for datapaths. Collision
- * isn't fatal--multicast listeners should check that the family is the one
- * that they want and discard others--but it wastes time and memory to receive
- * unwanted messages.
- */
-#define PACKET_N_MC_GROUPS 16
-static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
-
-static u32 packet_mc_group(struct datapath *dp, u8 cmd)
-{
-	u32 idx;
-	BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
-
-	idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
-	return packet_mc_groups[idx].id;
-}
-
-static int packet_register_mc_groups(void)
-{
-	int i;
-
-	for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
-		struct genl_multicast_group *group = &packet_mc_groups[i];
-		int error;
-
-		sprintf(group->name, "packet%d", i);
-		error = genl_register_mc_group(&dp_packet_genl_family, group);
-		if (error)
-			return error;
-	}
-	return 0;
-}
-
 int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
 {
 	struct dp_stats_percpu *stats;
+	u32 pid;
 	int err;
 
-	WARN_ON_ONCE(skb_shared(skb));
+	if (OVS_CB(skb)->flow)
+		pid = OVS_CB(skb)->flow->upcall_pid;
+	else
+		pid = OVS_CB(skb)->vport->upcall_pid;
+
+	if (pid == 0) {
+		err = -ENOTCONN;
+		kfree_skb(skb);
+		goto err;
+	}
 
 	forward_ip_summed(skb, true);
 
@@ -406,7 +394,7 @@ int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_i
 		skb = nskb;
 	}
 
-	err = queue_userspace_packets(dp, skb, upcall_info);
+	err = queue_userspace_packets(dp, pid, skb, upcall_info);
 	if (err)
 		goto err;
 
@@ -429,13 +417,21 @@ err:
  * 'upcall_info'. There will be only one packet unless we broke up a GSO
  * packet.
  */
-static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
-				   const struct dp_upcall_info *upcall_info)
+static int queue_userspace_packets(struct datapath *dp, u32 pid,
+				   struct sk_buff *skb,
+				   const struct dp_upcall_info *upcall_info)
 {
-	u32 group = packet_mc_group(dp, upcall_info->cmd);
+	int dp_ifindex;
 	struct sk_buff *nskb;
 	int err;
 
+	dp_ifindex = get_dpifindex(dp);
+	if (!dp_ifindex) {
+		err = -ENODEV;
+		nskb = skb->next;
+		goto err_kfree_skbs;
+	}
+
 	do {
 		struct ovs_header *upcall;
 		struct sk_buff *user_skb; /* to be queued to userspace */
@@ -449,8 +445,10 @@ static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
 		if (unlikely(err))
 			goto err_kfree_skbs;
 
-		if (nla_attr_size(skb->len) > USHRT_MAX)
+		if (nla_attr_size(skb->len) > USHRT_MAX) {
+			err = -EFBIG;
 			goto err_kfree_skbs;
+		}
 
 		len = sizeof(struct ovs_header);
 		len += nla_total_size(skb->len);
@@ -464,12 +462,12 @@ static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
 
 		user_skb = genlmsg_new(len, GFP_ATOMIC);
 		if (!user_skb) {
-			netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
+			err = -ENOMEM;
 			goto err_kfree_skbs;
 		}
 
 		upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
-		upcall->dp_ifindex = dp->dp_ifindex;
+		upcall->dp_ifindex = dp_ifindex;
 
 		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
 		flow_to_nlattrs(upcall_info->key, user_skb);
@@ -494,7 +492,7 @@ static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
 		else
 			skb_copy_bits(skb, 0, nla_data(nla), skb->len);
 
-		err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
+		err = genlmsg_unicast(&init_net, user_skb, pid);
 		if (err)
 			goto err_kfree_skbs;
 
@@ -674,6 +672,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 
 	flow->hash = flow_hash(&flow->key, key_len);
 
+	if (a[OVS_PACKET_ATTR_UPCALL_PID])
+		flow->upcall_pid = nla_get_u32(a[OVS_PACKET_ATTR_UPCALL_PID]);
+	else
+		flow->upcall_pid = NETLINK_CB(skb).pid;
+
 	acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
 	err = PTR_ERR(acts);
 	if (IS_ERR(acts))
@@ -687,6 +690,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	err = -ENODEV;
 	if (!dp)
 		goto err_unlock;
+
+	if (flow->key.eth.in_port < DP_MAX_PORTS)
+		OVS_CB(packet)->vport = get_vport_protected(dp,
+							flow->key.eth.in_port);
+
 	err = execute_actions(dp, packet);
 	rcu_read_unlock();
 
@@ -707,6 +715,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
 	[OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
 	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
 	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+	[OVS_PACKET_ATTR_UPCALL_PID] = { .type = NLA_U32 },
 };
 
 static struct genl_ops dp_packet_genl_ops[] = {
@@ -744,54 +753,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
 	}
 }
 
-/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
- * Called with RTNL lock.
- */
-int dp_min_mtu(const struct datapath *dp)
-{
-	struct vport *p;
-	int mtu = 0;
-
-	ASSERT_RTNL();
-
-	list_for_each_entry (p, &dp->port_list, node) {
-		int dev_mtu;
-
-		/* Skip any internal ports, since that's what we're trying to
-		 * set. */
-		if (is_internal_vport(p))
-			continue;
-
-		dev_mtu = vport_get_mtu(p);
-		if (!dev_mtu)
-			continue;
-		if (!mtu || dev_mtu < mtu)
-			mtu = dev_mtu;
-	}
-
-	return mtu ? mtu : ETH_DATA_LEN;
-}
-
-/* Sets the MTU of all datapath devices to the minimum of the ports
- * Called with RTNL lock.
- */
-void set_internal_devs_mtu(const struct datapath *dp)
-{
-	struct vport *p;
-	int mtu;
-
-	ASSERT_RTNL();
-
-	mtu = dp_min_mtu(dp);
-
-	list_for_each_entry (p, &dp->port_list, node) {
-		if (is_internal_vport(p))
-			vport_set_mtu(p, mtu);
-	}
-}
-
 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
 	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+	[OVS_FLOW_ATTR_UPCALL_PID] = { .type = NLA_U32 },
 	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
 	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
 };
@@ -828,7 +792,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	if (!ovs_header)
 		return -EMSGSIZE;
 
-	ovs_header->dp_ifindex = dp->dp_ifindex;
+	ovs_header->dp_ifindex = get_dpifindex(dp);
 
 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
 	if (!nla)
@@ -838,6 +802,8 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 		goto error;
 	nla_nest_end(skb, nla);
 
+	NLA_PUT_U32(skb, OVS_FLOW_ATTR_UPCALL_PID, flow->upcall_pid);
+
 	spin_lock_bh(&flow->lock);
 	used = flow->used;
 	stats.n_packets = flow->packet_count;
@@ -975,6 +941,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		flow->key = key;
 		clear_stats(flow);
 
+		if (a[OVS_FLOW_ATTR_UPCALL_PID])
+			flow->upcall_pid = nla_get_u32(a[OVS_FLOW_ATTR_UPCALL_PID]);
+		else
+			flow->upcall_pid = NETLINK_CB(skb).pid;
+
 		/* Obtain actions. */
 		acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
 		error = PTR_ERR(acts);
@@ -1024,6 +995,9 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
 						info->snd_seq, OVS_FLOW_CMD_NEW);
 
+		if (a[OVS_FLOW_ATTR_UPCALL_PID])
+			flow->upcall_pid = nla_get_u32(a[OVS_FLOW_ATTR_UPCALL_PID]);
+
 		/* Clear stats. */
 		if (a[OVS_FLOW_ATTR_CLEAR]) {
 			spin_lock_bh(&flow->lock);
@@ -1182,6 +1156,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
 #ifdef HAVE_NLA_NUL_STRING
 	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
 #endif
+	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
 	[OVS_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
 	[OVS_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
 };
@@ -1210,7 +1185,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 	if (!ovs_header)
 		goto error;
 
-	ovs_header->dp_ifindex = dp->dp_ifindex;
+	ovs_header->dp_ifindex = get_dpifindex(dp);
 
 	rcu_read_lock();
 	err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
@@ -1229,14 +1204,6 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 	if (dp->sflow_probability)
 		NLA_PUT_U32(skb, OVS_DP_ATTR_SAMPLING, dp->sflow_probability);
 
-	nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS);
-	if (!nla)
-		goto nla_put_failure;
-	NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS, packet_mc_group(dp, OVS_PACKET_CMD_MISS));
-	NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION, packet_mc_group(dp, OVS_PACKET_CMD_ACTION));
-	NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE, packet_mc_group(dp, OVS_PACKET_CMD_SAMPLE));
-	nla_nest_end(skb, nla);
-
 	return genlmsg_end(skb, ovs_header);
 
 nla_put_failure:
@@ -1341,30 +1308,34 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	if (!dp->table)
 		goto err_free_dp;
 
+	dp->drop_frags = 0;
+	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+	if (!dp->stats_percpu) {
+		err = -ENOMEM;
+		goto err_destroy_table;
+	}
+
+	change_datapath(dp, a);
+
 	/* Set up our datapath device. */
 	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
 	parms.type = OVS_VPORT_TYPE_INTERNAL;
 	parms.options = NULL;
 	parms.dp = dp;
 	parms.port_no = OVSP_LOCAL;
+	if (a[OVS_DP_ATTR_UPCALL_PID])
+		parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+	else
+		parms.upcall_pid = NETLINK_CB(skb).pid;
+
 	vport = new_vport(&parms);
 	if (IS_ERR(vport)) {
 		err = PTR_ERR(vport);
 		if (err == -EBUSY)
 			err = -EEXIST;
 
-		goto err_destroy_table;
+		goto err_destroy_percpu;
 	}
-	dp->dp_ifindex = vport_get_ifindex(vport);
-
-	dp->drop_frags = 0;
-	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
-	if (!dp->stats_percpu) {
-		err = -ENOMEM;
-		goto err_destroy_local_port;
-	}
-
-	change_datapath(dp, a);
 	reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq,
 				      OVS_DP_CMD_NEW);
 	err = PTR_ERR(reply);
@@ -1382,6 +1353,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 
 err_destroy_local_port:
 	dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
+err_destroy_percpu:
+	free_percpu(dp->stats_percpu);
 err_destroy_table:
 	flow_tbl_destroy(get_table_protected(dp));
 err_free_dp:
@@ -1543,15 +1516,15 @@ static struct genl_ops dp_datapath_genl_ops[] = {
 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
 #ifdef HAVE_NLA_NUL_STRING
 	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
-	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
-	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
-	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
+	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
 	[OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
 #else
-	[OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct rtnl_link_stats64) },
+	[OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
 	[OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
 #endif
-	[OVS_VPORT_ATTR_MTU] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
 	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
 };
 
@@ -1574,7 +1547,6 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 	struct ovs_header *ovs_header;
 	struct nlattr *nla;
 	int ifindex;
-	int mtu;
 	int err;
 
 	ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
@@ -1582,23 +1554,20 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 	if (!ovs_header)
 		return -EMSGSIZE;
 
-	ovs_header->dp_ifindex = vport->dp->dp_ifindex;
+	ovs_header->dp_ifindex = get_dpifindex(vport->dp);
 
 	NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
 	NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
 	NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
+	NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
 
-	nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
+	nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
 	if (!nla)
 		goto nla_put_failure;
-	if (vport_get_stats(vport, nla_data(nla)))
-		__skb_trim(skb, skb->len - nla->nla_len);
 
-	NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
+	vport_get_stats(vport, nla_data(nla));
 
-	mtu = vport_get_mtu(vport);
-	if (mtu)
-		NLA_PUT_U32(skb, OVS_VPORT_ATTR_MTU, mtu);
+	NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
 
 	err = vport_get_options(vport, skb);
 	if (err == -EMSGSIZE)
@@ -1675,12 +1644,13 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
 static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
 {
 	int err = 0;
+
 	if (a[OVS_VPORT_ATTR_STATS])
-		err = vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
-	if (!err && a[OVS_VPORT_ATTR_ADDRESS])
+		vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
+
+	if (a[OVS_VPORT_ATTR_ADDRESS])
 		err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
-	if (!err && a[OVS_VPORT_ATTR_MTU])
-		err = vport_set_mtu(vport, nla_get_u32(a[OVS_VPORT_ATTR_MTU]));
+
 	return err;
 }
 
@@ -1737,13 +1707,16 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
 	parms.dp = dp;
 	parms.port_no = port_no;
+	if (a[OVS_VPORT_ATTR_UPCALL_PID])
+		parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+	else
+		parms.upcall_pid = NETLINK_CB(skb).pid;
 
	vport = new_vport(&parms);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
 		goto exit_unlock;
 
-	set_internal_devs_mtu(dp);
 	dp_sysfs_add_if(vport);
 
 	err = change_vport(vport, a);
@@ -1789,6 +1762,8 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 		err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
 	if (!err)
 		err = change_vport(vport, a);
+	if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
+		vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
 
 	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
 					 OVS_VPORT_CMD_NEW);
@@ -1990,9 +1965,6 @@ static int dp_register_genl(void)
 		}
 	}
 
-	err = packet_register_mc_groups();
-	if (err)
-		goto error;
 	return 0;
 
 error:
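
Taken together, the upcall path after this patch picks its Netlink destination per packet instead of per datapath. The selection that dp_upcall() now performs can be summarized by the sketch below; it is condensed from the hunks above and assumes the datapath.h context of this module (struct sk_buff, OVS_CB(), and the upcall_pid fields this patch adds to struct sw_flow and struct vport). The helper name choose_upcall_pid() is illustrative only; the patch open-codes this test inside dp_upcall().

	/* A packet that matched a kernel flow is reported to that flow's
	 * upcall_pid; a packet with no flow falls back to the upcall_pid of
	 * the vport it arrived on.  A pid of 0 means "no userspace listener",
	 * so dp_upcall() frees the packet and returns -ENOTCONN instead of
	 * queuing it. */
	static u32 choose_upcall_pid(struct sk_buff *skb)
	{
		if (OVS_CB(skb)->flow)
			return OVS_CB(skb)->flow->upcall_pid;
		return OVS_CB(skb)->vport->upcall_pid;
	}

The chosen pid is then passed to queue_userspace_packets(), which delivers each message with genlmsg_unicast(&init_net, user_skb, pid) rather than genlmsg_multicast(). That switch to unicast is what allows the patch to drop the preallocated packet_mc_groups[] array, packet_mc_group(), packet_register_mc_groups(), and the OVS_DP_ATTR_MCGROUPS reply attribute.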