+ err = nla_parse(a, ODP_FLOW_ATTR_MAX,
+ (struct nlattr *)(skb->data + sizeof(struct odp_flow)),
+ skb->len - sizeof(struct odp_flow), flow_policy);
+ if (err)
+ goto error_free_skb;
+
+ /* ODP_FLOW_ATTR_KEY. */
+ if (a[ODP_FLOW_ATTR_KEY]) {
+ err = flow_from_nlattrs(&flowcmd->key, a[ODP_FLOW_ATTR_KEY]);
+ if (err)
+ goto error_free_skb;
+ } else
+ memset(&flowcmd->key, 0, sizeof(struct sw_flow_key));
+
+ /* ODP_FLOW_ATTR_ACTIONS. */
+ if (a[ODP_FLOW_ATTR_ACTIONS]) {
+ flowcmd->actions = nla_data(a[ODP_FLOW_ATTR_ACTIONS]);
+ flowcmd->actions_len = nla_len(a[ODP_FLOW_ATTR_ACTIONS]);
+ err = validate_actions(flowcmd->actions, flowcmd->actions_len);
+ if (err)
+ goto error_free_skb;
+ } else {
+ flowcmd->actions = NULL;
+ flowcmd->actions_len = 0;
+ }
+
+ flowcmd->clear = a[ODP_FLOW_ATTR_CLEAR] != NULL;
+
+ flowcmd->state = a[ODP_FLOW_ATTR_STATE] ? nla_get_u64(a[ODP_FLOW_ATTR_STATE]) : 0;
+
+ return skb;
+
+error_free_skb:
+ kfree_skb(skb);
+ return ERR_PTR(err);
+}
+
+/* Handles ODP_FLOW_NEW and ODP_FLOW_SET: creates a new flow or updates an
+ * existing one, depending on 'cmd' and on whether a flow matching the key
+ * in 'uodp_flow' already exists in the datapath's flow table.
+ *
+ * Returns 0 on success or a negative errno (-ENODEV for a bad datapath,
+ * -ENOENT when ODP_FLOW_SET names a nonexistent flow, -EEXIST when
+ * ODP_FLOW_NEW with NLM_F_CREATE/NLM_F_EXCL matches an existing flow).
+ */
+static int new_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
+{
+	struct tbl_node *flow_node;
+	struct dp_flowcmd flowcmd;
+	struct sw_flow *flow;
+	struct sk_buff *skb;
+	struct datapath *dp;
+	struct tbl *table;
+	u32 hash;
+	int error;
+
+	skb = copy_flow_from_user(uodp_flow, &flowcmd);
+	error = PTR_ERR(skb);
+	if (IS_ERR(skb))
+		goto exit;
+
+	/* get_dp_locked() returns with dp->mutex held; every error path below
+	 * this point must release it (via error_unlock_dp or later labels). */
+	dp = get_dp_locked(flowcmd.dp_idx);
+	error = -ENODEV;
+	if (!dp)
+		goto error_kfree_skb;
+
+	hash = flow_hash(&flowcmd.key);
+	table = get_table_protected(dp);
+	flow_node = tbl_lookup(table, &flowcmd.key, hash, flow_cmp);
+	if (!flow_node) {
+		struct sw_flow_actions *acts;
+
+		/* Bail out if we're not allowed to create a new flow. */
+		error = -ENOENT;
+		if (cmd == ODP_FLOW_SET)
+			goto error_unlock_dp;
+
+		/* Expand table, if necessary, to make room. */
+		if (tbl_count(table) >= tbl_n_buckets(table)) {
+			error = expand_table(dp);
+			if (error)
+				goto error_unlock_dp;
+			table = get_table_protected(dp);
+		}
+
+		/* Allocate flow. */
+		flow = flow_alloc();
+		if (IS_ERR(flow)) {
+			error = PTR_ERR(flow);
+			goto error_unlock_dp;
+		}
+		flow->key = flowcmd.key;
+		clear_stats(flow);
+
+		/* Obtain actions. */
+		acts = get_actions(&flowcmd);
+		error = PTR_ERR(acts);
+		if (IS_ERR(acts))
+			goto error_free_flow;
+		rcu_assign_pointer(flow->sf_acts, acts);
+
+		/* Copy the reply before inserting the flow, so that nothing
+		 * can fail once the flow is visible in the table. */
+		error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
+		if (error)
+			goto error_free_flow;
+
+		/* Put flow in bucket. */
+		error = tbl_insert(table, &flow->tbl_node, hash);
+		if (error)
+			goto error_free_flow;
+	} else {
+		/* We found a matching flow. */
+		struct sw_flow_actions *old_acts;
+
+		/* Bail out if we're not allowed to modify an existing flow.
+		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
+		 * because Generic Netlink treats the latter as a dump
+		 * request. We also accept NLM_F_EXCL in case that bug ever
+		 * gets fixed.
+		 */
+		error = -EEXIST;
+		if (flowcmd.nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
+			goto error_unlock_dp;
+
+		/* Update actions, but only if they actually differ. */
+		flow = flow_cast(flow_node);
+		old_acts = rcu_dereference_protected(flow->sf_acts,
+						     lockdep_is_held(&dp->mutex));
+		if (flowcmd.actions &&
+		    (old_acts->actions_len != flowcmd.actions_len ||
+		     memcmp(old_acts->actions, flowcmd.actions,
+			    flowcmd.actions_len))) {
+			struct sw_flow_actions *new_acts;
+
+			new_acts = get_actions(&flowcmd);
+			error = PTR_ERR(new_acts);
+			if (IS_ERR(new_acts))
+				goto error_unlock_dp;
+
+			rcu_assign_pointer(flow->sf_acts, new_acts);
+			/* Old actions may still be in use by RCU readers. */
+			flow_deferred_free_acts(old_acts);
+		}
+
+		error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
+		if (error)
+			goto error_unlock_dp;
+
+		/* Clear stats. */
+		if (flowcmd.clear) {
+			spin_lock_bh(&flow->lock);
+			clear_stats(flow);
+			spin_unlock_bh(&flow->lock);
+		}
+	}
+	kfree_skb(skb);
+	mutex_unlock(&dp->mutex);
+	return 0;
+
+error_free_flow:
+	flow_put(flow);
+error_unlock_dp:
+	mutex_unlock(&dp->mutex);
+error_kfree_skb:
+	kfree_skb(skb);
+exit:
+	return error;
+}
+
+/* Handles ODP_FLOW_GET and ODP_FLOW_DEL: looks up the flow matching the key
+ * in 'uodp_flow' and copies it back to userspace; for ODP_FLOW_DEL the flow
+ * is also removed from the datapath's flow table first.
+ *
+ * Returns 0 on success or a negative errno (-ENODEV for a bad datapath,
+ * -ENOENT if no flow matches the key).
+ */
+static int get_or_del_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
+{
+	struct tbl_node *flow_node;
+	struct dp_flowcmd flowcmd;
+	struct sw_flow *flow;
+	struct sk_buff *skb;
+	struct datapath *dp;
+	struct tbl *table;
+	int err;
+
+	skb = copy_flow_from_user(uodp_flow, &flowcmd);
+	err = PTR_ERR(skb);
+	if (IS_ERR(skb))
+		goto exit;
+
+	dp = get_dp_locked(flowcmd.dp_idx);
+	err = -ENODEV;
+	if (!dp)
+		goto exit_kfree_skb;
+
+	table = get_table_protected(dp);
+	flow_node = tbl_lookup(table, &flowcmd.key, flow_hash(&flowcmd.key), flow_cmp);
+	err = -ENOENT;
+	if (!flow_node)
+		goto exit_unlock_dp;
+
+	if (cmd == ODP_FLOW_DEL) {
+		err = tbl_remove(table, flow_node);
+		if (err)
+			goto exit_unlock_dp;
+	}
+
+	flow = flow_cast(flow_node);
+	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
+
+	/* Once the flow has been removed from the table it must be freed even
+	 * if the copy to userspace failed, or it would leak.  Deferred-free
+	 * because RCU readers may still hold references. */
+	if (cmd == ODP_FLOW_DEL)
+		flow_deferred_free(flow);
+
+exit_unlock_dp:
+	mutex_unlock(&dp->mutex);
+exit_kfree_skb:
+	kfree_skb(skb);
+exit:
+	return err;
+}
+
+/* Handles ODP_FLOW_DUMP: copies the flow following the position encoded in
+ * the request's ODP_FLOW_ATTR_STATE back to userspace, along with an updated
+ * state cookie for the next call.
+ *
+ * Returns 0 on success, -ENODEV if the datapath does not exist or there are
+ * no more flows to dump, or another negative errno on error.
+ */
+static int dump_flow(struct odp_flow __user *uodp_flow)
+{
+	struct tbl_node *flow_node;
+	struct dp_flowcmd flowcmd;
+	struct sw_flow *flow;
+	struct sk_buff *skb;
+	struct datapath *dp;
+	u32 bucket, obj;
+	int err;
+
+	skb = copy_flow_from_user(uodp_flow, &flowcmd);
+	err = PTR_ERR(skb);
+	if (IS_ERR(skb))
+		goto exit;
+
+	dp = get_dp_locked(flowcmd.dp_idx);
+	err = -ENODEV;
+	if (!dp)
+		goto exit_free;
+
+	/* The 64-bit state packs a bucket index in the high 32 bits and an
+	 * object index within that bucket in the low 32 bits. */
+	bucket = flowcmd.state >> 32;
+	obj = flowcmd.state;
+	flow_node = tbl_next(dp->table, &bucket, &obj);
+	err = -ENODEV;
+	if (!flow_node)
+		goto exit_unlock_dp;
+
+	flow = flow_cast(flow_node);
+	/* Pass the advanced position back as the state for the next dump. */
+	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len,
+				((u64)bucket << 32) | obj);
+
+exit_unlock_dp:
+	mutex_unlock(&dp->mutex);
+exit_free:
+	kfree_skb(skb);
+exit:
+	return err;
+}
+
+/* Validation policy for the netlink attributes of an ODP_DP_* request. */
+static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
+	[ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+	[ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
+	[ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
+};
+
+/* Serializes 'dp' (header plus netlink attributes) into a kernel buffer and
+ * copies the result to the userspace buffer 'dst', which has room for
+ * 'total_len' bytes.
+ *
+ * Returns 0 on success, -ENOMEM if the kernel buffer cannot be allocated,
+ * -EMSGSIZE if the message does not fit in 'total_len' (or an attribute does
+ * not fit in the skb), or -EFAULT if the copy to userspace fails.
+ */
+static int copy_datapath_to_user(void __user *dst, struct datapath *dp, uint32_t total_len)
+{
+	struct odp_datapath *odp_datapath;
+	struct sk_buff *skb;
+	struct nlattr *nla;
+	int err;
+
+	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	err = -ENOMEM;
+	if (!skb)
+		goto exit;
+
+	/* Fixed-size header precedes the attributes. */
+	odp_datapath = (struct odp_datapath*)__skb_put(skb, sizeof(struct odp_datapath));
+	odp_datapath->dp_idx = dp->dp_idx;
+	odp_datapath->total_len = total_len;
+
+	/* dp_name() is read under RCU. */
+	rcu_read_lock();
+	err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
+	rcu_read_unlock();
+	if (err)
+		goto nla_put_failure;
+
+	nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
+	if (!nla)
+		goto nla_put_failure;
+	get_dp_stats(dp, nla_data(nla));
+
+	/* NLA_PUT_* macros jump to nla_put_failure on lack of space. */
+	NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
+		    dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);
+
+	if (dp->sflow_probability)
+		NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);
+
+	if (skb->len > total_len)
+		goto nla_put_failure;
+
+	odp_datapath->len = skb->len;
+	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
+	goto exit_free_skb;
+
+nla_put_failure:
+	err = -EMSGSIZE;
+exit_free_skb:
+	kfree_skb(skb);
+exit:
+	return err;
+}
+
+/* Copies the userspace request 'uodp_datapath' into a freshly allocated skb,
+ * parses its trailing netlink attributes into 'a' according to
+ * datapath_policy, and validates them.
+ *
+ * Returns the skb (caller owns it and must kfree_skb() it) or an ERR_PTR:
+ * -EFAULT on a bad userspace copy, -EINVAL on a malformed request,
+ * -ENOMEM if allocation fails.
+ */
+static struct sk_buff *copy_datapath_from_user(struct odp_datapath __user *uodp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
+{
+	struct odp_datapath *odp_datapath;
+	struct sk_buff *skb;
+	u32 len;
+	int err;
+
+	/* 'len' is the total request size, header included. */
+	if (get_user(len, &uodp_datapath->len))
+		return ERR_PTR(-EFAULT);
+	if (len < sizeof(struct odp_datapath))
+		return ERR_PTR(-EINVAL);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	err = -EFAULT;
+	if (copy_from_user(__skb_put(skb, len), uodp_datapath, len))
+		goto error_free_skb;
+
+	/* Re-check the embedded length against what we read via get_user();
+	 * userspace could have raced between the two reads. */
+	odp_datapath = (struct odp_datapath *)skb->data;
+	err = -EINVAL;
+	if (odp_datapath->len != len)
+		goto error_free_skb;
+
+	err = nla_parse(a, ODP_DP_ATTR_MAX,
+			(struct nlattr *)(skb->data + sizeof(struct odp_datapath)),
+			skb->len - sizeof(struct odp_datapath), datapath_policy);
+	if (err)
+		goto error_free_skb;
+
+	if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
+		u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);
+
+		err = -EINVAL;
+		if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
+			goto error_free_skb;
+	}
+
+	err = VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
+	if (err)
+		goto error_free_skb;
+
+	return skb;
+
+error_free_skb:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+
+/* Called with dp_mutex and optionally with RTNL lock also.
+ * Holds the returned datapath's mutex on return.
+ */
+static struct datapath *lookup_datapath(struct odp_datapath *odp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
+{
+	WARN_ON_ONCE(!mutex_is_locked(&dp_mutex));
+
+	if (!a[ODP_DP_ATTR_NAME]) {
+		/* Look up by datapath number. */
+		struct datapath *dp;
+
+		dp = get_dp(odp_datapath->dp_idx);
+		if (!dp)
+			return ERR_PTR(-ENODEV);
+		mutex_lock(&dp->mutex);
+		return dp;
+	} else {
+		/* Look up by the name of the datapath's local port. */
+		struct datapath *dp;
+		struct vport *vport;
+		int dp_idx;
+
+		vport_lock();
+		vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
+		dp_idx = vport && vport->port_no == ODPP_LOCAL ? vport->dp->dp_idx : -1;
+		vport_unlock();
+
+		if (dp_idx < 0)
+			return ERR_PTR(-ENODEV);
+
+		/* dp_mutex is held, so the datapath found above cannot have
+		 * been destroyed, but check anyway rather than dereferencing
+		 * a NULL pointer if that invariant ever changes. */
+		dp = get_dp(dp_idx);
+		if (!dp)
+			return ERR_PTR(-ENODEV);
+		mutex_lock(&dp->mutex);
+		return dp;
+	}
+}
+
+/* Applies the optional ODP_DP_ATTR_IPV4_FRAGS and ODP_DP_ATTR_SAMPLING
+ * attributes in 'a' to 'dp'; attributes that are absent leave the
+ * corresponding settings untouched. */
+static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
+{
+	const struct nlattr *frags = a[ODP_DP_ATTR_IPV4_FRAGS];
+	const struct nlattr *sampling = a[ODP_DP_ATTR_SAMPLING];
+
+	if (frags)
+		dp->drop_frags = nla_get_u32(frags) == ODP_DP_FRAG_DROP;
+	if (sampling)
+		dp->sflow_probability = nla_get_u32(sampling);
+}
+
+/* Handles ODP_DP_NEW: creates a new datapath named by ODP_DP_ATTR_NAME,
+ * either at the requested dp_idx or (if dp_idx < 0) at the first free slot.
+ *
+ * Lock order: rtnl_lock, then dp_mutex, then the new dp's mutex.  The new
+ * dp's mutex is held from before the datapath becomes reachable until it is
+ * fully initialized and published in dps[].
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int new_datapath(struct odp_datapath __user *uodp_datapath)
+{
+	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
+	struct odp_datapath *odp_datapath;
+	struct vport_parms parms;
+	struct sk_buff *skb;
+	struct datapath *dp;
+	struct vport *vport;
+	int dp_idx;
+	int err;
+	int i;
+
+	skb = copy_datapath_from_user(uodp_datapath, a);
+	err = PTR_ERR(skb);
+	if (IS_ERR(skb))
+		goto err;
+	odp_datapath = (struct odp_datapath *)skb->data;
+
+	/* A name for the local port is mandatory. */
+	err = -EINVAL;
+	if (!a[ODP_DP_ATTR_NAME])
+		goto err_free_skb;
+
+	rtnl_lock();
+	mutex_lock(&dp_mutex);
+	err = -ENODEV;
+	if (!try_module_get(THIS_MODULE))
+		goto err_unlock_dp_mutex;
+
+	/* Pick a datapath number: caller-specified, or first free slot when
+	 * the request passes a negative index. */
+	dp_idx = odp_datapath->dp_idx;
+	if (dp_idx < 0) {
+		err = -EFBIG;
+		for (dp_idx = 0; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
+			if (get_dp(dp_idx))
+				continue;
+			err = 0;
+			break;
+		}
+	} else if (dp_idx < ARRAY_SIZE(dps))
+		err = get_dp(dp_idx) ? -EBUSY : 0;
+	else
+		err = -EINVAL;
+	if (err)
+		goto err_put_module;
+
+	err = -ENOMEM;
+	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+	if (dp == NULL)
+		goto err_put_module;
+	INIT_LIST_HEAD(&dp->port_list);
+	mutex_init(&dp->mutex);
+	mutex_lock(&dp->mutex);
+	dp->dp_idx = dp_idx;
+	for (i = 0; i < DP_N_QUEUES; i++)
+		skb_queue_head_init(&dp->queues[i]);
+	init_waitqueue_head(&dp->waitqueue);
+
+	/* Initialize kobject for bridge. This will be added as
+	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
+	dp->ifobj.kset = NULL;
+	kobject_init(&dp->ifobj, &dp_ktype);
+
+	/* Allocate table. */
+	err = -ENOMEM;
+	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
+	if (!dp->table)
+		goto err_free_dp;
+
+	/* Set up our datapath device (the internal port at ODPP_LOCAL). */
+	parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
+	parms.type = ODP_VPORT_TYPE_INTERNAL;
+	parms.options = NULL;
+	parms.dp = dp;
+	parms.port_no = ODPP_LOCAL;
+	vport = new_vport(&parms);
+	if (IS_ERR(vport)) {
+		err = PTR_ERR(vport);
+		/* -EBUSY from the vport layer means the name is taken. */
+		if (err == -EBUSY)
+			err = -EEXIST;
+
+		goto err_destroy_table;
+	}
+
+	dp->drop_frags = 0;
+	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+	if (!dp->stats_percpu) {
+		err = -ENOMEM;
+		goto err_destroy_local_port;
+	}
+
+	/* Apply any optional attributes from the request. */
+	change_datapath(dp, a);
+
+	/* Publish the fully initialized datapath. */
+	rcu_assign_pointer(dps[dp_idx], dp);
+	dp_sysfs_add_dp(dp);
+
+	mutex_unlock(&dp->mutex);
+	mutex_unlock(&dp_mutex);
+	rtnl_unlock();
+
+	return 0;
+
+err_destroy_local_port:
+	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
+err_destroy_table:
+	tbl_destroy(get_table_protected(dp), NULL);
+err_free_dp:
+	mutex_unlock(&dp->mutex);
+	kfree(dp);
+err_put_module:
+	module_put(THIS_MODULE);
+err_unlock_dp_mutex:
+	mutex_unlock(&dp_mutex);
+	rtnl_unlock();
+err_free_skb:
+	kfree_skb(skb);
+err:
+	return err;
+}
+
+/* Handles ODP_DP_DEL: destroys the datapath identified by index or by local
+ * port name in 'uodp_datapath'.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int del_datapath(struct odp_datapath __user *uodp_datapath)
+{
+	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
+	struct datapath *dp;
+	struct sk_buff *skb;
+	int err;
+
+	skb = copy_datapath_from_user(uodp_datapath, a);
+	err = PTR_ERR(skb);
+	if (IS_ERR(skb))
+		goto exit;
+
+	rtnl_lock();
+	mutex_lock(&dp_mutex);
+	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
+	err = PTR_ERR(dp);
+	if (IS_ERR(dp))
+		goto exit_free;
+
+	/* NOTE(review): lookup_datapath() returns with dp->mutex held and
+	 * there is no unlock on this path, so destroy_dp() must release (or
+	 * consume) that mutex — confirm against its definition. */
+	destroy_dp(dp);
+	err = 0;
+
+exit_free:
+	kfree_skb(skb);
+	mutex_unlock(&dp_mutex);
+	rtnl_unlock();
+exit:
+	return err;
+}
+
+/* Handles ODP_DP_SET: applies the attributes in 'uodp_datapath' to the
+ * datapath it identifies.  Returns 0 on success or a negative errno. */
+static int set_datapath(struct odp_datapath __user *uodp_datapath)
+{
+	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
+	struct sk_buff *skb;
+	struct datapath *dp;
+	int err;
+
+	skb = copy_datapath_from_user(uodp_datapath, a);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		goto exit;
+	}
+
+	mutex_lock(&dp_mutex);
+	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
+	if (IS_ERR(dp)) {
+		err = PTR_ERR(dp);
+		goto exit_free;
+	}
+
+	/* lookup_datapath() returned with dp->mutex held. */
+	err = 0;
+	change_datapath(dp, a);
+	mutex_unlock(&dp->mutex);
+
+exit_free:
+	kfree_skb(skb);
+	mutex_unlock(&dp_mutex);
+exit:
+	return err;
+}
+
+/* Handles ODP_DP_GET: copies the identified datapath's description back to
+ * userspace.  Returns 0 on success or a negative errno. */
+static int get_datapath(struct odp_datapath __user *uodp_datapath)
+{
+	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
+	struct odp_datapath *odp_datapath;
+	struct datapath *dp;
+	struct sk_buff *skb;
+	int err;
+
+	skb = copy_datapath_from_user(uodp_datapath, a);
+	err = PTR_ERR(skb);
+	if (IS_ERR(skb))
+		goto exit;
+	odp_datapath = (struct odp_datapath *)skb->data;
+
+	/* lookup_datapath() returns with dp->mutex held, which keeps the
+	 * datapath alive after dp_mutex is dropped. */
+	mutex_lock(&dp_mutex);
+	dp = lookup_datapath(odp_datapath, a);
+	mutex_unlock(&dp_mutex);
+
+	err = PTR_ERR(dp);
+	if (IS_ERR(dp))
+		goto exit_free;
+
+	err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
+	mutex_unlock(&dp->mutex);
+exit_free:
+	kfree_skb(skb);
+exit:
+	return err;
+}
+
+/* Handles ODP_DP_DUMP: copies out the first existing datapath whose index is
+ * >= the dp_idx given in the request.  Userspace iterates by passing the
+ * previous index plus one on each call.
+ *
+ * Returns 0 on success, -ENODEV when there are no more datapaths, or another
+ * negative errno on error.
+ */
+static int dump_datapath(struct odp_datapath __user *uodp_datapath)
+{
+	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
+	struct odp_datapath *odp_datapath;
+	struct sk_buff *skb;
+	u32 dp_idx;
+	int err;
+
+	skb = copy_datapath_from_user(uodp_datapath, a);
+	err = PTR_ERR(skb);
+	if (IS_ERR(skb))
+		goto exit;
+	odp_datapath = (struct odp_datapath *)skb->data;
+
+	mutex_lock(&dp_mutex);
+	for (dp_idx = odp_datapath->dp_idx; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
+		struct datapath *dp = get_dp(dp_idx);
+		if (!dp)
+			continue;
+
+		/* Take dp->mutex before dropping dp_mutex so that the
+		 * datapath cannot go away while we copy it out. */
+		mutex_lock(&dp->mutex);
+		mutex_unlock(&dp_mutex);
+		err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
+		mutex_unlock(&dp->mutex);
+		goto exit_free;
+	}
+	mutex_unlock(&dp_mutex);
+	err = -ENODEV;
+
+exit_free:
+	kfree_skb(skb);
+exit:
+	return err;
+}
+
+/* Validation policy for the netlink attributes of an ODP_VPORT_* request. */
+static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
+	[ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+	[ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+	[ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+	[ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
+	[ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
+	[ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
+	[ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+};
+
+/* Serializes 'vport' (header plus netlink attributes) into a kernel buffer
+ * and copies the result to the userspace buffer 'dst', which has room for
+ * 'total_len' bytes.
+ *
+ * Returns 0 on success, -ENOMEM if the kernel buffer cannot be allocated,
+ * -EMSGSIZE if the message does not fit, -EFAULT if the copy to userspace
+ * fails, or an error from vport_get_options().
+ */
+static int copy_vport_to_user(void __user *dst, struct vport *vport, uint32_t total_len)
+{
+	struct odp_vport *odp_vport;
+	struct sk_buff *skb;
+	struct nlattr *nla;
+	int ifindex, iflink;
+	int err;
+
+	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	err = -ENOMEM;
+	if (!skb)
+		goto exit;
+
+	rcu_read_lock();
+	odp_vport = (struct odp_vport*)__skb_put(skb, sizeof(struct odp_vport));
+	odp_vport->dp_idx = vport->dp->dp_idx;
+	odp_vport->total_len = total_len;
+
+	NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
+	NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
+	NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));
+
+	nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
+	if (!nla)
+		goto nla_put_failure;
+	/* If stats are unavailable, drop the reserved attribute again rather
+	 * than reporting uninitialized data. */
+	if (vport_get_stats(vport, nla_data(nla)))
+		__skb_trim(skb, skb->len - nla->nla_len);
+
+	NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
+
+	NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));
+
+	/* Fix: this return value was previously assigned but never checked,
+	 * so a failure to serialize the options was silently ignored. */
+	err = vport_get_options(vport, skb);
+	if (err)
+		goto exit_unlock;
+
+	ifindex = vport_get_ifindex(vport);
+	if (ifindex > 0)
+		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);
+
+	iflink = vport_get_iflink(vport);
+	if (iflink > 0)
+		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);
+
+	err = -EMSGSIZE;
+	if (skb->len > total_len)
+		goto exit_unlock;
+
+	odp_vport->len = skb->len;
+	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
+	goto exit_unlock;
+
+nla_put_failure:
+	err = -EMSGSIZE;
+exit_unlock:
+	rcu_read_unlock();
+	kfree_skb(skb);
+exit:
+	return err;
+}
+
+/* Copies the userspace request 'uodp_vport' into a freshly allocated skb,
+ * parses its trailing netlink attributes into 'a' according to vport_policy,
+ * and validates them.
+ *
+ * Returns the skb (caller owns it and must kfree_skb() it) or an ERR_PTR:
+ * -EFAULT on a bad userspace copy, -EINVAL on a malformed request,
+ * -ENOMEM if allocation fails.
+ */
+static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
+					    struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
+{
+	struct odp_vport *odp_vport;
+	struct sk_buff *skb;
+	u32 len;
+	int err;
+
+	if (get_user(len, &uodp_vport->len))
+		return ERR_PTR(-EFAULT);
+	if (len < sizeof(struct odp_vport))
+		return ERR_PTR(-EINVAL);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	err = -EFAULT;
+	if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
+		goto error_free_skb;
+
+	/* Re-check the embedded length against the get_user() value;
+	 * userspace could have raced between the two reads. */
+	odp_vport = (struct odp_vport *)skb->data;
+	err = -EINVAL;
+	if (odp_vport->len != len)
+		goto error_free_skb;
+
+	err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
+			skb->len - sizeof(struct odp_vport), vport_policy);
+	if (err)
+		goto error_free_skb;
+
+	err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
+	if (err)
+		goto error_free_skb;
+
+	return skb;
+
+error_free_skb:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+
+
+/* Called without any locks (or with RTNL lock).
+ * Holds the returned vport's dp->mutex on return.
+ *
+ * Looks up a vport either by ODP_VPORT_ATTR_NAME or, failing that, by
+ * dp_idx + ODP_VPORT_ATTR_PORT_NO.  Returns the vport or an ERR_PTR.
+ */
+static struct vport *lookup_vport(struct odp_vport *odp_vport,
+				  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
+{
+	struct datapath *dp;
+	struct vport *vport;
+
+	if (a[ODP_VPORT_ATTR_NAME]) {
+		int dp_idx, port_no;
+
+	retry:
+		/* vport_lock is dropped before dp->mutex is taken, so the
+		 * port can be renamed or moved in between; after locking the
+		 * datapath we re-validate and retry on any mismatch. */
+		vport_lock();
+		vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
+		if (!vport) {
+			vport_unlock();
+			return ERR_PTR(-ENODEV);
+		}
+		dp_idx = vport->dp->dp_idx;
+		port_no = vport->port_no;
+		vport_unlock();
+
+		dp = get_dp_locked(dp_idx);
+		if (!dp)
+			goto retry;
+
+		vport = get_vport_protected(dp, port_no);
+		if (!vport ||
+		    strcmp(vport_get_name(vport), nla_data(a[ODP_VPORT_ATTR_NAME]))) {
+			mutex_unlock(&dp->mutex);
+			goto retry;
+		}
+
+		return vport;
+	} else if (a[ODP_VPORT_ATTR_PORT_NO]) {
+		u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
+
+		if (port_no >= DP_MAX_PORTS)
+			return ERR_PTR(-EINVAL);
+
+		dp = get_dp_locked(odp_vport->dp_idx);
+		if (!dp)
+			return ERR_PTR(-ENODEV);
+
+		vport = get_vport_protected(dp, port_no);
+		if (!vport) {
+			mutex_unlock(&dp->mutex);
+			return ERR_PTR(-ENOENT);
+		}
+		return vport;
+	} else
+		return ERR_PTR(-EINVAL);
+}
+
+/* Applies the optional stats, address, and MTU attributes in 'a' to 'vport'.
+ * Stops at the first failing setter and returns its error; 0 on success. */
+static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
+{
+	int err;
+
+	if (a[ODP_VPORT_ATTR_STATS]) {
+		err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
+		if (err)
+			return err;
+	}
+
+	if (a[ODP_VPORT_ATTR_ADDRESS]) {
+		err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
+		if (err)
+			return err;
+	}
+
+	if (a[ODP_VPORT_ATTR_MTU]) {
+		err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Handles ODP_VPORT_NEW: attaches a new vport, named by ODP_VPORT_ATTR_NAME
+ * and typed by ODP_VPORT_ATTR_TYPE, to the requested datapath.  The port
+ * number is taken from ODP_VPORT_ATTR_PORT_NO when present, otherwise the
+ * lowest free number >= 1 is assigned (0 is the local port, ODPP_LOCAL).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int attach_vport(struct odp_vport __user *uodp_vport)
+{
+	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+	struct odp_vport *odp_vport;
+	struct vport_parms parms;
+	struct vport *vport;
+	struct sk_buff *skb;
+	struct datapath *dp;
+	u32 port_no;
+	int err;
+
+	skb = copy_vport_from_user(uodp_vport, a);
+	err = PTR_ERR(skb);
+	if (IS_ERR(skb))
+		goto exit;
+	odp_vport = (struct odp_vport *)skb->data;
+
+	/* Name and type are mandatory for creation. */
+	err = -EINVAL;
+	if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
+		goto exit_kfree_skb;
+
+	rtnl_lock();
+
+	dp = get_dp_locked(odp_vport->dp_idx);
+	err = -ENODEV;
+	if (!dp)
+		goto exit_unlock_rtnl;
+
+	if (a[ODP_VPORT_ATTR_PORT_NO]) {
+		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
+
+		err = -EFBIG;
+		if (port_no >= DP_MAX_PORTS)
+			goto exit_unlock_dp;
+
+		vport = get_vport_protected(dp, port_no);
+		err = -EBUSY;
+		if (vport)
+			goto exit_unlock_dp;
+	} else {
+		/* Auto-assign: first unused port number, starting at 1. */
+		for (port_no = 1; ; port_no++) {
+			if (port_no >= DP_MAX_PORTS) {
+				err = -EFBIG;
+				goto exit_unlock_dp;
+			}
+			vport = get_vport_protected(dp, port_no);
+			if (!vport)
+				break;
+		}
+	}
+
+	parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
+	parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
+	parms.options = a[ODP_VPORT_ATTR_OPTIONS];
+	parms.dp = dp;
+	parms.port_no = port_no;
+
+	vport = new_vport(&parms);
+	err = PTR_ERR(vport);
+	if (IS_ERR(vport))
+		goto exit_unlock_dp;
+
+	set_internal_devs_mtu(dp);
+	dp_sysfs_add_if(vport);
+
+	/* If the optional stats/address/MTU attributes cannot be applied,
+	 * tear the new port down again. */
+	err = change_vport(vport, a);
+	if (err) {
+		dp_detach_port(vport);
+		goto exit_unlock_dp;
+	}
+
+	/* NOTE(review): if this copy-out fails, the error is returned but the
+	 * port stays attached — confirm that callers tolerate this. */
+	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
+
+exit_unlock_dp:
+	mutex_unlock(&dp->mutex);
+exit_unlock_rtnl:
+	rtnl_unlock();
+exit_kfree_skb:
+	kfree_skb(skb);
+exit:
+	return err;
+}
+
+static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
+{
+ struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+ struct vport *vport;
+ struct sk_buff *skb;
+ int err;