+ /* NOTE(review): this hunk is the tail of a Generic Netlink flow
+  * new/set handler (function signature and local declarations are in
+  * an earlier hunk).  Error convention throughout is "negative errno
+  * in 'error', goto-ladder cleanup at the bottom".
+  */
+ /* Extract key.  The key attribute is mandatory for both NEW and SET;
+  * set -EINVAL up front so a missing attribute falls straight through.
+  */
+ error = -EINVAL;
+ if (!a[ODP_FLOW_ATTR_KEY])
+ goto error;
+ error = flow_from_nlattrs(&key, &key_len, a[ODP_FLOW_ATTR_KEY]);
+ if (error)
+ goto error;
+
+ /* Validate actions.  Actions are optional for SET (keep old actions)
+  * but mandatory for NEW. */
+ if (a[ODP_FLOW_ATTR_ACTIONS]) {
+ error = validate_actions(a[ODP_FLOW_ATTR_ACTIONS]);
+ if (error)
+ goto error;
+ } else if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW) {
+ error = -EINVAL;
+ goto error;
+ }
+
+ dp = get_dp(odp_header->dp_ifindex);
+ error = -ENODEV;
+ if (!dp)
+ goto error;
+
+ /* Look the flow up by (key, key_len) hash; the lookup result decides
+  * whether this request creates a new flow or modifies an existing one.
+  */
+ hash = flow_hash(&key, key_len);
+ table = get_table_protected(dp);
+ flow_node = tbl_lookup(table, &key, key_len, hash, flow_cmp);
+ if (!flow_node) {
+ struct sw_flow_actions *acts;
+
+ /* Bail out if we're not allowed to create a new flow. */
+ error = -ENOENT;
+ if (info->genlhdr->cmd == ODP_FLOW_CMD_SET)
+ goto error;
+
+ /* Expand table, if necessary, to make room.  Re-read the table
+  * pointer afterwards since expand_table() replaces it. */
+ if (tbl_count(table) >= tbl_n_buckets(table)) {
+ error = expand_table(dp);
+ if (error)
+ goto error;
+ table = get_table_protected(dp);
+ }
+
+ /* Allocate flow.  flow_alloc() reports failure via an
+  * ERR_PTR-encoded pointer, not NULL. */
+ flow = flow_alloc();
+ if (IS_ERR(flow)) {
+ error = PTR_ERR(flow);
+ goto error;
+ }
+ flow->key = key;
+ clear_stats(flow);
+
+ /* Obtain actions.  Publish them with rcu_assign_pointer() so
+  * readers never observe a half-initialized actions block. */
+ acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
+ error = PTR_ERR(acts);
+ if (IS_ERR(acts))
+ goto error_free_flow;
+ rcu_assign_pointer(flow->sf_acts, acts);
+
+ /* Put flow in bucket.  On failure, flow_put() below releases the
+  * flow (presumably including its actions -- TODO confirm). */
+ error = tbl_insert(table, &flow->tbl_node, hash);
+ if (error)
+ goto error_free_flow;
+
+ reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
+ info->snd_seq, ODP_FLOW_CMD_NEW);
+ } else {
+ /* We found a matching flow. */
+ struct sw_flow_actions *old_acts;
+
+ /* Bail out if we're not allowed to modify an existing flow.
+ * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
+ * because Generic Netlink treats the latter as a dump
+ * request. We also accept NLM_F_EXCL in case that bug ever
+ * gets fixed.
+ */
+ error = -EEXIST;
+ if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW &&
+ info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
+ goto error;
+
+ /* Update actions.  Only swap in a new actions block when the
+ * supplied actions differ (by length or content) from the
+ * current ones; old actions are freed after an RCU grace
+ * period so concurrent readers stay safe. */
+ flow = flow_cast(flow_node);
+ old_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_genl_is_held());
+ if (a[ODP_FLOW_ATTR_ACTIONS] &&
+ (old_acts->actions_len != nla_len(a[ODP_FLOW_ATTR_ACTIONS]) ||
+ memcmp(old_acts->actions, nla_data(a[ODP_FLOW_ATTR_ACTIONS]),
+ old_acts->actions_len))) {
+ struct sw_flow_actions *new_acts;
+
+ new_acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
+ error = PTR_ERR(new_acts);
+ if (IS_ERR(new_acts))
+ goto error;
+
+ rcu_assign_pointer(flow->sf_acts, new_acts);
+ flow_deferred_free_acts(old_acts);
+ }
+
+ reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
+ info->snd_seq, ODP_FLOW_CMD_NEW);
+
+ /* Clear stats.  Guarded by the flow's spinlock because the
+ * datapath updates stats from softirq context -- presumably;
+ * the _bh variant suggests so, TODO confirm. */
+ if (a[ODP_FLOW_ATTR_CLEAR]) {
+ spin_lock_bh(&flow->lock);
+ clear_stats(flow);
+ spin_unlock_bh(&flow->lock);
+ }
+ }
+
+ /* The operation itself succeeded at this point; a failure to build
+  * the notification is reported to multicast listeners via
+  * netlink_set_err() rather than failing the request. */
+ if (!IS_ERR(reply))
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ else
+ netlink_set_err(INIT_NET_GENL_SOCK, 0,
+ dp_flow_multicast_group.id, PTR_ERR(reply));
+ return 0;
+
+error_free_flow:
+ flow_put(flow);
+error:
+ return error;