+ if (base->dl_type != htons(ETH_TYPE_IP) ||
+ !base->nw_src || !base->nw_dst) {
+ return;
+ }
+
+ if (base->nw_src == flow->nw_src &&
+ base->nw_dst == flow->nw_dst &&
+ base->nw_tos == flow->nw_tos &&
+ base->nw_ttl == flow->nw_ttl &&
+ base->nw_frag == flow->nw_frag) {
+ return;
+ }
+
+ ipv4_key.ipv4_src = base->nw_src = flow->nw_src;
+ ipv4_key.ipv4_dst = base->nw_dst = flow->nw_dst;
+ ipv4_key.ipv4_proto = base->nw_proto;
+ ipv4_key.ipv4_tos = flow->nw_tos;
+ ipv4_key.ipv4_ttl = flow->nw_ttl;
+ ipv4_key.ipv4_frag = (base->nw_frag == 0 ? OVS_FRAG_TYPE_NONE
+ : base->nw_frag == FLOW_NW_FRAG_ANY
+ ? OVS_FRAG_TYPE_FIRST : OVS_FRAG_TYPE_LATER);
+
+ commit_set_action(odp_actions, OVS_KEY_ATTR_IPV4,
+ &ipv4_key, sizeof(ipv4_key));
+}
+
+/* Appends an OVS_KEY_ATTR_TCP or OVS_KEY_ATTR_UDP "set" action to
+ * 'odp_actions' when the desired transport ports in 'flow' differ from
+ * those already in effect in 'base'. */
+static void
+commit_set_port_action(const struct flow *flow, struct flow *base,
+                       struct ofpbuf *odp_actions)
+{
+    /* A zero port in the base flow means the flow key carried no L4
+     * ports, so there is nothing that could be rewritten. */
+    if (!base->tp_src || !base->tp_dst) {
+        return;
+    }
+
+    /* Already up to date? */
+    if (base->tp_src == flow->tp_src && base->tp_dst == flow->tp_dst) {
+        return;
+    }
+
+    switch (flow->nw_proto) {
+    case IPPROTO_TCP: {
+        struct ovs_key_tcp tcp_key;
+
+        base->tp_src = flow->tp_src;
+        base->tp_dst = flow->tp_dst;
+        tcp_key.tcp_src = flow->tp_src;
+        tcp_key.tcp_dst = flow->tp_dst;
+        commit_set_action(odp_actions, OVS_KEY_ATTR_TCP,
+                          &tcp_key, sizeof tcp_key);
+        break;
+    }
+
+    case IPPROTO_UDP: {
+        struct ovs_key_udp udp_key;
+
+        base->tp_src = flow->tp_src;
+        base->tp_dst = flow->tp_dst;
+        udp_key.udp_src = flow->tp_src;
+        udp_key.udp_dst = flow->tp_dst;
+        commit_set_action(odp_actions, OVS_KEY_ATTR_UDP,
+                          &udp_key, sizeof udp_key);
+        break;
+    }
+
+    default:
+        /* Neither TCP nor UDP: emit nothing and leave 'base' untouched,
+         * exactly as before. */
+        break;
+    }
+}
+
+/* Appends an OVS_KEY_ATTR_PRIORITY "set" action to 'odp_actions' when the
+ * skb priority in 'flow' differs from the one recorded in 'base'. */
+static void
+commit_set_priority_action(const struct flow *flow, struct flow *base,
+                           struct ofpbuf *odp_actions)
+{
+    if (base->priority != flow->priority) {
+        base->priority = flow->priority;
+        commit_set_action(odp_actions, OVS_KEY_ATTR_PRIORITY,
+                          &base->priority, sizeof base->priority);
+    }
+}
+
+/* Flushes every pending header modification in 'ctx' into
+ * 'ctx->odp_actions' as datapath "set" actions.  Called immediately before
+ * composing an action (output, controller, ...) that must see the packet's
+ * current headers. */
+static void
+commit_odp_actions(struct action_xlate_ctx *ctx)
+{
+    commit_set_tun_id_action(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
+    commit_set_ether_addr_action(&ctx->flow, &ctx->base_flow,
+                                 ctx->odp_actions);
+    commit_vlan_action(ctx, ctx->flow.vlan_tci);
+    commit_set_nw_action(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
+    commit_set_port_action(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
+    commit_set_priority_action(&ctx->flow, &ctx->base_flow,
+                               ctx->odp_actions);
+}
+
+/* Appends an OVS_ACTION_ATTR_OUTPUT to 'ofp_port' regardless of STP state.
+ * Only the controller-configured OFPPC_NO_FWD flag can still suppress the
+ * output.  Also records the port for sFlow sampling. */
+static void
+force_compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
+{
+    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
+    uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
+
+    if (!ofport || !(ofport->up.opp.config & htonl(OFPPC_NO_FWD))) {
+        nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
+        ctx->sflow_odp_port = odp_port;
+        ctx->sflow_n_outputs++;
+    }
+}
+
+/* Appends an output to 'ofp_port', honoring spanning tree: a port whose STP
+ * state forbids forwarding transmits nothing.  A port with no ofport record
+ * is deliberately allowed through -- such a port may appear later and we
+ * could be pre-populating the flow table. */
+static void
+compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
+{
+    struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
+
+    if (!ofport || stp_forward_in_state(ofport->stp_state)) {
+        force_compose_output_action(ctx, ofp_port);
+    }
+}
+
+/* Commits pending header modifications, then outputs to 'ofp_port' (subject
+ * to the STP check in compose_output_action()) and records the port for
+ * NetFlow accounting. */
+static void
+commit_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
+{
+ commit_odp_actions(ctx);
+ compose_output_action(ctx, ofp_port);
+ ctx->nf_output_iface = ofp_port;
+}
+
+/* Resubmits the flow in 'ctx' to OpenFlow table 'table_id', looking it up
+ * with 'in_port' as the input port, and translates the matched rule's
+ * actions.  Recursion depth is bounded by MAX_RESUBMIT_RECURSION; beyond
+ * that the resubmit is dropped with a rate-limited error. */
+static void
+xlate_table_action(struct action_xlate_ctx *ctx,
+ uint16_t in_port, uint8_t table_id)
+{
+ if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
+ struct ofproto_dpif *ofproto = ctx->ofproto;
+ struct rule_dpif *rule;
+ uint16_t old_in_port;
+ uint8_t old_table_id;
+
+ /* Switch to 'table_id' for the duration of the lookup/translation;
+ * restored below. */
+ old_table_id = ctx->table_id;
+ ctx->table_id = table_id;
+
+ /* Look up a flow with 'in_port' as the input port. */
+ old_in_port = ctx->flow.in_port;
+ ctx->flow.in_port = in_port;
+ rule = rule_dpif_lookup(ofproto, &ctx->flow, table_id);
+
+ /* Tag the flow.  On a miss the tag is computed from the table's
+ * wildcard set -- presumably so the flow can be revalidated when
+ * that table changes; confirm against the revalidation code. */
+ if (table_id > 0 && table_id < N_TABLES) {
+ struct table_dpif *table = &ofproto->tables[table_id];
+ if (table->other_table) {
+ ctx->tags |= (rule
+ ? rule->tag
+ : rule_calculate_tag(&ctx->flow,
+ &table->other_table->wc,
+ table->basis));
+ }
+ }
+
+ /* Restore the original input port. Otherwise OFPP_NORMAL and
+ * OFPP_IN_PORT will have surprising behavior. */
+ ctx->flow.in_port = old_in_port;
+
+ /* The hook runs after the input port is restored but before the
+ * rule's actions are translated. */
+ if (ctx->resubmit_hook) {
+ ctx->resubmit_hook(ctx, rule);
+ }
+
+ if (rule) {
+ /* Bump the recursion counter so nested resubmits stay bounded. */
+ ctx->recurse++;
+ do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx);
+ ctx->recurse--;
+ }
+
+ ctx->table_id = old_table_id;
+ } else {
+ static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
+ MAX_RESUBMIT_RECURSION);
+ }
+}
+
+/* Translates NXAST_RESUBMIT_TABLE: resubmits to the table and input port
+ * named by 'nar', where OFPP_IN_PORT and table number 255 mean "keep the
+ * current one". */
+static void
+xlate_resubmit_table(struct action_xlate_ctx *ctx,
+                     const struct nx_action_resubmit *nar)
+{
+    uint16_t in_port;
+    uint8_t table_id;
+
+    if (nar->in_port == htons(OFPP_IN_PORT)) {
+        in_port = ctx->flow.in_port;
+    } else {
+        in_port = ntohs(nar->in_port);
+    }
+
+    if (nar->table == 255) {
+        table_id = ctx->table_id;
+    } else {
+        table_id = nar->table;
+    }
+
+    xlate_table_action(ctx, in_port, table_id);
+}
+
+/* Outputs to every port except the one the packet arrived on.  With 'all'
+ * set (OFPP_ALL) the STP forwarding check is bypassed and OFPPC_NO_FLOOD is
+ * ignored; otherwise (OFPP_FLOOD) NO_FLOOD ports are skipped and STP is
+ * honored. */
+static void
+flood_packets(struct action_xlate_ctx *ctx, bool all)
+{
+    struct ofport_dpif *port;
+
+    commit_odp_actions(ctx);
+    HMAP_FOR_EACH (port, up.hmap_node, &ctx->ofproto->up.ports) {
+        uint16_t ofp_port = port->up.ofp_port;
+
+        if (ofp_port == ctx->flow.in_port) {
+            continue;           /* Never reflect back out the ingress port. */
+        }
+
+        if (all) {
+            force_compose_output_action(ctx, ofp_port);
+        } else if (!(port->up.opp.config & htonl(OFPPC_NO_FLOOD))) {
+            compose_output_action(ctx, ofp_port);
+        }
+    }
+
+    ctx->nf_output_iface = NF_OUT_FLOOD;
+}
+
+/* Composes a userspace action that sends the packet to the controller.
+ * 'len' (the OpenFlow max_len) travels to userspace in the cookie's 'data'
+ * member. */
+static void
+compose_controller_action(struct action_xlate_ctx *ctx, int len)
+{
+    struct user_action_cookie uac;
+
+    uac.type = USER_ACTION_COOKIE_CONTROLLER;
+    uac.n_output = 0;
+    uac.vlan_tci = 0;
+    uac.data = len;
+    put_userspace_action(ctx->ofproto, ctx->odp_actions, &ctx->flow, &uac);
+}
+
+/* Translates an output to OpenFlow port 'port', which may be any of the
+ * reserved OFPP_* values.  'max_len' bounds how much of the packet is sent
+ * when the destination is the controller.  Maintains ctx->nf_output_iface
+ * for NetFlow accounting. */
+static void
+xlate_output_action__(struct action_xlate_ctx *ctx,
+                      uint16_t port, uint16_t max_len)
+{
+    uint16_t old_nf_iface = ctx->nf_output_iface;
+
+    ctx->nf_output_iface = NF_OUT_DROP;
+
+    switch (port) {
+    case OFPP_IN_PORT:
+        commit_output_action(ctx, ctx->flow.in_port);
+        break;
+    case OFPP_TABLE:
+        xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id);
+        break;
+    case OFPP_NORMAL:
+        xlate_normal(ctx);
+        break;
+    case OFPP_FLOOD:
+        flood_packets(ctx, false);
+        break;
+    case OFPP_ALL:
+        flood_packets(ctx, true);
+        break;
+    case OFPP_CONTROLLER:
+        commit_odp_actions(ctx);
+        compose_controller_action(ctx, max_len);
+        break;
+    case OFPP_LOCAL:
+        commit_output_action(ctx, OFPP_LOCAL);
+        break;
+    case OFPP_NONE:
+        break;
+    default:
+        /* Ordinary port.  Outputting back to the ingress port is a no-op
+         * unless requested explicitly via OFPP_IN_PORT above. */
+        if (port != ctx->flow.in_port) {
+            commit_output_action(ctx, port);
+        }
+        break;
+    }
+
+    /* Merge this action's NetFlow output interface with the previous one:
+     * a flood dominates everything, a drop keeps the earlier value, and two
+     * distinct concrete outputs collapse to NF_OUT_MULTI. */
+    if (old_nf_iface == NF_OUT_FLOOD) {
+        ctx->nf_output_iface = NF_OUT_FLOOD;
+    } else if (ctx->nf_output_iface == NF_OUT_DROP) {
+        ctx->nf_output_iface = old_nf_iface;
+    } else if (old_nf_iface != NF_OUT_DROP
+               && ctx->nf_output_iface != NF_OUT_FLOOD) {
+        ctx->nf_output_iface = NF_OUT_MULTI;
+    }
+}
+
+/* Translates NXAST_OUTPUT_REG: the output port number is read from a
+ * register field at translation time.  Values that do not fit in a 16-bit
+ * OpenFlow port number are silently ignored. */
+static void
+xlate_output_reg_action(struct action_xlate_ctx *ctx,
+                        const struct nx_action_output_reg *naor)
+{
+    uint64_t port = nxm_read_field_bits(naor->src, naor->ofs_nbits,
+                                        &ctx->flow);
+
+    if (port <= UINT16_MAX) {
+        xlate_output_action__(ctx, port, ntohs(naor->max_len));
+    }
+}
+
+/* Translates a plain OFPAT_OUTPUT action.  Both fields of 'oao' arrive in
+ * network byte order. */
+static void
+xlate_output_action(struct action_xlate_ctx *ctx,
+                    const struct ofp_action_output *oao)
+{
+    uint16_t port = ntohs(oao->port);
+    uint16_t max_len = ntohs(oao->max_len);
+
+    xlate_output_action__(ctx, port, max_len);
+}
+
+static void
+xlate_enqueue_action(struct action_xlate_ctx *ctx,