#include "odp-util.h"
#include "ofp-util.h"
#include "ofpbuf.h"
+#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
-COVERAGE_DEFINE(ofproto_dpif_ctlr_action);
COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
-COVERAGE_DEFINE(facet_invalidated);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);
ovs_be16 initial_tci, struct rule_dpif *,
uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_actions(struct action_xlate_ctx *,
- const union ofp_action *in, size_t n_in,
+ const struct ofpact *ofpacts, size_t ofpacts_len,
struct ofpbuf *odp_actions);
static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
- const union ofp_action *in,
- size_t n_in);
+ const struct ofpact *ofpacts,
+ size_t ofpacts_len);
static size_t put_userspace_action(const struct ofproto_dpif *,
struct ofpbuf *odp_actions,
}
static void port_run(struct ofport_dpif *);
+static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
uint32_t basis; /* Keeps each table's tags separate. */
};
+/* Reasons that we might need to revalidate every facet, and corresponding
+ * coverage counters.
+ *
+ * A value of 0 means that there is no need to revalidate.
+ *
+ * It would be nice to have some cleaner way to integrate with coverage
+ * counters, but with only a few reasons I guess this is good enough for
+ * now. */
+enum revalidate_reason {
+    REV_RECONFIGURE = 1, /* Switch configuration changed. */
+    REV_STP, /* Spanning tree protocol port status change. */
+    REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ... */
+    REV_FLOW_TABLE, /* Flow table changed. */
+    REV_INCONSISTENCY /* Facet self-check failed. */
+};
+
+/* One coverage counter per 'enum revalidate_reason' value above, bumped when
+ * that reason triggers a full revalidation. */
+COVERAGE_DEFINE(rev_reconfigure);
+COVERAGE_DEFINE(rev_stp);
+COVERAGE_DEFINE(rev_port_toggled);
+COVERAGE_DEFINE(rev_flow_table);
+COVERAGE_DEFINE(rev_inconsistency);
+
struct ofproto_dpif {
struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
struct ofproto up;
/* Revalidation. */
struct table_dpif tables[N_TABLES];
- bool need_revalidate;
+ enum revalidate_reason need_revalidate;
struct tag_set revalidate_set;
/* Support for debugging async flow mods. */
table->other_table = NULL;
table->basis = random_uint32();
}
- ofproto->need_revalidate = false;
+ ofproto->need_revalidate = 0;
tag_set_init(&ofproto->revalidate_set);
list_init(&ofproto->completions);
static int
add_internal_flow(struct ofproto_dpif *ofproto, int id,
- const struct ofpbuf *actions, struct rule_dpif **rulep)
+ const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
struct ofputil_flow_mod fm;
int error;
fm.buffer_id = 0;
fm.out_port = 0;
fm.flags = 0;
- fm.actions = actions->data;
- fm.n_actions = actions->size / sizeof(union ofp_action);
+ fm.ofpacts = ofpacts->data;
+ fm.ofpacts_len = ofpacts->size;
error = ofproto_flow_mod(&ofproto->up, &fm);
if (error) {
static int
add_internal_flows(struct ofproto_dpif *ofproto)
{
-    struct nx_action_controller *nac;
-    uint64_t actions_stub[128 / 8];
-    struct ofpbuf actions;
+    struct ofpact_controller *controller;
+    uint64_t ofpacts_stub[128 / 8];
+    struct ofpbuf ofpacts;
    int error;
    int id;
-    ofpbuf_use_stack(&actions, actions_stub, sizeof actions_stub);
+    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    id = 1;
-    nac = ofputil_put_NXAST_CONTROLLER(&actions);
-    nac->max_len = htons(UINT16_MAX);
-    nac->controller_id = htons(0);
-    nac->reason = OFPR_NO_MATCH;
-    error = add_internal_flow(ofproto, id++, &actions, &ofproto->miss_rule);
+    /* 'miss_rule': a single CONTROLLER action that sends the packet to the
+     * controller with reason OFPR_NO_MATCH. */
+    controller = ofpact_put_CONTROLLER(&ofpacts);
+    controller->max_len = UINT16_MAX;
+    controller->controller_id = 0;
+    controller->reason = OFPR_NO_MATCH;
+    ofpact_pad(&ofpacts);
+
+    error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
    if (error) {
        return error;
    }
-    ofpbuf_clear(&actions);
-    error = add_internal_flow(ofproto, id++, &actions,
+    /* 'no_packet_in_rule': empty action list.  NOTE(review): presumably the
+     * empty list makes matching packets be dropped -- confirm against the
+     * match criteria set up elsewhere. */
+    ofpbuf_clear(&ofpacts);
+    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->no_packet_in_rule);
    return error;
}
run_fast(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct ofport_dpif *ofport;
unsigned int work;
+ HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
+ port_run_fast(ofport);
+ }
+
/* Handle one or more batches of upcalls, until there's nothing left to do
* or until we do a fixed total amount of work.
*
bool revalidate_all = ofproto->need_revalidate;
struct facet *facet;
+ switch (ofproto->need_revalidate) {
+ case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
+ case REV_STP: COVERAGE_INC(rev_stp); break;
+ case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
+ case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
+ case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
+ }
+
/* Clear the revalidation flags. */
tag_set_init(&ofproto->revalidate_set);
- ofproto->need_revalidate = false;
+ ofproto->need_revalidate = 0;
HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
if (revalidate_all
struct facet, hmap_node);
if (!tag_set_intersects(&ofproto->revalidate_set, facet->tags)) {
if (!facet_check_consistency(facet)) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_INCONSISTENCY;
}
}
}
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
port->bundle = NULL;
port->cfm = NULL;
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
bundle_remove(port_);
set_cfm(port_, NULL);
if (ofproto->sflow) {
if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
OFPUTIL_PC_NO_PACKET_IN)) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
bundle_update(port->bundle);
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
dpif_sflow_add_port(ds, &ofport->up);
}
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
}
dpif_sflow_set_options(ds, sflow_options);
} else {
if (ds) {
dpif_sflow_destroy(ds);
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
ofproto->sflow = NULL;
}
}
struct ofproto_dpif *ofproto;
ofproto = ofproto_dpif_cast(ofport->up.ofproto);
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
}
/* Only revalidate flows if the configuration changed. */
if (!s != !ofproto->stp) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
}
if (s) {
fwd_change = stp_forward_in_state(ofport->stp_state)
!= stp_forward_in_state(state);
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_STP;
ofport->stp_state = state;
ofport->stp_state_entered = time_msec();
pdscp = xmalloc(sizeof *pdscp);
pdscp->priority = priority;
pdscp->dscp = dscp;
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
}
if (pdscp->dscp != dscp) {
pdscp->dscp = dscp;
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
}
hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
if (!hmap_is_empty(&ofport->priorities)) {
ofport_clear_priorities(ofport);
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
}
hmap_swap(&new, &ofport->priorities);
struct mac_learning *ml = ofproto->ml;
struct mac_entry *mac, *next_mac;
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
if (mac->port.p == bundle) {
if (all_ofprotos) {
{
struct ofbundle *bundle = port->bundle;
- bundle->ofproto->need_revalidate = true;
+ bundle->ofproto->need_revalidate = REV_RECONFIGURE;
list_remove(&port->bundle_node);
port->bundle = NULL;
}
if (port->bundle != bundle) {
- bundle->ofproto->need_revalidate = true;
+ bundle->ofproto->need_revalidate = REV_RECONFIGURE;
if (port->bundle) {
bundle_del_port(port);
}
}
}
if (lacp) {
- port->bundle->ofproto->need_revalidate = true;
+ port->bundle->ofproto->need_revalidate = REV_RECONFIGURE;
lacp_slave_register(bundle->lacp, port, lacp);
}
mirror_destroy(m);
} else if (hmapx_find_and_delete(&m->srcs, bundle)
|| hmapx_find_and_delete(&m->dsts, bundle)) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
}
}
}
/* LACP. */
if (s->lacp) {
if (!bundle->lacp) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
bundle->lacp = lacp_create();
}
lacp_configure(bundle->lacp, s->lacp);
bundle->ofproto->has_bonded_bundles = true;
if (bundle->bond) {
if (bond_reconfigure(bundle->bond, s->bond)) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
}
} else {
bundle->bond = bond_create(s->bond);
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
}
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
}
}
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
ofproto->has_mirrors = true;
mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
mirror_update_dups(ofproto);
}
ofproto = mirror->ofproto;
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-    /* Revalidate cached flows whenever forward_bpdu option changes. */
-    ofproto->need_revalidate = true;
+    /* Revalidate cached flows whenever the forward_bpdu option changes. */
+    ofproto->need_revalidate = REV_RECONFIGURE;
}
static void
ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no);
}
+/* Inexpensive per-port periodic work: sends a CCM on 'ofport' if CFM is
+ * configured on it and a CCM transmission is due.  Called from run_fast()
+ * and also from port_run(). */
+static void
+port_run_fast(struct ofport_dpif *ofport)
+{
+    if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
+        struct ofpbuf packet;
+
+        ofpbuf_init(&packet, 0);
+        cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
+        send_packet(ofport, &packet);
+        ofpbuf_uninit(&packet);
+    }
+}
+
static void
port_run(struct ofport_dpif *ofport)
{
ofport->carrier_seq = carrier_seq;
+ port_run_fast(ofport);
if (ofport->cfm) {
cfm_run(ofport->cfm);
-
- if (cfm_should_send_ccm(ofport->cfm)) {
- struct ofpbuf packet;
-
- ofpbuf_init(&packet, 0);
- cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
- send_packet(ofport, &packet);
- ofpbuf_uninit(&packet);
- }
-
enable = enable && !cfm_get_fault(ofport->cfm)
&& cfm_get_opup(ofport->cfm);
}
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
if (ofproto->has_bundle_action) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_PORT_TOGGLED;
}
}
action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
rule, 0, packet);
ctx.resubmit_stats = &stats;
- xlate_actions(&ctx, rule->up.actions, rule->up.n_actions,
+ xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
&odp_actions);
if (odp_actions.size) {
long long int now;
uint8_t reason;
+ if (rule->up.pending) {
+ /* We'll have to expire it later. */
+ return;
+ }
+
/* Has 'rule' expired? */
now = time_msec();
if (rule->up.hard_timeout
facet->flow.vlan_tci,
facet->rule, facet->tcp_flags, NULL);
ctx.may_learn = true;
- xlate_actions_for_side_effects(&ctx, facet->rule->up.actions,
- facet->rule->up.n_actions);
+ xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
+ facet->rule->up.ofpacts_len);
}
static void
static bool
facet_is_controller_flow(struct facet *facet)
{
-    return (facet
-            && facet->rule->up.n_actions == 1
-            && action_outputs_to_port(&facet->rule->up.actions[0],
-                                      htons(OFPP_CONTROLLER)));
+    if (facet) {
+        const struct rule *rule = &facet->rule->up;
+        const struct ofpact *ofpacts = rule->ofpacts;
+        size_t ofpacts_len = rule->ofpacts_len;
+
+        /* True only when CONTROLLER is the rule's sole action: the first
+         * ofpact has type OFPACT_CONTROLLER and no further ofpact follows
+         * it within 'ofpacts_len'. */
+        if (ofpacts->type == OFPACT_CONTROLLER &&
+            ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
+            return true;
+        }
+    }
+    return false;
}
/* Folds all of 'facet''s statistics into its rule. Also updates the
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
subfacet->initial_tci, rule, 0, NULL);
- xlate_actions(&ctx, rule->up.actions, rule->up.n_actions,
+ xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
&odp_actions);
if (subfacet->path == SF_NOT_INSTALLED) {
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
subfacet->initial_tci, new_rule, 0, NULL);
- xlate_actions(&ctx, new_rule->up.actions, new_rule->up.n_actions,
+ xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
&odp_actions);
slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
0, NULL);
ctx.resubmit_stats = stats;
- xlate_actions_for_side_effects(&ctx, rule->up.actions, rule->up.n_actions);
+ xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
+ rule->up.ofpacts_len);
}
\f
/* Subfacets. */
action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
rule, 0, packet);
- xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, odp_actions);
+ xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
facet->tags = ctx.tags;
facet->has_learn = ctx.has_learn;
facet->has_normal = ctx.has_normal;
uint8_t table_id;
enum ofperr error;
- error = validate_actions(rule->up.actions, rule->up.n_actions,
- &rule->up.cr.flow, ofproto->max_ports);
+ error = ofpacts_check(rule->up.ofpacts, rule->up.ofpacts_len,
+ &rule->up.cr.flow, ofproto->max_ports);
if (error) {
return error;
}
action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
rule, stats.tcp_flags, packet);
ctx.resubmit_stats = &stats;
- xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, &odp_actions);
+ xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);
execute_odp_actions(ofproto, flow, odp_actions.data,
odp_actions.size, packet);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
enum ofperr error;
- error = validate_actions(rule->up.actions, rule->up.n_actions,
- &rule->up.cr.flow, ofproto->max_ports);
+ error = ofpacts_check(rule->up.ofpacts, rule->up.ofpacts_len,
+ &rule->up.cr.flow, ofproto->max_ports);
if (error) {
ofoperation_complete(rule->up.pending, error);
return;
\f
/* OpenFlow to datapath action translation. */
-static void do_xlate_actions(const union ofp_action *in, size_t n_in,
- struct action_xlate_ctx *ctx);
+static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
+ struct action_xlate_ctx *);
static void xlate_normal(struct action_xlate_ctx *);
/* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
ctx->recurse++;
ctx->rule = rule;
- do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx);
+ do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
ctx->rule = old_rule;
ctx->recurse--;
}
}
static void
-xlate_resubmit_table(struct action_xlate_ctx *ctx,
-                     const struct nx_action_resubmit *nar)
+xlate_ofpact_resubmit(struct action_xlate_ctx *ctx,
+                      const struct ofpact_resubmit *resubmit)
{
    uint16_t in_port;
    uint8_t table_id;
-    in_port = (nar->in_port == htons(OFPP_IN_PORT)
-               ? ctx->flow.in_port
-               : ntohs(nar->in_port));
-    table_id = nar->table == 255 ? ctx->table_id : nar->table;
+    /* OFPP_IN_PORT is a sentinel meaning "use the flow's original input
+     * port". */
+    in_port = resubmit->in_port;
+    if (in_port == OFPP_IN_PORT) {
+        in_port = ctx->flow.in_port;
+    }
+
+    /* Table id 255 is a sentinel meaning "resubmit to the current table". */
+    table_id = resubmit->table_id;
+    if (table_id == 255) {
+        table_id = ctx->table_id;
+    }
    xlate_table_action(ctx, in_port, table_id);
}
}
static void
-xlate_output_action__(struct action_xlate_ctx *ctx,
- uint16_t port, uint16_t max_len)
+xlate_output_action(struct action_xlate_ctx *ctx,
+ uint16_t port, uint16_t max_len)
{
uint16_t prev_nf_output_iface = ctx->nf_output_iface;
compose_output_action(ctx, ctx->flow.in_port);
break;
case OFPP_TABLE:
- xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id);
+ xlate_table_action(ctx, ctx->flow.in_port, 0);
break;
case OFPP_NORMAL:
xlate_normal(ctx);
static void
xlate_output_reg_action(struct action_xlate_ctx *ctx,
-                        const struct nx_action_output_reg *naor)
+                        const struct ofpact_output_reg *or)
{
-    struct mf_subfield src;
-    uint64_t ofp_port;
-
-    nxm_decode(&src, naor->src, naor->ofs_nbits);
-    ofp_port = mf_get_subfield(&src, &ctx->flow);
-
-    if (ofp_port <= UINT16_MAX) {
-        xlate_output_action__(ctx, ofp_port, ntohs(naor->max_len));
+    /* Read the output port number from the register subfield; output only
+     * if it fits in a 16-bit OpenFlow port number, otherwise do nothing. */
+    uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
+    if (port <= UINT16_MAX) {
+        xlate_output_action(ctx, port, or->max_len);
    }
}
-static void
-xlate_output_action(struct action_xlate_ctx *ctx,
- const struct ofp_action_output *oao)
-{
- xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
-}
-
static void
xlate_enqueue_action(struct action_xlate_ctx *ctx,
- const struct ofp_action_enqueue *oae)
+ const struct ofpact_enqueue *enqueue)
{
- uint16_t ofp_port;
+ uint16_t ofp_port = enqueue->port;
+ uint32_t queue_id = enqueue->queue;
uint32_t flow_priority, priority;
int error;
- error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
- &priority);
+ /* Translate queue to priority. */
+ error = dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &priority);
if (error) {
/* Fall back to ordinary output action. */
- xlate_output_action__(ctx, ntohs(oae->port), 0);
+ xlate_output_action(ctx, enqueue->port, 0);
return;
}
- /* Figure out datapath output port. */
- ofp_port = ntohs(oae->port);
+ /* Check output port. */
if (ofp_port == OFPP_IN_PORT) {
ofp_port = ctx->flow.in_port;
} else if (ofp_port == ctx->flow.in_port) {
}
+/* Implements OFPACT_SET_QUEUE: translates OpenFlow queue 'queue_id' into a
+ * datapath priority and stores it in the flow's skb_priority. */
static void
-xlate_set_queue_action(struct action_xlate_ctx *ctx,
-                       const struct nx_action_set_queue *nasq)
+xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id)
{
-    uint32_t priority;
-    int error;
+    uint32_t skb_priority;
-    error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
-                                   &priority);
-    if (error) {
-        /* Couldn't translate queue to a priority, so ignore.  A warning
+    if (!dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &skb_priority)) {
+        ctx->flow.skb_priority = skb_priority;
+    } else {
+        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */
-        return;
    }
-
-    ctx->flow.skb_priority = priority;
}
struct xlate_reg_state {
static void
xlate_autopath(struct action_xlate_ctx *ctx,
- const struct nx_action_autopath *naa)
+ const struct ofpact_autopath *ap)
{
- uint16_t ofp_port = ntohl(naa->id);
+ uint16_t ofp_port = ap->port;
struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);
if (!port || !port->bundle) {
ofp_port = slave->up.ofp_port;
}
}
- autopath_execute(naa, &ctx->flow, ofp_port);
+ nxm_reg_load(&ap->dst, ofp_port, &ctx->flow);
}
static bool
}
}
+/* Implements the bundle actions: picks a slave port with bundle_execute().
+ * If 'bundle' names a destination field (the bundle_load form), the chosen
+ * port is stored into that field; otherwise the packet is output to it. */
+static void
+xlate_bundle_action(struct action_xlate_ctx *ctx,
+                    const struct ofpact_bundle *bundle)
+{
+    uint16_t port;
+
+    port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto);
+    if (bundle->dst.field) {
+        nxm_reg_load(&bundle->dst, port, &ctx->flow);
+    } else {
+        xlate_output_action(ctx, port, 0);
+    }
+}
+
static void
xlate_learn_action(struct action_xlate_ctx *ctx,
- const struct nx_action_learn *learn)
+ const struct ofpact_learn *learn)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
struct ofputil_flow_mod fm;
+ uint64_t ofpacts_stub[1024 / 8];
+ struct ofpbuf ofpacts;
int error;
- learn_execute(learn, &ctx->flow, &fm);
+ ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
+ learn_execute(learn, &ctx->flow, &fm, &ofpacts);
error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
if (error && !VLOG_DROP_WARN(&rl)) {
ofperr_get_name(error));
}
- free(fm.actions);
+ ofpbuf_uninit(&ofpacts);
}
/* Reduces '*timeout' to no more than 'max'. A value of zero in either case
static void
xlate_fin_timeout(struct action_xlate_ctx *ctx,
-                  const struct nx_action_fin_timeout *naft)
+                  const struct ofpact_fin_timeout *oft)
{
+    /* When the packet carries TCP FIN or RST and a rule is being translated,
+     * clamp the rule's idle and hard timeouts down to the action's
+     * fin_*_timeout values; see reduce_timeout() for zero-value handling. */
    if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;
-        reduce_timeout(ntohs(naft->fin_idle_timeout), &rule->up.idle_timeout);
-        reduce_timeout(ntohs(naft->fin_hard_timeout), &rule->up.hard_timeout);
+        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
+        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
    }
}
}
static void
-do_xlate_actions(const union ofp_action *in, size_t n_in,
+do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct action_xlate_ctx *ctx)
{
const struct ofport_dpif *port;
- const union ofp_action *ia;
bool was_evictable = true;
- size_t left;
+ const struct ofpact *a;
port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
if (port && !may_receive(port, ctx)) {
was_evictable = ctx->rule->up.evictable;
ctx->rule->up.evictable = false;
}
- OFPUTIL_ACTION_FOR_EACH_UNSAFE (ia, left, in, n_in) {
- const struct ofp_action_dl_addr *oada;
- const struct nx_action_resubmit *nar;
- const struct nx_action_set_tunnel *nast;
- const struct nx_action_set_queue *nasq;
- const struct nx_action_multipath *nam;
- const struct nx_action_autopath *naa;
- const struct nx_action_bundle *nab;
- const struct nx_action_output_reg *naor;
- const struct nx_action_controller *nac;
- enum ofputil_action_code code;
- ovs_be64 tun_id;
+ OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
+ struct ofpact_controller *controller;
if (ctx->exit) {
break;
}
- code = ofputil_decode_action_unsafe(ia);
- switch (code) {
- case OFPUTIL_OFPAT10_OUTPUT:
- xlate_output_action(ctx, &ia->output);
+ switch (a->type) {
+ case OFPACT_OUTPUT:
+ xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
+ ofpact_get_OUTPUT(a)->max_len);
+ break;
+
+ case OFPACT_CONTROLLER:
+ controller = ofpact_get_CONTROLLER(a);
+ execute_controller_action(ctx, controller->max_len,
+ controller->reason,
+ controller->controller_id);
+ break;
+
+ case OFPACT_ENQUEUE:
+ xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
break;
- case OFPUTIL_OFPAT10_SET_VLAN_VID:
+ case OFPACT_SET_VLAN_VID:
ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
- ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
+ ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
+ | htons(VLAN_CFI));
break;
- case OFPUTIL_OFPAT10_SET_VLAN_PCP:
+ case OFPACT_SET_VLAN_PCP:
ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
- ctx->flow.vlan_tci |= htons(
- (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
+ ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
+ << VLAN_PCP_SHIFT)
+ | VLAN_CFI);
break;
- case OFPUTIL_OFPAT10_STRIP_VLAN:
+ case OFPACT_STRIP_VLAN:
ctx->flow.vlan_tci = htons(0);
break;
- case OFPUTIL_OFPAT10_SET_DL_SRC:
- oada = ((struct ofp_action_dl_addr *) ia);
- memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN);
+ case OFPACT_SET_ETH_SRC:
+ memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
+ ETH_ADDR_LEN);
break;
- case OFPUTIL_OFPAT10_SET_DL_DST:
- oada = ((struct ofp_action_dl_addr *) ia);
- memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN);
+ case OFPACT_SET_ETH_DST:
+ memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
+ ETH_ADDR_LEN);
break;
- case OFPUTIL_OFPAT10_SET_NW_SRC:
- ctx->flow.nw_src = ia->nw_addr.nw_addr;
+ case OFPACT_SET_IPV4_SRC:
+ ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
break;
- case OFPUTIL_OFPAT10_SET_NW_DST:
- ctx->flow.nw_dst = ia->nw_addr.nw_addr;
+ case OFPACT_SET_IPV4_DST:
+ ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
break;
- case OFPUTIL_OFPAT10_SET_NW_TOS:
+ case OFPACT_SET_IPV4_DSCP:
/* OpenFlow 1.0 only supports IPv4. */
if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
ctx->flow.nw_tos &= ~IP_DSCP_MASK;
- ctx->flow.nw_tos |= ia->nw_tos.nw_tos & IP_DSCP_MASK;
+ ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
}
break;
- case OFPUTIL_OFPAT10_SET_TP_SRC:
- ctx->flow.tp_src = ia->tp_port.tp_port;
+ case OFPACT_SET_L4_SRC_PORT:
+ ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
break;
- case OFPUTIL_OFPAT10_SET_TP_DST:
- ctx->flow.tp_dst = ia->tp_port.tp_port;
+ case OFPACT_SET_L4_DST_PORT:
+ ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
break;
- case OFPUTIL_OFPAT10_ENQUEUE:
- xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
+ case OFPACT_RESUBMIT:
+ xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
break;
- case OFPUTIL_NXAST_RESUBMIT:
- nar = (const struct nx_action_resubmit *) ia;
- xlate_table_action(ctx, ntohs(nar->in_port), ctx->table_id);
+ case OFPACT_SET_TUNNEL:
+ ctx->flow.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
break;
- case OFPUTIL_NXAST_RESUBMIT_TABLE:
- xlate_resubmit_table(ctx, (const struct nx_action_resubmit *) ia);
+ case OFPACT_SET_QUEUE:
+ xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
break;
- case OFPUTIL_NXAST_SET_TUNNEL:
- nast = (const struct nx_action_set_tunnel *) ia;
- tun_id = htonll(ntohl(nast->tun_id));
- ctx->flow.tun_id = tun_id;
- break;
-
- case OFPUTIL_NXAST_SET_QUEUE:
- nasq = (const struct nx_action_set_queue *) ia;
- xlate_set_queue_action(ctx, nasq);
- break;
-
- case OFPUTIL_NXAST_POP_QUEUE:
+ case OFPACT_POP_QUEUE:
ctx->flow.skb_priority = ctx->orig_skb_priority;
break;
- case OFPUTIL_NXAST_REG_MOVE:
- nxm_execute_reg_move((const struct nx_action_reg_move *) ia,
- &ctx->flow);
- break;
-
- case OFPUTIL_NXAST_REG_LOAD:
- nxm_execute_reg_load((const struct nx_action_reg_load *) ia,
- &ctx->flow);
+ case OFPACT_REG_MOVE:
+ nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
break;
- case OFPUTIL_NXAST_NOTE:
- /* Nothing to do. */
+ case OFPACT_REG_LOAD:
+ nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
break;
- case OFPUTIL_NXAST_SET_TUNNEL64:
- tun_id = ((const struct nx_action_set_tunnel64 *) ia)->tun_id;
- ctx->flow.tun_id = tun_id;
+ case OFPACT_DEC_TTL:
+ if (compose_dec_ttl(ctx)) {
+ goto out;
+ }
break;
- case OFPUTIL_NXAST_MULTIPATH:
- nam = (const struct nx_action_multipath *) ia;
- multipath_execute(nam, &ctx->flow);
+ case OFPACT_NOTE:
+ /* Nothing to do. */
break;
- case OFPUTIL_NXAST_AUTOPATH:
- naa = (const struct nx_action_autopath *) ia;
- xlate_autopath(ctx, naa);
+ case OFPACT_MULTIPATH:
+ multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
break;
- case OFPUTIL_NXAST_BUNDLE:
- ctx->ofproto->has_bundle_action = true;
- nab = (const struct nx_action_bundle *) ia;
- xlate_output_action__(ctx, bundle_execute(nab, &ctx->flow,
- slave_enabled_cb,
- ctx->ofproto), 0);
+ case OFPACT_AUTOPATH:
+ xlate_autopath(ctx, ofpact_get_AUTOPATH(a));
break;
- case OFPUTIL_NXAST_BUNDLE_LOAD:
+ case OFPACT_BUNDLE:
ctx->ofproto->has_bundle_action = true;
- nab = (const struct nx_action_bundle *) ia;
- bundle_execute_load(nab, &ctx->flow, slave_enabled_cb,
- ctx->ofproto);
+ xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
break;
- case OFPUTIL_NXAST_OUTPUT_REG:
- naor = (const struct nx_action_output_reg *) ia;
- xlate_output_reg_action(ctx, naor);
+ case OFPACT_OUTPUT_REG:
+ xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
break;
- case OFPUTIL_NXAST_LEARN:
+ case OFPACT_LEARN:
ctx->has_learn = true;
if (ctx->may_learn) {
- xlate_learn_action(ctx, (const struct nx_action_learn *) ia);
+ xlate_learn_action(ctx, ofpact_get_LEARN(a));
}
break;
- case OFPUTIL_NXAST_DEC_TTL:
- if (compose_dec_ttl(ctx)) {
- goto out;
- }
- break;
-
- case OFPUTIL_NXAST_EXIT:
+ case OFPACT_EXIT:
ctx->exit = true;
break;
- case OFPUTIL_NXAST_FIN_TIMEOUT:
+ case OFPACT_FIN_TIMEOUT:
ctx->has_fin_timeout = true;
- xlate_fin_timeout(ctx, (const struct nx_action_fin_timeout *) ia);
- break;
-
- case OFPUTIL_NXAST_CONTROLLER:
- nac = (const struct nx_action_controller *) ia;
- execute_controller_action(ctx, ntohs(nac->max_len), nac->reason,
- ntohs(nac->controller_id));
+ xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
break;
}
}
ctx->resubmit_stats = NULL;
}
-/* Translates the 'n_in' "union ofp_action"s in 'in' into datapath actions in
- * 'odp_actions', using 'ctx'. */
+/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
+ * into datapath actions in 'odp_actions', using 'ctx'. */
static void
xlate_actions(struct action_xlate_ctx *ctx,
- const union ofp_action *in, size_t n_in,
+ const struct ofpact *ofpacts, size_t ofpacts_len,
struct ofpbuf *odp_actions)
{
/* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
add_sflow_action(ctx);
- do_xlate_actions(in, n_in, ctx);
+ do_xlate_actions(ofpacts, ofpacts_len, ctx);
if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
if (!hit_resubmit_limit) {
}
}
-/* Translates the 'n_in' "union ofp_action"s in 'in' into datapath actions,
- * using 'ctx', and discards the datapath actions. */
+/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
+ * into datapath actions, using 'ctx', and discards the datapath actions. */
static void
xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
- const union ofp_action *in, size_t n_in)
+ const struct ofpact *ofpacts,
+ size_t ofpacts_len)
{
uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf odp_actions;
ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- xlate_actions(ctx, in, n_in, &odp_actions);
+ xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
ofpbuf_uninit(&odp_actions);
}
\f
* we don't know about.
*
* - The ofproto client didn't configure the port as part of a bundle.
+ * This is particularly likely to happen if a packet was received on the
+ * port after it was created, but before the client had a chance to
+ * configure its bundle.
*/
if (warn) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
if (table->catchall_table != catchall || table->other_table != other) {
table->catchall_table = catchall;
table->other_table = other;
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_FLOW_TABLE;
}
}
if (table->other_table && rule->tag) {
tag_set_add(&ofproto->revalidate_set, rule->tag);
} else {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_FLOW_TABLE;
}
}
}
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
if (frag_handling != OFPC_FRAG_REASM) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
return true;
} else {
return false;
static enum ofperr
packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
const struct flow *flow,
- const union ofp_action *ofp_actions, size_t n_ofp_actions)
+ const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
enum ofperr error;
return OFPERR_NXBRC_BAD_IN_PORT;
}
- error = validate_actions(ofp_actions, n_ofp_actions, flow,
- ofproto->max_ports);
+ error = ofpacts_check(ofpacts, ofpacts_len, flow, ofproto->max_ports);
if (!error) {
struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
ofpbuf_use_stub(&odp_actions,
odp_actions_stub, sizeof odp_actions_stub);
- xlate_actions(&ctx, ofp_actions, n_ofp_actions, &odp_actions);
+ xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
dpif_execute(ofproto->dpif, key.data, key.size,
odp_actions.data, odp_actions.size, packet);
ofpbuf_uninit(&odp_actions);
ds_put_char_multiple(result, '\t', level);
ds_put_cstr(result, "OpenFlow ");
- ofp_print_actions(result, rule->up.actions, rule->up.n_actions);
+ ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result);
ds_put_char(result, '\n');
}
action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
rule, tcp_flags, packet);
trace.ctx.resubmit_hook = trace_resubmit;
- xlate_actions(&trace.ctx, rule->up.actions, rule->up.n_actions,
+ xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len,
&odp_actions);
ds_put_char(ds, '\n');
}
}
if (errors) {
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_INCONSISTENCY;
}
if (errors) {
return 0;
}
- ofproto->need_revalidate = true;
+ ofproto->need_revalidate = REV_RECONFIGURE;
if (ofport->realdev_ofp_port) {
vsp_remove(ofport);