X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=ofproto%2Fofproto-dpif.c;h=dc15c15bed5ce04eab778c2d7be64a1cb89b405f;hb=5cc2c261d33f091ea45ff3de22737cf4deb724a2;hp=9c8f7f4df74891b3562e9bd7cdb94fb00f726e3a;hpb=4acbc98d3c61432e9138215e84ae91e32902d301;p=openvswitch diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c index 9c8f7f4d..dc15c15b 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -42,11 +42,13 @@ #include "odp-util.h" #include "ofp-util.h" #include "ofpbuf.h" +#include "ofp-actions.h" #include "ofp-parse.h" #include "ofp-print.h" #include "ofproto-dpif-governor.h" #include "ofproto-dpif-sflow.h" #include "poll-loop.h" +#include "simap.h" #include "timer.h" #include "unaligned.h" #include "unixctl.h" @@ -55,11 +57,9 @@ VLOG_DEFINE_THIS_MODULE(ofproto_dpif); -COVERAGE_DEFINE(ofproto_dpif_ctlr_action); COVERAGE_DEFINE(ofproto_dpif_expired); COVERAGE_DEFINE(ofproto_dpif_xlate); COVERAGE_DEFINE(facet_changed_rule); -COVERAGE_DEFINE(facet_invalidated); COVERAGE_DEFINE(facet_revalidate); COVERAGE_DEFINE(facet_unexpected); COVERAGE_DEFINE(facet_suppress); @@ -282,11 +282,11 @@ static void action_xlate_ctx_init(struct action_xlate_ctx *, ovs_be16 initial_tci, struct rule_dpif *, uint8_t tcp_flags, const struct ofpbuf *); static void xlate_actions(struct action_xlate_ctx *, - const union ofp_action *in, size_t n_in, + const struct ofpact *ofpacts, size_t ofpacts_len, struct ofpbuf *odp_actions); static void xlate_actions_for_side_effects(struct action_xlate_ctx *, - const union ofp_action *in, - size_t n_in); + const struct ofpact *ofpacts, + size_t ofpacts_len); static size_t put_userspace_action(const struct ofproto_dpif *, struct ofpbuf *odp_actions, @@ -530,8 +530,6 @@ struct vlan_splinter { static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *, uint32_t realdev, ovs_be16 vlan_tci); -static uint16_t vsp_vlandev_to_realdev(const struct ofproto_dpif *, - uint16_t vlandev, int *vid); static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *); static void vsp_remove(struct ofport_dpif *); static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid); @@ -544,6 +542,7 @@ ofport_dpif_cast(const struct ofport *ofport) } static void port_run(struct ofport_dpif *); +static void port_run_fast(struct ofport_dpif *); static void port_wait(struct ofport_dpif *); static int set_cfm(struct ofport *, const struct cfm_settings *); static void ofport_clear_priorities(struct ofport_dpif *); @@ -564,6 +563,27 @@ struct table_dpif { uint32_t basis; /* Keeps each table's tags separate. */ }; +/* Reasons that we might need to revalidate every facet, and corresponding + * coverage counters. + * + * A value of 0 means that there is no need to revalidate. + * + * It would be nice to have some cleaner way to integrate with coverage + * counters, but with only a few reasons I guess this is good enough for + * now. */ +enum revalidate_reason { + REV_RECONFIGURE = 1, /* Switch configuration changed. */ + REV_STP, /* Spanning tree protocol port status change. */ + REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/ + REV_FLOW_TABLE, /* Flow table changed. */ + REV_INCONSISTENCY /* Facet self-check failed. */ +}; +COVERAGE_DEFINE(rev_reconfigure); +COVERAGE_DEFINE(rev_stp); +COVERAGE_DEFINE(rev_port_toggled); +COVERAGE_DEFINE(rev_flow_table); +COVERAGE_DEFINE(rev_inconsistency); + struct ofproto_dpif { struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. 
*/ struct ofproto up; @@ -596,7 +616,7 @@ struct ofproto_dpif { /* Revalidation. */ struct table_dpif tables[N_TABLES]; - bool need_revalidate; + enum revalidate_reason need_revalidate; struct tag_set revalidate_set; /* Support for debugging async flow mods. */ @@ -759,7 +779,7 @@ construct(struct ofproto *ofproto_) table->other_table = NULL; table->basis = random_uint32(); } - ofproto->need_revalidate = false; + ofproto->need_revalidate = 0; tag_set_init(&ofproto->revalidate_set); list_init(&ofproto->completions); @@ -785,13 +805,14 @@ construct(struct ofproto *ofproto_) static int add_internal_flow(struct ofproto_dpif *ofproto, int id, - const struct ofpbuf *actions, struct rule_dpif **rulep) + const struct ofpbuf *ofpacts, struct rule_dpif **rulep) { struct ofputil_flow_mod fm; int error; cls_rule_init_catchall(&fm.cr, 0); cls_rule_set_reg(&fm.cr, 0, id); + fm.new_cookie = htonll(0); fm.cookie = htonll(0); fm.cookie_mask = htonll(0); fm.table_id = TBL_INTERNAL; @@ -801,8 +822,8 @@ add_internal_flow(struct ofproto_dpif *ofproto, int id, fm.buffer_id = 0; fm.out_port = 0; fm.flags = 0; - fm.actions = actions->data; - fm.n_actions = actions->size / sizeof(union ofp_action); + fm.ofpacts = ofpacts->data; + fm.ofpacts_len = ofpacts->size; error = ofproto_flow_mod(&ofproto->up, &fm); if (error) { @@ -820,26 +841,28 @@ add_internal_flow(struct ofproto_dpif *ofproto, int id, static int add_internal_flows(struct ofproto_dpif *ofproto) { - struct nx_action_controller *nac; - uint64_t actions_stub[128 / 8]; - struct ofpbuf actions; + struct ofpact_controller *controller; + uint64_t ofpacts_stub[128 / 8]; + struct ofpbuf ofpacts; int error; int id; - ofpbuf_use_stack(&actions, actions_stub, sizeof actions_stub); + ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); id = 1; - nac = ofputil_put_NXAST_CONTROLLER(&actions); - nac->max_len = htons(UINT16_MAX); - nac->controller_id = htons(0); - nac->reason = OFPR_NO_MATCH; - error = add_internal_flow(ofproto, id++, &actions, &ofproto->miss_rule); + controller = ofpact_put_CONTROLLER(&ofpacts); + controller->max_len = UINT16_MAX; + controller->controller_id = 0; + controller->reason = OFPR_NO_MATCH; + ofpact_pad(&ofpacts); + + error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule); if (error) { return error; } - ofpbuf_clear(&actions); - error = add_internal_flow(ofproto, id++, &actions, + ofpbuf_clear(&ofpacts); + error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->no_packet_in_rule); return error; } @@ -899,8 +922,13 @@ static int run_fast(struct ofproto *ofproto_) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); + struct ofport_dpif *ofport; unsigned int work; + HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { + port_run_fast(ofport); + } + /* Handle one or more batches of upcalls, until there's nothing left to do * or until we do a fixed total amount of work. * @@ -970,9 +998,17 @@ run(struct ofproto *ofproto_) bool revalidate_all = ofproto->need_revalidate; struct facet *facet; + switch (ofproto->need_revalidate) { + case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break; + case REV_STP: COVERAGE_INC(rev_stp); break; + case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break; + case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break; + case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break; + } + /* Clear the revalidation flags. 
*/ tag_set_init(&ofproto->revalidate_set); - ofproto->need_revalidate = false; + ofproto->need_revalidate = 0; HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) { if (revalidate_all @@ -990,7 +1026,7 @@ run(struct ofproto *ofproto_) struct facet, hmap_node); if (!tag_set_intersects(&ofproto->revalidate_set, facet->tags)) { if (!facet_check_consistency(facet)) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_INCONSISTENCY; } } } @@ -1058,6 +1094,15 @@ wait(struct ofproto *ofproto_) } } +static void +get_memory_usage(const struct ofproto *ofproto_, struct simap *usage) +{ + const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); + + simap_increase(usage, "facets", hmap_count(&ofproto->facets)); + simap_increase(usage, "subfacets", hmap_count(&ofproto->subfacets)); +} + static void flush(struct ofproto *ofproto_) { @@ -1134,7 +1179,7 @@ port_construct(struct ofport *port_) struct ofport_dpif *port = ofport_dpif_cast(port_); struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; port->odp_port = ofp_port_to_odp_port(port->up.ofp_port); port->bundle = NULL; port->cfm = NULL; @@ -1160,7 +1205,7 @@ port_destruct(struct ofport *port_) struct ofport_dpif *port = ofport_dpif_cast(port_); struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; bundle_remove(port_); set_cfm(port_, NULL); if (ofproto->sflow) { @@ -1191,7 +1236,7 @@ port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config) if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP | OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD | OFPUTIL_PC_NO_PACKET_IN)) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) { bundle_update(port->bundle); @@ -1214,13 +1259,13 @@ set_sflow(struct ofproto *ofproto_, HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { dpif_sflow_add_port(ds, &ofport->up); } - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } dpif_sflow_set_options(ds, sflow_options); } else { if (ds) { dpif_sflow_destroy(ds); - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; ofproto->sflow = NULL; } } @@ -1240,7 +1285,7 @@ set_cfm(struct ofport *ofport_, const struct cfm_settings *s) struct ofproto_dpif *ofproto; ofproto = ofproto_dpif_cast(ofport->up.ofproto); - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev)); } @@ -1320,7 +1365,7 @@ set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s) /* Only revalidate flows if the configuration changed. 
*/ if (!s != !ofproto->stp) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } if (s) { @@ -1393,7 +1438,7 @@ update_stp_port_state(struct ofport_dpif *ofport) fwd_change = stp_forward_in_state(ofport->stp_state) != stp_forward_in_state(state); - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_STP; ofport->stp_state = state; ofport->stp_state_entered = time_msec(); @@ -1591,12 +1636,12 @@ set_queues(struct ofport *ofport_, pdscp = xmalloc(sizeof *pdscp); pdscp->priority = priority; pdscp->dscp = dscp; - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } if (pdscp->dscp != dscp) { pdscp->dscp = dscp; - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0)); @@ -1604,7 +1649,7 @@ set_queues(struct ofport *ofport_, if (!hmap_is_empty(&ofport->priorities)) { ofport_clear_priorities(ofport); - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } hmap_swap(&new, &ofport->priorities); @@ -1631,7 +1676,7 @@ bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos) struct mac_learning *ml = ofproto->ml; struct mac_entry *mac, *next_mac; - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) { if (mac->port.p == bundle) { if (all_ofprotos) { @@ -1708,7 +1753,7 @@ bundle_del_port(struct ofport_dpif *port) { struct ofbundle *bundle = port->bundle; - bundle->ofproto->need_revalidate = true; + bundle->ofproto->need_revalidate = REV_RECONFIGURE; list_remove(&port->bundle_node); port->bundle = NULL; @@ -1736,7 +1781,7 @@ bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port, } if (port->bundle != bundle) { - bundle->ofproto->need_revalidate = true; + bundle->ofproto->need_revalidate = REV_RECONFIGURE; if (port->bundle) { bundle_del_port(port); } @@ -1749,7 +1794,7 @@ bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port, } } if (lacp) { - port->bundle->ofproto->need_revalidate = true; + port->bundle->ofproto->need_revalidate = REV_RECONFIGURE; lacp_slave_register(bundle->lacp, port, lacp); } @@ -1777,7 +1822,7 @@ bundle_destroy(struct ofbundle *bundle) mirror_destroy(m); } else if (hmapx_find_and_delete(&m->srcs, bundle) || hmapx_find_and_delete(&m->dsts, bundle)) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } } } @@ -1849,7 +1894,7 @@ bundle_set(struct ofproto *ofproto_, void *aux, /* LACP. 
*/ if (s->lacp) { if (!bundle->lacp) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; bundle->lacp = lacp_create(); } lacp_configure(bundle->lacp, s->lacp); @@ -1955,11 +2000,11 @@ bundle_set(struct ofproto *ofproto_, void *aux, bundle->ofproto->has_bonded_bundles = true; if (bundle->bond) { if (bond_reconfigure(bundle->bond, s->bond)) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } } else { bundle->bond = bond_create(s->bond); - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } LIST_FOR_EACH (port, bundle_node, &bundle->ports) { @@ -2264,7 +2309,7 @@ mirror_set(struct ofproto *ofproto_, void *aux, } } - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; ofproto->has_mirrors = true; mac_learning_flush(ofproto->ml, &ofproto->revalidate_set); mirror_update_dups(ofproto); @@ -2285,7 +2330,7 @@ mirror_destroy(struct ofmirror *mirror) } ofproto = mirror->ofproto; - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; mac_learning_flush(ofproto->ml, &ofproto->revalidate_set); mirror_bit = MIRROR_MASK_C(1) << mirror->idx; @@ -2354,8 +2399,7 @@ static void forward_bpdu_changed(struct ofproto *ofproto_) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); - /* Revalidate cached flows whenever forward_bpdu option changes. */ - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; } static void @@ -2389,6 +2433,19 @@ ofproto_port_from_dpif_port(struct ofproto_port *ofproto_port, ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no); } +static void +port_run_fast(struct ofport_dpif *ofport) +{ + if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) { + struct ofpbuf packet; + + ofpbuf_init(&packet, 0); + cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr); + send_packet(ofport, &packet); + ofpbuf_uninit(&packet); + } +} + static void port_run(struct ofport_dpif *ofport) { @@ -2398,18 +2455,9 @@ port_run(struct ofport_dpif *ofport) ofport->carrier_seq = carrier_seq; + port_run_fast(ofport); if (ofport->cfm) { cfm_run(ofport->cfm); - - if (cfm_should_send_ccm(ofport->cfm)) { - struct ofpbuf packet; - - ofpbuf_init(&packet, 0); - cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr); - send_packet(ofport, &packet); - ofpbuf_uninit(&packet); - } - enable = enable && !cfm_get_fault(ofport->cfm) && cfm_get_opup(ofport->cfm); } @@ -2425,7 +2473,7 @@ port_run(struct ofport_dpif *ofport) struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); if (ofproto->has_bundle_action) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_PORT_TOGGLED; } } @@ -2819,7 +2867,7 @@ handle_flow_miss_without_facet(struct flow_miss *miss, action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci, rule, 0, packet); ctx.resubmit_stats = &stats; - xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, + xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions); if (odp_actions.size) { @@ -3158,7 +3206,6 @@ handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch) assert(max_batch <= FLOW_MISS_MAX_BATCH); - n_processed = 0; n_misses = 0; for (n_processed = 0; n_processed < max_batch; n_processed++) { struct dpif_upcall *upcall = &misses[n_misses]; @@ -3515,6 +3562,11 @@ rule_expire(struct rule_dpif *rule) long long int now; uint8_t reason; + if (rule->up.pending) { + /* We'll have to expire it later. */ + return; + } + /* Has 'rule' expired? 
*/ now = time_msec(); if (rule->up.hard_timeout @@ -3655,8 +3707,8 @@ facet_learn(struct facet *facet) facet->flow.vlan_tci, facet->rule, facet->tcp_flags, NULL); ctx.may_learn = true; - xlate_actions_for_side_effects(&ctx, facet->rule->up.actions, - facet->rule->up.n_actions); + xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts, + facet->rule->up.ofpacts_len); } static void @@ -3717,10 +3769,17 @@ facet_account(struct facet *facet) static bool facet_is_controller_flow(struct facet *facet) { - return (facet - && facet->rule->up.n_actions == 1 - && action_outputs_to_port(&facet->rule->up.actions[0], - htons(OFPP_CONTROLLER))); + if (facet) { + const struct rule *rule = &facet->rule->up; + const struct ofpact *ofpacts = rule->ofpacts; + size_t ofpacts_len = rule->ofpacts_len; + + if (ofpacts->type == OFPACT_CONTROLLER && + ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) { + return true; + } + } + return false; } /* Folds all of 'facet''s statistics into its rule. Also updates the @@ -3895,7 +3954,7 @@ facet_check_consistency(struct facet *facet) action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci, rule, 0, NULL); - xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, + xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions); if (subfacet->path == SF_NOT_INSTALLED) { @@ -4005,7 +4064,7 @@ facet_revalidate(struct facet *facet) action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci, new_rule, 0, NULL); - xlate_actions(&ctx, new_rule->up.actions, new_rule->up.n_actions, + xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len, &odp_actions); slow = (subfacet->slow & SLOW_MATCH) | ctx.slow; @@ -4135,7 +4194,8 @@ flow_push_stats(struct rule_dpif *rule, action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule, 0, NULL); ctx.resubmit_stats = stats; - xlate_actions_for_side_effects(&ctx, rule->up.actions, rule->up.n_actions); + xlate_actions_for_side_effects(&ctx, rule->up.ofpacts, + rule->up.ofpacts_len); } /* Subfacets. 
*/ @@ -4295,7 +4355,7 @@ subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet, action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci, rule, 0, packet); - xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, odp_actions); + xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions); facet->tags = ctx.tags; facet->has_learn = ctx.has_learn; facet->has_normal = ctx.has_normal; @@ -4532,8 +4592,8 @@ rule_construct(struct rule *rule_) uint8_t table_id; enum ofperr error; - error = validate_actions(rule->up.actions, rule->up.n_actions, - &rule->up.cr.flow, ofproto->max_ports); + error = ofpacts_check(rule->up.ofpacts, rule->up.ofpacts_len, + &rule->up.cr.flow, ofproto->max_ports); if (error) { return error; } @@ -4625,7 +4685,7 @@ rule_execute(struct rule *rule_, const struct flow *flow, action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule, stats.tcp_flags, packet); ctx.resubmit_stats = &stats; - xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, &odp_actions); + xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions); execute_odp_actions(ofproto, flow, odp_actions.data, odp_actions.size, packet); @@ -4642,8 +4702,8 @@ rule_modify_actions(struct rule *rule_) struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); enum ofperr error; - error = validate_actions(rule->up.actions, rule->up.n_actions, - &rule->up.cr.flow, ofproto->max_ports); + error = ofpacts_check(rule->up.ofpacts, rule->up.ofpacts_len, + &rule->up.cr.flow, ofproto->max_ports); if (error) { ofoperation_complete(rule->up.pending, error); return; @@ -4696,8 +4756,8 @@ send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet) /* OpenFlow to datapath action translation. */ -static void do_xlate_actions(const union ofp_action *in, size_t n_in, - struct action_xlate_ctx *ctx); +static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len, + struct action_xlate_ctx *); static void xlate_normal(struct action_xlate_ctx *); /* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'. @@ -4943,7 +5003,7 @@ xlate_table_action(struct action_xlate_ctx *ctx, ctx->recurse++; ctx->rule = rule; - do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx); + do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx); ctx->rule = old_rule; ctx->recurse--; } @@ -4959,16 +5019,21 @@ xlate_table_action(struct action_xlate_ctx *ctx, } static void -xlate_resubmit_table(struct action_xlate_ctx *ctx, - const struct nx_action_resubmit *nar) +xlate_ofpact_resubmit(struct action_xlate_ctx *ctx, + const struct ofpact_resubmit *resubmit) { uint16_t in_port; uint8_t table_id; - in_port = (nar->in_port == htons(OFPP_IN_PORT) - ? ctx->flow.in_port - : ntohs(nar->in_port)); - table_id = nar->table == 255 ? 
ctx->table_id : nar->table; + in_port = resubmit->in_port; + if (in_port == OFPP_IN_PORT) { + in_port = ctx->flow.in_port; + } + + table_id = resubmit->table_id; + if (table_id == 255) { + table_id = ctx->table_id; + } xlate_table_action(ctx, in_port, table_id); } @@ -5081,8 +5146,8 @@ compose_dec_ttl(struct action_xlate_ctx *ctx) } static void -xlate_output_action__(struct action_xlate_ctx *ctx, - uint16_t port, uint16_t max_len) +xlate_output_action(struct action_xlate_ctx *ctx, + uint16_t port, uint16_t max_len) { uint16_t prev_nf_output_iface = ctx->nf_output_iface; @@ -5093,7 +5158,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, compose_output_action(ctx, ctx->flow.in_port); break; case OFPP_TABLE: - xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id); + xlate_table_action(ctx, ctx->flow.in_port, 0); break; case OFPP_NORMAL: xlate_normal(ctx); @@ -5129,44 +5194,32 @@ xlate_output_action__(struct action_xlate_ctx *ctx, static void xlate_output_reg_action(struct action_xlate_ctx *ctx, - const struct nx_action_output_reg *naor) + const struct ofpact_output_reg *or) { - struct mf_subfield src; - uint64_t ofp_port; - - nxm_decode(&src, naor->src, naor->ofs_nbits); - ofp_port = mf_get_subfield(&src, &ctx->flow); - - if (ofp_port <= UINT16_MAX) { - xlate_output_action__(ctx, ofp_port, ntohs(naor->max_len)); + uint64_t port = mf_get_subfield(&or->src, &ctx->flow); + if (port <= UINT16_MAX) { + xlate_output_action(ctx, port, or->max_len); } } -static void -xlate_output_action(struct action_xlate_ctx *ctx, - const struct ofp_action_output *oao) -{ - xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len)); -} - static void xlate_enqueue_action(struct action_xlate_ctx *ctx, - const struct ofp_action_enqueue *oae) + const struct ofpact_enqueue *enqueue) { - uint16_t ofp_port; + uint16_t ofp_port = enqueue->port; + uint32_t queue_id = enqueue->queue; uint32_t flow_priority, priority; int error; - error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id), - &priority); + /* Translate queue to priority. */ + error = dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &priority); if (error) { /* Fall back to ordinary output action. */ - xlate_output_action__(ctx, ntohs(oae->port), 0); + xlate_output_action(ctx, enqueue->port, 0); return; } - /* Figure out datapath output port. */ - ofp_port = ntohs(oae->port); + /* Check output port. */ if (ofp_port == OFPP_IN_PORT) { ofp_port = ctx->flow.in_port; } else if (ofp_port == ctx->flow.in_port) { @@ -5188,21 +5241,16 @@ xlate_enqueue_action(struct action_xlate_ctx *ctx, } static void -xlate_set_queue_action(struct action_xlate_ctx *ctx, - const struct nx_action_set_queue *nasq) +xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id) { - uint32_t priority; - int error; + uint32_t skb_priority; - error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id), - &priority); - if (error) { - /* Couldn't translate queue to a priority, so ignore. A warning + if (!dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &skb_priority)) { + ctx->flow.skb_priority = skb_priority; + } else { + /* Couldn't translate queue to a priority. Nothing to do. A warning * has already been logged. 
*/ - return; } - - ctx->flow.skb_priority = priority; } struct xlate_reg_state { @@ -5212,9 +5260,9 @@ struct xlate_reg_state { static void xlate_autopath(struct action_xlate_ctx *ctx, - const struct nx_action_autopath *naa) + const struct ofpact_autopath *ap) { - uint16_t ofp_port = ntohl(naa->id); + uint16_t ofp_port = ap->port; struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port); if (!port || !port->bundle) { @@ -5227,7 +5275,7 @@ xlate_autopath(struct action_xlate_ctx *ctx, ofp_port = slave->up.ofp_port; } } - autopath_execute(naa, &ctx->flow, ofp_port); + nxm_reg_load(&ap->dst, ofp_port, &ctx->flow); } static bool @@ -5252,15 +5300,32 @@ slave_enabled_cb(uint16_t ofp_port, void *ofproto_) } } +static void +xlate_bundle_action(struct action_xlate_ctx *ctx, + const struct ofpact_bundle *bundle) +{ + uint16_t port; + + port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto); + if (bundle->dst.field) { + nxm_reg_load(&bundle->dst, port, &ctx->flow); + } else { + xlate_output_action(ctx, port, 0); + } +} + static void xlate_learn_action(struct action_xlate_ctx *ctx, - const struct nx_action_learn *learn) + const struct ofpact_learn *learn) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1); struct ofputil_flow_mod fm; + uint64_t ofpacts_stub[1024 / 8]; + struct ofpbuf ofpacts; int error; - learn_execute(learn, &ctx->flow, &fm); + ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); + learn_execute(learn, &ctx->flow, &fm, &ofpacts); error = ofproto_flow_mod(&ctx->ofproto->up, &fm); if (error && !VLOG_DROP_WARN(&rl)) { @@ -5268,7 +5333,7 @@ xlate_learn_action(struct action_xlate_ctx *ctx, ofperr_get_name(error)); } - free(fm.actions); + ofpbuf_uninit(&ofpacts); } /* Reduces '*timeout' to no more than 'max'. 
A value of zero in either case @@ -5283,13 +5348,13 @@ reduce_timeout(uint16_t max, uint16_t *timeout) static void xlate_fin_timeout(struct action_xlate_ctx *ctx, - const struct nx_action_fin_timeout *naft) + const struct ofpact_fin_timeout *oft) { if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) { struct rule_dpif *rule = ctx->rule; - reduce_timeout(ntohs(naft->fin_idle_timeout), &rule->up.idle_timeout); - reduce_timeout(ntohs(naft->fin_hard_timeout), &rule->up.hard_timeout); + reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout); + reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout); } } @@ -5315,13 +5380,12 @@ may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx) } static void -do_xlate_actions(const union ofp_action *in, size_t n_in, +do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, struct action_xlate_ctx *ctx) { const struct ofport_dpif *port; - const union ofp_action *ia; bool was_evictable = true; - size_t left; + const struct ofpact *a; port = get_ofp_port(ctx->ofproto, ctx->flow.in_port); if (port && !may_receive(port, ctx)) { @@ -5334,181 +5398,146 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, was_evictable = ctx->rule->up.evictable; ctx->rule->up.evictable = false; } - OFPUTIL_ACTION_FOR_EACH_UNSAFE (ia, left, in, n_in) { - const struct ofp_action_dl_addr *oada; - const struct nx_action_resubmit *nar; - const struct nx_action_set_tunnel *nast; - const struct nx_action_set_queue *nasq; - const struct nx_action_multipath *nam; - const struct nx_action_autopath *naa; - const struct nx_action_bundle *nab; - const struct nx_action_output_reg *naor; - const struct nx_action_controller *nac; - enum ofputil_action_code code; - ovs_be64 tun_id; + OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) { + struct ofpact_controller *controller; if (ctx->exit) { break; } - code = ofputil_decode_action_unsafe(ia); - switch (code) { - case OFPUTIL_OFPAT10_OUTPUT: - xlate_output_action(ctx, &ia->output); + switch (a->type) { + case OFPACT_OUTPUT: + xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port, + ofpact_get_OUTPUT(a)->max_len); break; - case OFPUTIL_OFPAT10_SET_VLAN_VID: + case OFPACT_CONTROLLER: + controller = ofpact_get_CONTROLLER(a); + execute_controller_action(ctx, controller->max_len, + controller->reason, + controller->controller_id); + break; + + case OFPACT_ENQUEUE: + xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a)); + break; + + case OFPACT_SET_VLAN_VID: ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK); - ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI); + ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) + | htons(VLAN_CFI)); break; - case OFPUTIL_OFPAT10_SET_VLAN_PCP: + case OFPACT_SET_VLAN_PCP: ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK); - ctx->flow.vlan_tci |= htons( - (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI); + ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp + << VLAN_PCP_SHIFT) + | VLAN_CFI); break; - case OFPUTIL_OFPAT10_STRIP_VLAN: + case OFPACT_STRIP_VLAN: ctx->flow.vlan_tci = htons(0); break; - case OFPUTIL_OFPAT10_SET_DL_SRC: - oada = ((struct ofp_action_dl_addr *) ia); - memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN); + case OFPACT_SET_ETH_SRC: + memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac, + ETH_ADDR_LEN); break; - case OFPUTIL_OFPAT10_SET_DL_DST: - oada = ((struct ofp_action_dl_addr *) ia); - memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN); + case OFPACT_SET_ETH_DST: + memcpy(ctx->flow.dl_dst, 
ofpact_get_SET_ETH_DST(a)->mac, + ETH_ADDR_LEN); break; - case OFPUTIL_OFPAT10_SET_NW_SRC: - ctx->flow.nw_src = ia->nw_addr.nw_addr; + case OFPACT_SET_IPV4_SRC: + ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4; break; - case OFPUTIL_OFPAT10_SET_NW_DST: - ctx->flow.nw_dst = ia->nw_addr.nw_addr; + case OFPACT_SET_IPV4_DST: + ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4; break; - case OFPUTIL_OFPAT10_SET_NW_TOS: + case OFPACT_SET_IPV4_DSCP: /* OpenFlow 1.0 only supports IPv4. */ if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) { ctx->flow.nw_tos &= ~IP_DSCP_MASK; - ctx->flow.nw_tos |= ia->nw_tos.nw_tos & IP_DSCP_MASK; + ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp; } break; - case OFPUTIL_OFPAT10_SET_TP_SRC: - ctx->flow.tp_src = ia->tp_port.tp_port; - break; - - case OFPUTIL_OFPAT10_SET_TP_DST: - ctx->flow.tp_dst = ia->tp_port.tp_port; + case OFPACT_SET_L4_SRC_PORT: + ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port); break; - case OFPUTIL_OFPAT10_ENQUEUE: - xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia); + case OFPACT_SET_L4_DST_PORT: + ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port); break; - case OFPUTIL_NXAST_RESUBMIT: - nar = (const struct nx_action_resubmit *) ia; - xlate_table_action(ctx, ntohs(nar->in_port), ctx->table_id); + case OFPACT_RESUBMIT: + xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a)); break; - case OFPUTIL_NXAST_RESUBMIT_TABLE: - xlate_resubmit_table(ctx, (const struct nx_action_resubmit *) ia); + case OFPACT_SET_TUNNEL: + ctx->flow.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id); break; - case OFPUTIL_NXAST_SET_TUNNEL: - nast = (const struct nx_action_set_tunnel *) ia; - tun_id = htonll(ntohl(nast->tun_id)); - ctx->flow.tun_id = tun_id; + case OFPACT_SET_QUEUE: + xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id); break; - case OFPUTIL_NXAST_SET_QUEUE: - nasq = (const struct nx_action_set_queue *) ia; - xlate_set_queue_action(ctx, nasq); - break; - - case OFPUTIL_NXAST_POP_QUEUE: + case OFPACT_POP_QUEUE: ctx->flow.skb_priority = ctx->orig_skb_priority; break; - case OFPUTIL_NXAST_REG_MOVE: - nxm_execute_reg_move((const struct nx_action_reg_move *) ia, - &ctx->flow); - break; - - case OFPUTIL_NXAST_REG_LOAD: - nxm_execute_reg_load((const struct nx_action_reg_load *) ia, - &ctx->flow); + case OFPACT_REG_MOVE: + nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow); break; - case OFPUTIL_NXAST_NOTE: - /* Nothing to do. */ + case OFPACT_REG_LOAD: + nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow); break; - case OFPUTIL_NXAST_SET_TUNNEL64: - tun_id = ((const struct nx_action_set_tunnel64 *) ia)->tun_id; - ctx->flow.tun_id = tun_id; + case OFPACT_DEC_TTL: + if (compose_dec_ttl(ctx)) { + goto out; + } break; - case OFPUTIL_NXAST_MULTIPATH: - nam = (const struct nx_action_multipath *) ia; - multipath_execute(nam, &ctx->flow); + case OFPACT_NOTE: + /* Nothing to do. 
*/ break; - case OFPUTIL_NXAST_AUTOPATH: - naa = (const struct nx_action_autopath *) ia; - xlate_autopath(ctx, naa); + case OFPACT_MULTIPATH: + multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow); break; - case OFPUTIL_NXAST_BUNDLE: - ctx->ofproto->has_bundle_action = true; - nab = (const struct nx_action_bundle *) ia; - xlate_output_action__(ctx, bundle_execute(nab, &ctx->flow, - slave_enabled_cb, - ctx->ofproto), 0); + case OFPACT_AUTOPATH: + xlate_autopath(ctx, ofpact_get_AUTOPATH(a)); break; - case OFPUTIL_NXAST_BUNDLE_LOAD: + case OFPACT_BUNDLE: ctx->ofproto->has_bundle_action = true; - nab = (const struct nx_action_bundle *) ia; - bundle_execute_load(nab, &ctx->flow, slave_enabled_cb, - ctx->ofproto); + xlate_bundle_action(ctx, ofpact_get_BUNDLE(a)); break; - case OFPUTIL_NXAST_OUTPUT_REG: - naor = (const struct nx_action_output_reg *) ia; - xlate_output_reg_action(ctx, naor); + case OFPACT_OUTPUT_REG: + xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a)); break; - case OFPUTIL_NXAST_LEARN: + case OFPACT_LEARN: ctx->has_learn = true; if (ctx->may_learn) { - xlate_learn_action(ctx, (const struct nx_action_learn *) ia); + xlate_learn_action(ctx, ofpact_get_LEARN(a)); } break; - case OFPUTIL_NXAST_DEC_TTL: - if (compose_dec_ttl(ctx)) { - goto out; - } - break; - - case OFPUTIL_NXAST_EXIT: + case OFPACT_EXIT: ctx->exit = true; break; - case OFPUTIL_NXAST_FIN_TIMEOUT: + case OFPACT_FIN_TIMEOUT: ctx->has_fin_timeout = true; - xlate_fin_timeout(ctx, (const struct nx_action_fin_timeout *) ia); - break; - - case OFPUTIL_NXAST_CONTROLLER: - nac = (const struct nx_action_controller *) ia; - execute_controller_action(ctx, ntohs(nac->max_len), nac->reason, - ntohs(nac->controller_id)); + xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a)); break; } } @@ -5544,11 +5573,11 @@ action_xlate_ctx_init(struct action_xlate_ctx *ctx, ctx->resubmit_stats = NULL; } -/* Translates the 'n_in' "union ofp_action"s in 'in' into datapath actions in - * 'odp_actions', using 'ctx'. */ +/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts' + * into datapath actions in 'odp_actions', using 'ctx'. */ static void xlate_actions(struct action_xlate_ctx *ctx, - const union ofp_action *in, size_t n_in, + const struct ofpact *ofpacts, size_t ofpacts_len, struct ofpbuf *odp_actions) { /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so @@ -5618,7 +5647,7 @@ xlate_actions(struct action_xlate_ctx *ctx, ovs_be16 initial_tci = ctx->base_flow.vlan_tci; add_sflow_action(ctx); - do_xlate_actions(in, n_in, ctx); + do_xlate_actions(ofpacts, ofpacts_len, ctx); if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) { if (!hit_resubmit_limit) { @@ -5653,17 +5682,18 @@ xlate_actions(struct action_xlate_ctx *ctx, } } -/* Translates the 'n_in' "union ofp_action"s in 'in' into datapath actions, - * using 'ctx', and discards the datapath actions. */ +/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts' + * into datapath actions, using 'ctx', and discards the datapath actions. 
*/ static void xlate_actions_for_side_effects(struct action_xlate_ctx *ctx, - const union ofp_action *in, size_t n_in) + const struct ofpact *ofpacts, + size_t ofpacts_len) { uint64_t odp_actions_stub[1024 / 8]; struct ofpbuf odp_actions; ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub); - xlate_actions(ctx, in, n_in, &odp_actions); + xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions); ofpbuf_uninit(&odp_actions); } @@ -5848,47 +5878,6 @@ vlan_is_mirrored(const struct ofmirror *m, int vlan) return !m->vlans || bitmap_is_set(m->vlans, vlan); } -/* Returns true if a packet with Ethernet destination MAC 'dst' may be mirrored - * to a VLAN. In general most packets may be mirrored but we want to drop - * protocols that may confuse switches. */ -static bool -eth_dst_may_rspan(const uint8_t dst[ETH_ADDR_LEN]) -{ - /* If you change this function's behavior, please update corresponding - * documentation in vswitch.xml at the same time. */ - if (dst[0] != 0x01) { - /* All the currently banned MACs happen to start with 01 currently, so - * this is a quick way to eliminate most of the good ones. */ - } else { - if (eth_addr_is_reserved(dst)) { - /* Drop STP, IEEE pause frames, and other reserved protocols - * (01-80-c2-00-00-0x). */ - return false; - } - - if (dst[0] == 0x01 && dst[1] == 0x00 && dst[2] == 0x0c) { - /* Cisco OUI. */ - if ((dst[3] & 0xfe) == 0xcc && - (dst[4] & 0xfe) == 0xcc && - (dst[5] & 0xfe) == 0xcc) { - /* Drop the following protocols plus others following the same - pattern: - - CDP, VTP, DTP, PAgP (01-00-0c-cc-cc-cc) - Spanning Tree PVSTP+ (01-00-0c-cc-cc-cd) - STP Uplink Fast (01-00-0c-cd-cd-cd) */ - return false; - } - - if (!(dst[3] | dst[4] | dst[5])) { - /* Drop Inter Switch Link packets (01-00-0c-00-00-00). */ - return false; - } - } - } - return true; -} - static void add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow) { @@ -5963,8 +5952,8 @@ add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow) ctx->mirrors |= m->dup_mirrors; if (m->out) { output_normal(ctx, m->out, vlan); - } else if (eth_dst_may_rspan(orig_flow->dl_dst) - && vlan != m->out_vlan) { + } else if (vlan != m->out_vlan + && !eth_addr_is_reserved(orig_flow->dl_dst)) { struct ofbundle *bundle; HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { @@ -6090,6 +6079,9 @@ lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port, * we don't know about. * * - The ofproto client didn't configure the port as part of a bundle. + * This is particularly likely to happen if a packet was received on the + * port after it was created, but before the client had a chance to + * configure its bundle. */ if (warn) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); @@ -6122,7 +6114,7 @@ is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow, /* Drop frames for reserved multicast addresses * only if forward_bpdu option is absent. 
*/ - if (eth_addr_is_reserved(flow->dl_dst) && !ofproto->up.forward_bpdu) { + if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) { return false; } @@ -6316,7 +6308,7 @@ table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id) if (table->catchall_table != catchall || table->other_table != other) { table->catchall_table = catchall; table->other_table = other; - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_FLOW_TABLE; } } @@ -6340,7 +6332,7 @@ rule_invalidate(const struct rule_dpif *rule) if (table->other_table && rule->tag) { tag_set_add(&ofproto->revalidate_set, rule->tag); } else { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_FLOW_TABLE; } } } @@ -6352,7 +6344,7 @@ set_frag_handling(struct ofproto *ofproto_, struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); if (frag_handling != OFPC_FRAG_REASM) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; return true; } else { return false; @@ -6362,7 +6354,7 @@ set_frag_handling(struct ofproto *ofproto_, static enum ofperr packet_out(struct ofproto *ofproto_, struct ofpbuf *packet, const struct flow *flow, - const union ofp_action *ofp_actions, size_t n_ofp_actions) + const struct ofpact *ofpacts, size_t ofpacts_len) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); enum ofperr error; @@ -6371,8 +6363,7 @@ packet_out(struct ofproto *ofproto_, struct ofpbuf *packet, return OFPERR_NXBRC_BAD_IN_PORT; } - error = validate_actions(ofp_actions, n_ofp_actions, flow, - ofproto->max_ports); + error = ofpacts_check(ofpacts, ofpacts_len, flow, ofproto->max_ports); if (!error) { struct odputil_keybuf keybuf; struct dpif_flow_stats stats; @@ -6394,7 +6385,7 @@ packet_out(struct ofproto *ofproto_, struct ofpbuf *packet, ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub); - xlate_actions(&ctx, ofp_actions, n_ofp_actions, &odp_actions); + xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions); dpif_execute(ofproto->dpif, key.data, key.size, odp_actions.data, odp_actions.size, packet); ofpbuf_uninit(&odp_actions); @@ -6551,7 +6542,7 @@ trace_format_rule(struct ds *result, uint8_t table_id, int level, ds_put_char_multiple(result, '\t', level); ds_put_cstr(result, "OpenFlow "); - ofp_print_actions(result, rule->up.actions, rule->up.n_actions); + ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result); ds_put_char(result, '\n'); } @@ -6759,7 +6750,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow, action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci, rule, tcp_flags, packet); trace.ctx.resubmit_hook = trace_resubmit; - xlate_actions(&trace.ctx, rule->up.actions, rule->up.n_actions, + xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions); ds_put_char(ds, '\n'); @@ -6847,7 +6838,7 @@ ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply) } } if (errors) { - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_INCONSISTENCY; } if (errors) { @@ -6926,7 +6917,7 @@ set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid) return 0; } - ofproto->need_revalidate = true; + ofproto->need_revalidate = REV_RECONFIGURE; if (ofport->realdev_ofp_port) { vsp_remove(ofport); @@ -7101,6 +7092,7 @@ const struct ofproto_class ofproto_dpif_class = { run, run_fast, wait, + get_memory_usage, flush, get_features, get_tables,
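The refactoring that dominates this diff is the switch from fixed-size "union ofp_action" arrays counted in actions (n_actions) to variable-length "struct ofpact" lists measured in bytes (ofpacts_len), provided by the newly included ofp-actions.h. A minimal sketch of the new pattern, assuming an Open vSwitch tree with the ofp-actions.h interfaces used above (the function name below is illustrative only and not part of the change):

/* Sketch only: build a small ofpact list on the stack and walk it the way
 * the new do_xlate_actions() does.  Assumes the ofpbuf.h, ofp-actions.h,
 * and openflow headers from the Open vSwitch source tree. */
#include <stdint.h>
#include "ofpbuf.h"
#include "ofp-actions.h"
#include "openflow/openflow.h"   /* OFPR_NO_MATCH */

static void
ofpacts_example(void)
{
    uint64_t stub[128 / 8];
    struct ofpbuf ofpacts;
    struct ofpact_controller *controller;
    const struct ofpact *a;

    ofpbuf_use_stack(&ofpacts, stub, sizeof stub);

    /* Append a "send to controller" action, as add_internal_flows() does. */
    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_NO_MATCH;
    ofpact_pad(&ofpacts);

    /* Walk the list.  The length argument is a byte count, so ofpacts.size
     * can be passed straight through, just as rule->up.ofpacts_len is in
     * the code above. */
    OFPACT_FOR_EACH (a, ofpacts.data, ofpacts.size) {
        if (a->type == OFPACT_CONTROLLER) {
            /* ofpact_get_CONTROLLER(a) recovers the full structure here. */
        }
    }
}

Keeping the length in bytes rather than in a count of actions is what lets variable-length actions such as learn and bundle share the same in-memory list as the fixed-size ones, which is why nearly every caller touched by this diff now passes an (ofpacts, ofpacts_len) pair instead of (actions, n_actions).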