struct rule_dpif {
struct rule up;
- long long int used; /* Time last used; time created if not used. */
-
/* These statistics:
*
* - Do include packets and bytes from facets that have been deleted or
static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
const struct flow *, uint8_t table);
-static void flow_push_stats(const struct rule_dpif *, const struct flow *,
+static void flow_push_stats(struct rule_dpif *, const struct flow *,
uint64_t packets, uint64_t bytes,
long long int used);
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
+static int set_stp_port(struct ofport *,
+ const struct ofproto_port_stp_settings *);
static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);
* we are just revalidating. */
bool may_learn;
- /* Cookie of the currently matching rule, or 0. */
- ovs_be64 cookie;
+ /* The rule that we are currently translating, or NULL. */
+ struct rule_dpif *rule;
+
+ /* Union of the set of TCP flags seen so far in this flow. (Used only by
+ * NXAST_FIN_TIMEOUT. Set to zero to avoid updating rules'
+ * timeouts.) */
+ uint8_t tcp_flags;
/* If nonnull, called just before executing a resubmit action.
*
* be reassessed for every packet. */
bool has_learn; /* Actions include NXAST_LEARN? */
bool has_normal; /* Actions output to OFPP_NORMAL? */
+ bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
uint16_t nf_output_iface; /* Output interface index for NetFlow. */
mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
static void action_xlate_ctx_init(struct action_xlate_ctx *,
struct ofproto_dpif *, const struct flow *,
- ovs_be16 initial_tci, ovs_be64 cookie,
- const struct ofpbuf *);
+ ovs_be16 initial_tci, struct rule_dpif *,
+ uint8_t tcp_flags, const struct ofpbuf *);
static struct ofpbuf *xlate_actions(struct action_xlate_ctx *,
const union ofp_action *in, size_t n_in);
/* Accounting. */
uint64_t accounted_bytes; /* Bytes processed by facet_account(). */
struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
+ uint8_t tcp_flags; /* TCP flags seen for this 'rule'. */
/* Properties of datapath actions.
*
bool may_install; /* Reassess actions for every packet? */
bool has_learn; /* Actions include NXAST_LEARN? */
bool has_normal; /* Actions output to OFPP_NORMAL? */
+ bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
tag_type tags; /* Tags that would require revalidation. */
mirror_mask_t mirrors; /* Bitmap of dependent mirrors. */
};
}
static int
-construct(struct ofproto *ofproto_, int *n_tablesp)
+construct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
const char *name = ofproto->up.name;
ofproto->sflow = NULL;
ofproto->stp = NULL;
hmap_init(&ofproto->bundles);
- ofproto->ml = mac_learning_create();
+ ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
for (i = 0; i < MAX_MIRRORS; i++) {
ofproto->mirrors[i] = NULL;
}
hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
hash_string(ofproto->up.name, 0));
-
- *n_tablesp = N_TABLES;
memset(&ofproto->stats, 0, sizeof ofproto->stats);
+
+ ofproto_init_tables(ofproto_, N_TABLES);
+
return 0;
}
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct rule_dpif *rule, *next_rule;
- struct classifier *table;
+ struct oftable *table;
int i;
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
struct cls_cursor cursor;
- cls_cursor_init(&cursor, table, NULL);
+ cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
ofproto_rule_destroy(&rule->up);
}
stp_set_max_age(ofproto->stp, s->max_age);
stp_set_forward_delay(ofproto->stp, s->fwd_delay);
} else {
+ struct ofport *ofport;
+
+ HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
+ set_stp_port(ofport, NULL);
+ }
+
stp_destroy(ofproto->stp);
ofproto->stp = NULL;
}
/* Revalidate cached flows whenever forward_bpdu option changes. */
ofproto->need_revalidate = true;
}
+
+/* ofproto 'set_mac_idle_time' handler: sets the idle time after which
+ * dynamically learned entries in 'ofproto_''s MAC learning table expire.
+ * (Units are whatever mac_learning_set_idle_time() expects -- presumably
+ * seconds; confirm against mac-learning.h.) */
+static void
+set_mac_idle_time(struct ofproto *ofproto_, unsigned int idle_time)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+    mac_learning_set_idle_time(ofproto->ml, idle_time);
+}
\f
/* Ports. */
pin.packet_len = packet->size;
pin.total_len = packet->size;
pin.reason = OFPR_NO_MATCH;
+ pin.controller_id = 0;
pin.table_id = 0;
pin.cookie = 0;
expire(struct ofproto_dpif *ofproto)
{
struct rule_dpif *rule, *next_rule;
- struct classifier *table;
+ struct oftable *table;
int dp_max_idle;
/* Update stats for each flow in the datapath. */
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
struct cls_cursor cursor;
- cls_cursor_init(&cursor, table, NULL);
+ cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
rule_expire(rule);
}
subfacet->dp_packet_count = stats->n_packets;
subfacet->dp_byte_count = stats->n_bytes;
+ facet->tcp_flags |= stats->tcp_flags;
+
subfacet_update_time(subfacet, stats->used);
facet_account(facet);
facet_push_stats(facet);
if (rule->up.hard_timeout
&& now > rule->up.modified + rule->up.hard_timeout * 1000) {
reason = OFPRR_HARD_TIMEOUT;
- } else if (rule->up.idle_timeout && list_is_empty(&rule->facets)
- && now > rule->used + rule->up.idle_timeout * 1000) {
+ } else if (rule->up.idle_timeout
+ && now > rule->up.used + rule->up.idle_timeout * 1000) {
reason = OFPRR_IDLE_TIMEOUT;
} else {
return;
/* Feed information from the active flows back into the learning table to
* ensure that table is always in sync with what is actually flowing
* through the datapath. */
- if (facet->has_learn || facet->has_normal) {
+ if (facet->has_learn || facet->has_normal
+ || (facet->has_fin_timeout
+ && facet->tcp_flags & (TCP_FIN | TCP_RST))) {
struct action_xlate_ctx ctx;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
facet->flow.vlan_tci,
- facet->rule->up.flow_cookie, NULL);
+ facet->rule, facet->tcp_flags, NULL);
ctx.may_learn = true;
ofpbuf_delete(xlate_actions(&ctx, facet->rule->up.actions,
facet->rule->up.n_actions));
facet_reset_counters(facet);
netflow_flow_clear(&facet->nf_flow);
+ facet->tcp_flags = 0;
}
/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
bool should_install;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- subfacet->initial_tci, rule->up.flow_cookie,
- NULL);
+ subfacet->initial_tci, rule, 0, NULL);
odp_actions = xlate_actions(&ctx, rule->up.actions,
rule->up.n_actions);
bool should_install;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- subfacet->initial_tci, new_rule->up.flow_cookie,
- NULL);
+ subfacet->initial_tci, new_rule, 0, NULL);
odp_actions = xlate_actions(&ctx, new_rule->up.actions,
new_rule->up.n_actions);
actions_changed = (subfacet->actions_len != odp_actions->size
facet->may_install = ctx.may_set_up_flow;
facet->has_learn = ctx.has_learn;
facet->has_normal = ctx.has_normal;
+ facet->has_fin_timeout = ctx.has_fin_timeout;
facet->mirrors = ctx.mirrors;
if (new_actions) {
i = 0;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
if (used > facet->used) {
facet->used = used;
- if (used > facet->rule->used) {
- facet->rule->used = used;
- }
+ ofproto_rule_update_used(&facet->rule->up, used);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
}
}
if (rule) {
rule->packet_count += push->packets;
rule->byte_count += push->bytes;
- rule->used = MAX(push->used, rule->used);
+ ofproto_rule_update_used(&rule->up, push->used);
}
}
/* Pushes flow statistics to the rules which 'flow' resubmits into given
* 'rule''s actions and mirrors. */
static void
-flow_push_stats(const struct rule_dpif *rule,
+flow_push_stats(struct rule_dpif *rule,
const struct flow *flow, uint64_t packets, uint64_t bytes,
long long int used)
{
push.bytes = bytes;
push.used = used;
- action_xlate_ctx_init(&push.ctx, ofproto, flow, flow->vlan_tci,
- rule->up.flow_cookie, NULL);
+ ofproto_rule_update_used(&rule->up, used);
+
+ action_xlate_ctx_init(&push.ctx, ofproto, flow, flow->vlan_tci, rule,
+ 0, NULL);
push.ctx.resubmit_hook = push_resubmit;
ofpbuf_delete(xlate_actions(&push.ctx,
rule->up.actions, rule->up.n_actions));
subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet)
{
struct facet *facet = subfacet->facet;
- const struct rule_dpif *rule = facet->rule;
+ struct rule_dpif *rule = facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
struct ofpbuf *odp_actions;
struct action_xlate_ctx ctx;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
- rule->up.flow_cookie, packet);
+ rule, 0, packet);
odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
facet->tags = ctx.tags;
facet->may_install = ctx.may_set_up_flow;
facet->has_learn = ctx.has_learn;
facet->has_normal = ctx.has_normal;
+ facet->has_fin_timeout = ctx.has_fin_timeout;
facet->nf_flow.output_iface = ctx.nf_output_iface;
facet->mirrors = ctx.mirrors;
subfacet_update_time(subfacet, stats->used);
facet->packet_count += stats->n_packets;
facet->byte_count += stats->n_bytes;
+ facet->tcp_flags |= stats->tcp_flags;
facet_push_stats(facet);
netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
}
return NULL;
}
- cls = &ofproto->up.tables[table_id];
+ cls = &ofproto->up.tables[table_id].cls;
if (flow->nw_frag & FLOW_NW_FRAG_ANY
&& ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
/* For OFPC_NORMAL frag_handling, we must pretend that transport ports
return error;
}
- rule->used = rule->up.created;
rule->packet_count = 0;
rule->byte_count = 0;
size_t size;
action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
- rule->up.flow_cookie, packet);
+ rule, packet_get_tcp_flags(packet, flow), packet);
odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
size = packet->size;
if (execute_odp_actions(ofproto, flow, odp_actions->data,
odp_actions->size, packet)) {
- rule->used = time_msec();
rule->packet_count++;
rule->byte_count += size;
- flow_push_stats(rule, flow, 1, size, rule->used);
+ flow_push_stats(rule, flow, 1, size, time_msec());
}
ofpbuf_delete(odp_actions);
}
if (rule) {
- ovs_be64 old_cookie = ctx->cookie;
+ struct rule_dpif *old_rule = ctx->rule;
ctx->recurse++;
- ctx->cookie = rule->up.flow_cookie;
+ ctx->rule = rule;
do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx);
- ctx->cookie = old_cookie;
+ ctx->rule = old_rule;
ctx->recurse--;
}
static void
execute_controller_action(struct action_xlate_ctx *ctx, int len,
- enum ofp_packet_in_reason reason)
+ enum ofp_packet_in_reason reason,
+ uint16_t controller_id)
{
struct ofputil_packet_in pin;
struct ofpbuf *packet;
pin.packet = packet->data;
pin.packet_len = packet->size;
pin.reason = reason;
+ pin.controller_id = controller_id;
pin.table_id = ctx->table_id;
- pin.cookie = ctx->cookie;
+ pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
pin.buffer_id = 0;
pin.send_len = len;
ctx->flow.nw_ttl--;
return false;
} else {
- execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL);
+ execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
/* Stop processing for current table. */
return true;
flood_packets(ctx, true);
break;
case OFPP_CONTROLLER:
- execute_controller_action(ctx, max_len, OFPR_ACTION);
- break;
- case OFPP_LOCAL:
- compose_output_action(ctx, OFPP_LOCAL);
+ execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
break;
case OFPP_NONE:
break;
+ case OFPP_LOCAL:
default:
if (port != ctx->flow.in_port) {
compose_output_action(ctx, port);
free(fm.actions);
}
+/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
+ * means "infinite": a zero 'max' imposes no limit (the function is a no-op),
+ * and a zero '*timeout' is overwritten only when 'max' is nonzero. */
+static void
+reduce_timeout(uint16_t max, uint16_t *timeout)
+{
+    if (max && (!*timeout || *timeout > max)) {
+        *timeout = max;
+    }
+}
+
+/* Implements NXAST_FIN_TIMEOUT: if the flow being translated has a TCP FIN
+ * or RST flag set and translation is being done on behalf of a rule, clamps
+ * that rule's idle and hard timeouts to at most the limits carried in 'naft'
+ * (a zero limit in 'naft' leaves the corresponding timeout unchanged; see
+ * reduce_timeout()). */
+static void
+xlate_fin_timeout(struct action_xlate_ctx *ctx,
+                  const struct nx_action_fin_timeout *naft)
+{
+    if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
+        struct rule_dpif *rule = ctx->rule;
+
+        reduce_timeout(ntohs(naft->fin_idle_timeout), &rule->up.idle_timeout);
+        reduce_timeout(ntohs(naft->fin_hard_timeout), &rule->up.hard_timeout);
+    }
+}
+
static bool
may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
{
{
const struct ofport_dpif *port;
const union ofp_action *ia;
+ bool was_evictable = true;
size_t left;
port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
return;
}
+ if (ctx->rule) {
+ /* Don't let the rule we're working on get evicted underneath us. */
+ was_evictable = ctx->rule->up.evictable;
+ ctx->rule->up.evictable = false;
+ }
OFPUTIL_ACTION_FOR_EACH_UNSAFE (ia, left, in, n_in) {
const struct ofp_action_dl_addr *oada;
const struct nx_action_resubmit *nar;
const struct nx_action_autopath *naa;
const struct nx_action_bundle *nab;
const struct nx_action_output_reg *naor;
+ const struct nx_action_controller *nac;
enum ofputil_action_code code;
ovs_be64 tun_id;
case OFPUTIL_NXAST_EXIT:
ctx->exit = true;
break;
+
+ case OFPUTIL_NXAST_FIN_TIMEOUT:
+ ctx->has_fin_timeout = true;
+ xlate_fin_timeout(ctx, (const struct nx_action_fin_timeout *) ia);
+ break;
+
+ case OFPUTIL_NXAST_CONTROLLER:
+ nac = (const struct nx_action_controller *) ia;
+ execute_controller_action(ctx, ntohs(nac->max_len), nac->reason,
+ ntohs(nac->controller_id));
+ break;
}
}
ofpbuf_clear(ctx->odp_actions);
add_sflow_action(ctx);
}
+ if (ctx->rule) {
+ ctx->rule->up.evictable = was_evictable;
+ }
}
static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
struct ofproto_dpif *ofproto, const struct flow *flow,
- ovs_be16 initial_tci, ovs_be64 cookie,
- const struct ofpbuf *packet)
+ ovs_be16 initial_tci, struct rule_dpif *rule,
+ uint8_t tcp_flags, const struct ofpbuf *packet)
{
ctx->ofproto = ofproto;
ctx->flow = *flow;
ctx->base_flow = ctx->flow;
ctx->base_flow.tun_id = 0;
ctx->base_flow.vlan_tci = initial_tci;
- ctx->cookie = cookie;
+ ctx->rule = rule;
ctx->packet = packet;
ctx->may_learn = packet != NULL;
+ ctx->tcp_flags = tcp_flags;
ctx->resubmit_hook = NULL;
}
ctx->may_set_up_flow = true;
ctx->has_learn = false;
ctx->has_normal = false;
+ ctx->has_fin_timeout = false;
ctx->nf_output_iface = NF_OUT_DROP;
ctx->mirrors = 0;
ctx->recurse = 0;
table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
{
struct table_dpif *table = &ofproto->tables[table_id];
- const struct classifier *cls = &ofproto->up.tables[table_id];
+ const struct oftable *oftable = &ofproto->up.tables[table_id];
struct cls_table *catchall, *other;
struct cls_table *t;
catchall = other = NULL;
- switch (hmap_count(&cls->tables)) {
+ switch (hmap_count(&oftable->cls.tables)) {
case 0:
/* We could tag this OpenFlow table but it would make the logic a
* little harder and it's a corner case that doesn't seem worth it
case 1:
case 2:
- HMAP_FOR_EACH (t, hmap_node, &cls->tables) {
+ HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
if (cls_table_is_catchall(t)) {
catchall = t;
} else if (!other) {
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, flow);
- action_xlate_ctx_init(&push.ctx, ofproto, flow, flow->vlan_tci, 0,
- packet);
+ action_xlate_ctx_init(&push.ctx, ofproto, flow, flow->vlan_tci, NULL,
+ packet_get_tcp_flags(packet, flow), packet);
/* Ensure that resubmits in 'ofp_actions' get accounted to their
* matching rules. */
if (argc > 1) {
ofproto = ofproto_dpif_lookup(argv[1]);
if (!ofproto) {
- unixctl_command_reply(conn, 501, "no such bridge");
+ unixctl_command_reply_error(conn, "no such bridge");
return;
}
mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
}
}
- unixctl_command_reply(conn, 200, "table successfully flushed");
+ unixctl_command_reply(conn, "table successfully flushed");
}
static void
ofproto = ofproto_dpif_lookup(argv[1]);
if (!ofproto) {
- unixctl_command_reply(conn, 501, "no such bridge");
+ unixctl_command_reply_error(conn, "no such bridge");
return;
}
struct ofbundle *bundle = e->port.p;
ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
ofbundle_get_a_port(bundle)->odp_port,
- e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
+ e->vlan, ETH_ADDR_ARGS(e->mac),
+ mac_entry_age(ofproto->ml, e));
}
- unixctl_command_reply(conn, 200, ds_cstr(&ds));
+ unixctl_command_reply(conn, ds_cstr(&ds));
ds_destroy(&ds);
}
ds_put_char(result, '\n');
}
+/* Appends a line of the form "<title>: <formatted datapath actions>" to
+ * 'result', indented by 'level' tab stops.  The actions shown are those
+ * accumulated so far in 'trace''s translation context; called from the
+ * resubmit trace hook to show intermediate odp actions. */
+static void
+trace_format_odp(struct ds *result, int level, const char *title,
+                 struct ofproto_trace *trace)
+{
+    struct ofpbuf *odp_actions = trace->ctx.odp_actions;
+
+    ds_put_char_multiple(result, '\t', level);
+    ds_put_format(result, "%s: ", title);
+    format_odp_actions(result, odp_actions->data, odp_actions->size);
+    ds_put_char(result, '\n');
+}
+
static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
ds_put_char(result, '\n');
trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
+ trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
}
ofproto = ofproto_dpif_lookup(dpname);
if (!ofproto) {
- unixctl_command_reply(conn, 501, "Unknown ofproto (use ofproto/list "
- "for help)");
+ unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
+ "for help)");
goto exit;
}
if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
ofpbuf_init(&odp_key, 0);
error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
if (error) {
- unixctl_command_reply(conn, 501, "Bad flow syntax");
+ unixctl_command_reply_error(conn, "Bad flow syntax");
goto exit;
}
odp_key.size, &flow,
&initial_tci, NULL);
if (error == ODP_FIT_ERROR) {
- unixctl_command_reply(conn, 501, "Invalid flow");
+ unixctl_command_reply_error(conn, "Invalid flow");
goto exit;
}
msg = eth_from_hex(packet_s, &packet);
if (msg) {
- unixctl_command_reply(conn, 501, msg);
+ unixctl_command_reply_error(conn, msg);
goto exit;
}
flow_extract(packet, priority, tun_id, in_port, &flow);
initial_tci = flow.vlan_tci;
} else {
- unixctl_command_reply(conn, 501, "Bad command syntax");
+ unixctl_command_reply_error(conn, "Bad command syntax");
goto exit;
}
if (rule) {
struct ofproto_trace trace;
struct ofpbuf *odp_actions;
+ uint8_t tcp_flags;
+ tcp_flags = packet ? packet_get_tcp_flags(packet, &flow) : 0;
trace.result = &result;
trace.flow = flow;
action_xlate_ctx_init(&trace.ctx, ofproto, &flow, initial_tci,
- rule->up.flow_cookie, packet);
+ rule, tcp_flags, packet);
trace.ctx.resubmit_hook = trace_resubmit;
odp_actions = xlate_actions(&trace.ctx,
rule->up.actions, rule->up.n_actions);
}
}
- unixctl_command_reply(conn, 200, ds_cstr(&result));
+ unixctl_command_reply(conn, ds_cstr(&result));
exit:
ds_destroy(&result);
const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
clogged = true;
- unixctl_command_reply(conn, 200, NULL);
+ unixctl_command_reply(conn, NULL);
}
static void
const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
clogged = false;
- unixctl_command_reply(conn, 200, NULL);
+ unixctl_command_reply(conn, NULL);
}
/* Runs a self-check of flow translations in 'ofproto'. Appends a message to
if (argc > 1) {
ofproto = ofproto_dpif_lookup(argv[1]);
if (!ofproto) {
- unixctl_command_reply(conn, 501, "Unknown ofproto (use "
- "ofproto/list for help)");
+ unixctl_command_reply_error(conn, "Unknown ofproto (use "
+ "ofproto/list for help)");
return;
}
ofproto_dpif_self_check__(ofproto, &reply);
}
}
- unixctl_command_reply(conn, 200, ds_cstr(&reply));
+ unixctl_command_reply(conn, ds_cstr(&reply));
ds_destroy(&reply);
}
set_flood_vlans,
is_mirror_output_bundle,
forward_bpdu_changed,
+ set_mac_idle_time,
set_realdev,
};