bool need_revalidate;
long long int next_expiration;
struct tag_set revalidate_set;
+ bool tun_id_from_cookie;
/* OpenFlow connections. */
struct list all_conns;
return;
}
- /* Destroy fail-open early, because it touches the classifier. */
+ /* Destroy fail-open and in-band early, since they touch the classifier. */
ofproto_set_failure(p, false);
+ ofproto_set_in_band(p, false);
ofproto_flush_flows(p);
classifier_destroy(&p->cls);
shash_destroy(&p->port_by_name);
switch_status_destroy(p->switch_status);
- in_band_destroy(p->in_band);
discovery_destroy(p->discovery);
pinsched_destroy(p->miss_sched);
pinsched_destroy(p->action_sched);
rule = rule_create(p, NULL, actions, n_actions,
idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
0, 0, false);
- cls_rule_from_flow(&rule->cr, flow, wildcards, priority);
+ cls_rule_from_flow(flow, wildcards, priority, &rule->cr);
rule_insert(p, rule, NULL, 0);
}
/* Send the packet and credit it to the rule. */
if (packet) {
flow_t flow;
- flow_extract(packet, in_port, &flow);
+ flow_extract(packet, 0, in_port, &flow);
rule_execute(p, rule, packet, &flow);
}
rule->idle_timeout, rule->hard_timeout,
0, false);
COVERAGE_INC(ofproto_subrule_create);
- cls_rule_from_flow(&subrule->cr, flow, 0,
- (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
- : rule->cr.priority));
+ cls_rule_from_flow(flow, 0, (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
+ : rule->cr.priority), &subrule->cr);
classifier_insert_exact(&ofproto->cls, &subrule->cr);
return subrule;
put->flow.key = rule->cr.flow;
put->flow.actions = rule->odp_actions;
put->flow.n_actions = rule->n_odp_actions;
+ put->flow.flags = 0;
put->flags = flags;
return dpif_flow_put(ofproto->dpif, put);
}
odp_flow.key = rule->cr.flow;
odp_flow.actions = NULL;
odp_flow.n_actions = 0;
+ odp_flow.flags = 0;
if (!dpif_flow_del(p->dpif, &odp_flow)) {
update_stats(p, rule, &odp_flow.stats);
}
xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
{
if (!ctx->recurse) {
- uint16_t old_in_port = ctx->flow.in_port;
+ uint16_t old_in_port;
struct rule *rule;
+ /* Look up a flow with 'in_port' as the input port. Then restore the
+ * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
+ * have surprising behavior). */
+ old_in_port = ctx->flow.in_port;
ctx->flow.in_port = in_port;
rule = lookup_valid_rule(ctx->ofproto, &ctx->flow);
+ ctx->flow.in_port = old_in_port;
+
if (rule) {
if (rule->super) {
rule = rule->super;
do_xlate_actions(rule->actions, rule->n_actions, ctx);
ctx->recurse--;
}
- ctx->flow.in_port = old_in_port;
}
}
const struct nx_action_header *nah)
{
const struct nx_action_resubmit *nar;
+ const struct nx_action_set_tunnel *nast;
+ union odp_action *oa;
int subtype = ntohs(nah->subtype);
assert(nah->vendor == htonl(NX_VENDOR_ID));
xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
break;
+ case NXAST_SET_TUNNEL:
+ nast = (const struct nx_action_set_tunnel *) nah;
+ oa = odp_actions_add(ctx->out, ODPAT_SET_TUNNEL);
+ ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
+ break;
+
    /* If you add a new action here that modifies flow data, don't forget to
     * update the flow key in ctx->flow in the same way. */
buffer = NULL;
}
- flow_extract(&payload, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
+ flow_extract(&payload, 0, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
&flow, p, &payload, &actions, NULL, NULL, NULL);
if (error) {
memset(ots, 0, sizeof *ots);
ots->table_id = TABLEID_CLASSIFIER;
strcpy(ots->name, "classifier");
- ots->wildcards = htonl(OFPFW_ALL);
+ ots->wildcards = p->tun_id_from_cookie ? htonl(OVSFW_ALL)
+ : htonl(OFPFW_ALL);
ots->max_entries = htonl(65536);
ots->active_count = htonl(n_wild);
ots->lookup_count = htonll(0); /* XXX */
ofs->length = htons(len);
ofs->table_id = rule->cr.wc.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
ofs->pad = 0;
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofs->match);
+ flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
+ cbdata->ofproto->tun_id_from_cookie, &ofs->match);
ofs->duration_sec = htonl(sec);
ofs->duration_nsec = htonl(msec * 1000000);
ofs->cookie = rule->flow_cookie;
cbdata.ofconn = ofconn;
cbdata.out_port = fsr->out_port;
cbdata.msg = start_stats_reply(osr, 1024);
- cls_rule_from_match(&target, &fsr->match, 0);
+ cls_rule_from_match(&fsr->match, 0, false, 0, &target);
classifier_for_each_match(&p->cls, &target,
table_id_to_include(fsr->table_id),
flow_stats_cb, &cbdata);
}
query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &match);
+ flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
+ cbdata->ofproto->tun_id_from_cookie, &match);
ds_put_format(results, "duration=%llds, ",
(time_msec() - rule->created) / 1000);
struct flow_stats_ds_cbdata cbdata;
memset(&match, 0, sizeof match);
- match.wildcards = htonl(OFPFW_ALL);
+ match.wildcards = htonl(OVSFW_ALL);
cbdata.ofproto = p;
cbdata.results = results;
- cls_rule_from_match(&target, &match, 0);
+ cls_rule_from_match(&match, 0, false, 0, &target);
classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
flow_stats_ds_cb, &cbdata);
}
cbdata.packet_count = 0;
cbdata.byte_count = 0;
cbdata.n_flows = 0;
- cls_rule_from_match(&target, &asr->match, 0);
+ cls_rule_from_match(&asr->match, 0, false, 0, &target);
classifier_for_each_match(&p->cls, &target,
table_id_to_include(asr->table_id),
aggregate_stats_cb, &cbdata);
flow_t flow;
uint32_t wildcards;
- flow_from_match(&flow, &wildcards, &ofm->match);
+ flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie,
+ &flow, &wildcards);
if (classifier_rule_overlaps(&p->cls, &flow, wildcards,
ntohs(ofm->priority))) {
return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
n_actions, ntohs(ofm->idle_timeout),
ntohs(ofm->hard_timeout), ofm->cookie,
ofm->flags & htons(OFPFF_SEND_FLOW_REM));
- cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
+ cls_rule_from_match(&ofm->match, ntohs(ofm->priority),
+ p->tun_id_from_cookie, ofm->cookie, &rule->cr);
error = 0;
if (ofm->buffer_id != htonl(UINT32_MAX)) {
uint32_t wildcards;
flow_t flow;
- flow_from_match(&flow, &wildcards, &ofm->match);
+ flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie,
+ &flow, &wildcards);
return rule_from_cls_rule(classifier_find_rule_exactly(
&p->cls, &flow, wildcards,
ntohs(ofm->priority)));
return error;
}
- flow_extract(packet, in_port, &flow);
+ flow_extract(packet, 0, in_port, &flow);
rule_execute(ofproto, rule, packet, &flow);
ofpbuf_delete(packet);
cbdata.n_actions = n_actions;
cbdata.match = NULL;
- cls_rule_from_match(&target, &ofm->match, 0);
+ cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
+ &target);
classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
modify_flows_cb, &cbdata);
cbdata.ofproto = p;
cbdata.out_port = ofm->out_port;
- cls_rule_from_match(&target, &ofm->match, 0);
+ cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
+ &target);
classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
delete_flows_cb, &cbdata);
}
}
+/* Handles an NXT_TUN_ID_FROM_COOKIE vendor request 'msg' received on 'p'.
+ *
+ * Validates the message length with check_ofp_message() and, on success,
+ * records the requested mode in 'p->tun_id_from_cookie' (normalized to
+ * 0 or 1 via '!!').  Elsewhere in this file that flag selects whether
+ * flow matches/cookies carry the tunnel ID (see the flow_to_match() /
+ * flow_from_match() call sites) -- presumably the NXT extension that
+ * encodes tun_id in the cookie field; confirm against nicira-ext.h.
+ *
+ * Returns 0 on success, otherwise an OpenFlow error code produced by
+ * check_ofp_message(). */
+static int
+handle_tun_id_from_cookie(struct ofproto *p, struct nxt_tun_id_cookie *msg)
+{
+    int error;
+
+    error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg);
+    if (error) {
+        return error;
+    }
+
+    p->tun_id_from_cookie = !!msg->set;
+    return 0;
+}
+
static int
handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
{
struct nicira_header *nh;
if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
+ VLOG_WARN_RL(&rl, "received vendor message of length %zu "
+ "(expected at least %zu)",
+ ntohs(ovh->header.length), sizeof(struct ofp_vendor_header));
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
}
if (ovh->vendor != htonl(NX_VENDOR_ID)) {
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
}
if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
+ VLOG_WARN_RL(&rl, "received Nicira vendor message of length %zu "
+ "(expected at least %zu)",
+ ntohs(ovh->header.length), sizeof(struct nicira_header));
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
}
case NXT_STATUS_REQUEST:
return switch_status_handle_request(p->switch_status, ofconn->rconn,
msg);
+
+ case NXT_TUN_ID_FROM_COOKIE:
+ return handle_tun_id_from_cookie(p, msg);
}
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
payload.data = msg + 1;
payload.size = msg->length - sizeof *msg;
- flow_extract(&payload, msg->port, &flow);
+ flow_extract(&payload, msg->arg, msg->port, &flow);
/* Check with in-band control to see if this packet should be sent
* to the local port regardless of the flow table. */
}
static struct ofpbuf *
-compose_flow_removed(const struct rule *rule, long long int now, uint8_t reason)
+compose_flow_removed(struct ofproto *p, const struct rule *rule,
+ long long int now, uint8_t reason)
{
struct ofp_flow_removed *ofr;
struct ofpbuf *buf;
uint32_t msec = tdiff - (sec * 1000);
ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofr->match);
+ flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, p->tun_id_from_cookie,
+ &ofr->match);
ofr->cookie = rule->flow_cookie;
ofr->priority = htons(rule->cr.priority);
ofr->reason = reason;
if (prev) {
queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
} else {
- buf = compose_flow_removed(rule, now, reason);
+ buf = compose_flow_removed(p, rule, now, reason);
}
prev = ofconn;
}