}
int
-ofproto_set_stp(struct ofproto *ofproto UNUSED, bool enable_stp)
+ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp)
{
/* XXX STP is not yet implemented; report success so callers that check
 * the return value do not read an indeterminate int (bare "return;" in an
 * int function is a C99 constraint violation). */
if (enable_stp) {
-return;
+return 0;
}
+ /* Destroy fail-open early, because it touches the classifier. */
+ ofproto_set_failure(p, false);
+
ofproto_flush_flows(p);
classifier_destroy(&p->cls);
switch_status_destroy(p->switch_status);
in_band_destroy(p->in_band);
discovery_destroy(p->discovery);
- fail_open_destroy(p->fail_open);
pinsched_destroy(p->miss_sched);
pinsched_destroy(p->action_sched);
netflow_destroy(p->netflow);
(1u << OFPAT_SET_DL_DST) |
(1u << OFPAT_SET_NW_SRC) |
(1u << OFPAT_SET_NW_DST) |
+ (1u << OFPAT_SET_NW_TOS) |
(1u << OFPAT_SET_TP_SRC) |
(1u << OFPAT_SET_TP_DST));
case OFPAT_SET_NW_DST:
oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
+ break;
+
+ case OFPAT_SET_NW_TOS:
+ oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS);
+ oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
break;
case OFPAT_SET_TP_SRC:
ofs->priority = htons(rule->cr.priority);
ofs->idle_timeout = htons(rule->idle_timeout);
ofs->hard_timeout = htons(rule->hard_timeout);
- memset(ofs->pad2, 0, sizeof ofs->pad2);
+ ofs->pad2 = 0;
ofs->packet_count = htonll(packet_count);
ofs->byte_count = htonll(byte_count);
memcpy(ofs->actions, rule->actions, act_len);
struct cls_rule target;
if (arg_size != sizeof *fsr) {
- return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
}
fsr = (struct ofp_flow_stats_request *) osr->body;
struct ofpbuf *msg;
if (arg_size != sizeof *asr) {
- return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
}
asr = (struct ofp_aggregate_stats_request *) osr->body;
uint16_t in_port;
int error;
+ if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) {
+ flow_t flow;
+ uint32_t wildcards;
+
+ flow_from_match(&flow, &wildcards, &ofm->match);
+ if (classifier_rule_overlaps(&p->cls, &flow, wildcards,
+ ntohs(ofm->priority))) {
+ return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
+ }
+ }
+
rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
n_actions, ntohs(ofm->idle_timeout),
ntohs(ofm->hard_timeout));
cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
- packet = NULL;
error = 0;
if (ofm->buffer_id != htonl(UINT32_MAX)) {
error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
&packet, &in_port);
+ } else {
+ packet = NULL;
+ in_port = UINT16_MAX;
}
rule_insert(p, rule, packet, in_port);
return error;
}
+ /* We do not support the emergency flow cache. It will hopefully
+ * get dropped from OpenFlow in the near future. */
+ if (ofm->flags & htons(OFPFF_EMERG)) {
+ /* There isn't a good fit for an error code, so just state that the
+ * flow table is full. */
+ return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
+ }
+
normalize_match(&ofm->match);
if (!ofm->match.wildcards) {
ofm->priority = htons(UINT16_MAX);
struct nicira_header *nh;
if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
- return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
}
if (ovh->vendor != htonl(NX_VENDOR_ID)) {
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
}
if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
- return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
+ return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
}
nh = msg;