bool need_revalidate;
long long int next_expiration;
struct tag_set revalidate_set;
- bool tun_id_from_cookie;
+ int flow_format; /* One of NXFF_*. */
/* OpenFlow connections. */
struct hmap controllers; /* Controller "struct ofconn"s. */
p->need_revalidate = false;
p->next_expiration = time_msec() + 1000;
tag_set_init(&p->revalidate_set);
+ p->flow_format = NXFF_OPENFLOW10;
/* Initialize OpenFlow connections. */
list_init(&p->all_conns);
}
}
-static void
-send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
- int error, const void *data, size_t len)
-{
- struct ofpbuf *buf;
- struct ofp_error_msg *oem;
-
- if (!(error >> 16)) {
- VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
- error);
- return;
- }
-
- COVERAGE_INC(ofproto_error);
- oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
- oh ? oh->xid : 0, &buf);
- oem->type = htons((unsigned int) error >> 16);
- oem->code = htons(error & 0xffff);
- memcpy(oem->data, data, len);
- queue_tx(buf, ofconn, ofconn->reply_counter);
-}
-
static void
send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
              int error)
{
-    size_t oh_length = ntohs(oh->length);
-    send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
+    /* Compose an OFPT_ERROR reply for 'error', echoing the offending
+     * request 'oh' back to the controller.  make_ofp_error_msg()
+     * presumably returns a null buffer when 'error' is not encodable as
+     * an OpenFlow error (matching the "bad error code" check in the
+     * send_error() helper this replaces) -- TODO confirm against its
+     * definition; hence the null check before queueing. */
+    struct ofpbuf *buf = make_ofp_error_msg(error, oh);
+    if (buf) {
+        COVERAGE_INC(ofproto_error);
+        queue_tx(buf, ofconn, ofconn->reply_counter);
+    }
}
static void
ots = append_stats_reply(sizeof *ots, ofconn, &msg);
memset(ots, 0, sizeof *ots);
strcpy(ots->name, "classifier");
- ots->wildcards = p->tun_id_from_cookie ? htonl(OVSFW_ALL)
- : htonl(OFPFW_ALL);
+ ots->wildcards = (p->flow_format == NXFF_OPENFLOW10
+ ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL));
ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. */
ots->active_count = htonl(n_rules);
ots->lookup_count = htonll(0); /* XXX */
ofs->table_id = 0;
ofs->pad = 0;
flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
- cbdata->ofproto->tun_id_from_cookie, &ofs->match);
+ cbdata->ofproto->flow_format, &ofs->match);
ofs->duration_sec = htonl(sec);
ofs->duration_nsec = htonl(msec * 1000000);
ofs->cookie = rule->flow_cookie;
cbdata.ofconn = ofconn;
cbdata.out_port = fsr->out_port;
cbdata.msg = start_stats_reply(osr, 1024);
- cls_rule_from_match(&fsr->match, 0, false, 0, &target);
+ cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0, &target);
classifier_for_each_match(&p->cls, &target,
table_id_to_include(fsr->table_id),
flow_stats_cb, &cbdata);
query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
- cbdata->ofproto->tun_id_from_cookie, &match);
+ cbdata->ofproto->flow_format, &match);
ds_put_format(results, "duration=%llds, ",
(time_msec() - rule->created) / 1000);
cbdata.ofproto = p;
cbdata.results = results;
- cls_rule_from_match(&match, 0, false, 0, &target);
+ cls_rule_from_match(&match, 0, NXFF_OPENFLOW10, 0, &target);
classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
flow_stats_ds_cb, &cbdata);
}
cbdata.packet_count = 0;
cbdata.byte_count = 0;
cbdata.n_flows = 0;
- cls_rule_from_match(&asr->match, 0, false, 0, &target);
+ cls_rule_from_match(&asr->match, 0, NXFF_OPENFLOW10, 0, &target);
classifier_for_each_match(&p->cls, &target,
table_id_to_include(asr->table_id),
aggregate_stats_cb, &cbdata);
struct cls_rule cr;
cls_rule_from_match(&ofm->match, ntohs(ofm->priority),
- p->tun_id_from_cookie, ofm->cookie, &cr);
+ p->flow_format, ofm->cookie, &cr);
if (classifier_rule_overlaps(&p->cls, &cr)) {
return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
}
ntohs(ofm->hard_timeout), ofm->cookie,
ofm->flags & htons(OFPFF_SEND_FLOW_REM));
cls_rule_from_match(&ofm->match, ntohs(ofm->priority),
- p->tun_id_from_cookie, ofm->cookie, &rule->cr);
+ p->flow_format, ofm->cookie, &rule->cr);
error = 0;
if (ofm->buffer_id != htonl(UINT32_MAX)) {
struct cls_rule target;
cls_rule_from_match(&ofm->match, ntohs(ofm->priority),
- p->tun_id_from_cookie, ofm->cookie, &target);
+ p->flow_format, ofm->cookie, &target);
return rule_from_cls_rule(classifier_find_rule_exactly(&p->cls, &target));
}
cbdata.n_actions = n_actions;
cbdata.match = NULL;
- cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
- &target);
+ cls_rule_from_match(&ofm->match, 0, p->flow_format, ofm->cookie, &target);
classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
modify_flows_cb, &cbdata);
cbdata.ofproto = p;
cbdata.out_port = ofm->out_port;
- cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
- &target);
+ cls_rule_from_match(&ofm->match, 0, p->flow_format, ofm->cookie, &target);
classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
delete_flows_cb, &cbdata);
return error;
}
- p->tun_id_from_cookie = !!msg->set;
+ p->flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10;
return 0;
}
}
ofconn->role = role;
- reply = make_openflow_xid(sizeof *reply, OFPT_VENDOR, msg->header.xid,
- &buf);
- reply->nxh.vendor = htonl(NX_VENDOR_ID);
- reply->nxh.subtype = htonl(NXT_ROLE_REPLY);
+ reply = make_nxmsg_xid(sizeof *reply, NXT_ROLE_REPLY, msg->header.xid,
+ &buf);
reply->role = htonl(role);
queue_tx(buf, ofconn, ofconn->reply_counter);
uint32_t msec = tdiff - (sec * 1000);
ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, p->tun_id_from_cookie,
+ flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, p->flow_format,
&ofr->match);
ofr->cookie = rule->flow_cookie;
ofr->priority = htons(rule->cr.priority);
long long int now, uint8_t reason)
{
struct ofconn *ofconn;
- struct ofconn *prev;
- struct ofpbuf *buf = NULL;
if (!rule->send_flow_removed) {
return;
}
- /* We limit the maximum number of queued flow expirations it by accounting
- * them under the counter for replies. That works because preventing
- * OpenFlow requests from being processed also prevents new flows from
- * being added (and expiring). (It also prevents processing OpenFlow
- * requests that would not add new flows, so it is imperfect.) */
-
- prev = NULL;
LIST_FOR_EACH (ofconn, node, &p->all_conns) {
- if (rconn_is_connected(ofconn->rconn)
- && ofconn_receives_async_msgs(ofconn)) {
- if (prev) {
- queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
- } else {
- buf = compose_flow_removed(p, rule, now, reason);
- }
- prev = ofconn;
+ struct ofpbuf *msg;
+
+ if (!rconn_is_connected(ofconn->rconn)
+ || !ofconn_receives_async_msgs(ofconn)) {
+ continue;
}
- }
- if (prev) {
- queue_tx(buf, prev, prev->reply_counter);
+
+ msg = compose_flow_removed(p, rule, now, reason);
+
+ /* Account flow expirations under ofconn->reply_counter, the counter
+ * for replies to OpenFlow requests. That works because preventing
+ * OpenFlow requests from being processed also prevents new flows from
+ * being added (and expiring). (It also prevents processing OpenFlow
+ * requests that would not add new flows, so it is imperfect.) */
+ queue_tx(msg, ofconn, ofconn->reply_counter);
}
}