static void ofport_free(struct ofport *);
static void ofport_run(struct ofproto *, struct ofport *);
static void ofport_wait(struct ofport *);
-static void hton_ofp_phy_port(struct ofp_phy_port *);
struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *);
static void ofconn_wait(struct ofconn *);
+
static bool ofconn_receives_async_msgs(const struct ofconn *);
static char *ofconn_make_name(const struct ofproto *, const char *target);
static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst);
+static struct ofproto *ofconn_get_ofproto(struct ofconn *);
+
+static enum nx_flow_format ofconn_get_flow_format(struct ofconn *);
+static void ofconn_set_flow_format(struct ofconn *, enum nx_flow_format);
+
+static int ofconn_get_miss_send_len(const struct ofconn *);
+static void ofconn_set_miss_send_len(struct ofconn *, int miss_send_len);
+
+static enum ofconn_type ofconn_get_type(const struct ofconn *);
+
+static enum nx_role ofconn_get_role(const struct ofconn *);
+static void ofconn_set_role(struct ofconn *, enum nx_role);
+
static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
struct rconn_packet_counter *counter);
ofport->cfm->mpid = cfm->mpid;
ofport->cfm->interval = cfm->interval;
- memcpy(ofport->cfm->eth_src, cfm->eth_src, ETH_ADDR_LEN);
memcpy(ofport->cfm->maid, cfm->maid, CCM_MAID_LEN);
cfm_update_remote_mps(ofport->cfm, remote_mps, n_remote_mps);
static int
snoop_preference(const struct ofconn *ofconn)
{
- switch (ofconn->role) {
+ switch (ofconn_get_role(ofconn)) {
case NX_ROLE_MASTER:
return 3;
case NX_ROLE_OTHER:
/* Pick a controller for monitoring. */
best = NULL;
LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
- if (ofconn->type == OFCONN_PRIMARY
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY
&& (!best || snoop_preference(ofconn) > snoop_preference(best))) {
best = ofconn;
}
shash_add(info, rconn_get_target(rconn), cinfo);
cinfo->is_connected = rconn_is_connected(rconn);
- cinfo->role = ofconn->role;
+ cinfo->role = ofconn_get_role(ofconn);
cinfo->pairs.n = 0;
/* Primary controllers, even slaves, should always get port status
updates. Otherwise obey ofconn_receives_async_msgs(). */
- if (ofconn->type != OFCONN_PRIMARY
+ if (ofconn_get_type(ofconn) != OFCONN_PRIMARY
&& !ofconn_receives_async_msgs(ofconn)) {
continue;
}
ofport_run(struct ofproto *ofproto, struct ofport *ofport)
{
    if (ofport->cfm) {
-        struct ofpbuf *packet = cfm_run(ofport->cfm);
-        if (packet) {
-            ofproto_send_packet(ofproto, ofport->odp_port, 0, packet);
-            ofpbuf_delete(packet);
+        cfm_run(ofport->cfm);
+
+        if (cfm_should_send_ccm(ofport->cfm)) {
+            struct ofpbuf packet;
+            struct ccm *ccm;
+
+            /* The CCM is composed in a stack-allocated ofpbuf, so it is
+             * released with ofpbuf_uninit() (not ofpbuf_delete()) below. */
+            ofpbuf_init(&packet, 0);
+            ccm = compose_packet(&packet, eth_addr_ccm, ofport->opp.hw_addr,
+                                 ETH_TYPE_CFM, sizeof *ccm);
+            cfm_compose_ccm(ofport->cfm, ccm);
+            ofproto_send_packet(ofproto, ofport->odp_port, 0, &packet);
+            ofpbuf_uninit(&packet);
        }
    }
}
static void
ofconn_destroy(struct ofconn *ofconn)
{
- if (ofconn->type == OFCONN_PRIMARY) {
- hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY) {
+ hmap_remove(&ofproto->controllers, &ofconn->hmap_node);
}
list_remove(&ofconn->node);
static void
ofconn_run(struct ofconn *ofconn)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
int iteration;
size_t i;
static bool
ofconn_receives_async_msgs(const struct ofconn *ofconn)
{
- if (ofconn->type == OFCONN_PRIMARY) {
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY) {
/* Primary controllers always get asynchronous messages unless they
* have configured themselves as "slaves". */
- return ofconn->role != NX_ROLE_SLAVE;
+ return ofconn_get_role(ofconn) != NX_ROLE_SLAVE;
} else {
/* Service connections don't get asynchronous messages unless they have
* explicitly asked for them by setting a nonzero miss send length. */
}
}
}
+
+/* Returns the ofproto that 'ofconn' is associated with. */
+static struct ofproto *
+ofconn_get_ofproto(struct ofconn *ofconn)
+{
+    return ofconn->ofproto;
+}
+
+/* Returns the flow format (one of NXFF_*) currently in use on 'ofconn'. */
+static enum nx_flow_format
+ofconn_get_flow_format(struct ofconn *ofconn)
+{
+    return ofconn->flow_format;
+}
+
+/* Sets the flow format used on 'ofconn' to 'flow_format' (one of NXFF_*). */
+static void
+ofconn_set_flow_format(struct ofconn *ofconn, enum nx_flow_format flow_format)
+{
+    ofconn->flow_format = flow_format;
+}
+
+/* Returns the number of bytes of "packet in" messages sent on 'ofconn'. */
+static int
+ofconn_get_miss_send_len(const struct ofconn *ofconn)
+{
+    return ofconn->miss_send_len;
+}
+
+/* Sets the number of bytes of "packet in" messages sent on 'ofconn' to
+ * 'miss_send_len'. */
+static void
+ofconn_set_miss_send_len(struct ofconn *ofconn, int miss_send_len)
+{
+    ofconn->miss_send_len = miss_send_len;
+}
+
+/* Returns the type of 'ofconn' (e.g. OFCONN_PRIMARY for a connection that
+ * a controller initiated). */
+static enum ofconn_type
+ofconn_get_type(const struct ofconn *ofconn)
+{
+    return ofconn->type;
+}
+
+/* Returns the Nicira role (NX_ROLE_MASTER, NX_ROLE_SLAVE, or NX_ROLE_OTHER)
+ * configured on 'ofconn'. */
+static enum nx_role
+ofconn_get_role(const struct ofconn *ofconn)
+{
+    return ofconn->role;
+}
+
+/* Sets the Nicira role of 'ofconn' to 'role'. */
+static void
+ofconn_set_role(struct ofconn *ofconn, enum nx_role role)
+{
+    ofconn->role = role;
+}
\f
static void
ofservice_reconfigure(struct ofservice *ofservice,
}
}
+/* Sends 'msg' on 'ofconn', accounting it against the connection's reply
+ * counter (the counter used for replies to OpenFlow requests). */
+static void
+ofconn_send_reply(const struct ofconn *ofconn, struct ofpbuf *msg)
+{
+    queue_tx(msg, ofconn, ofconn->reply_counter);
+}
+
static void
send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
int error)
struct ofpbuf *buf = ofputil_encode_error_msg(error, oh);
if (buf) {
COVERAGE_INC(ofproto_error);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
}
}
-static void
-hton_ofp_phy_port(struct ofp_phy_port *opp)
-{
- opp->port_no = htons(opp->port_no);
- opp->config = htonl(opp->config);
- opp->state = htonl(opp->state);
- opp->curr = htonl(opp->curr);
- opp->advertised = htonl(opp->advertised);
- opp->supported = htonl(opp->supported);
- opp->peer = htonl(opp->peer);
-}
-
+/* Handles an OFPT_ECHO_REQUEST 'oh' by replying on 'ofconn'.  Returns 0. */
static int
handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
-    queue_tx(make_echo_reply(oh), ofconn, ofconn->reply_counter);
+    ofconn_send_reply(ofconn, make_echo_reply(oh));
    return 0;
}
static int
handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofp_switch_features *osf;
struct ofpbuf *buf;
struct ofport *port;
osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
- osf->datapath_id = htonll(ofconn->ofproto->datapath_id);
+ osf->datapath_id = htonll(ofproto->datapath_id);
osf->n_buffers = htonl(pktbuf_capacity());
osf->n_tables = 2;
osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
(1u << OFPAT_SET_TP_DST) |
(1u << OFPAT_ENQUEUE));
- HMAP_FOR_EACH (port, hmap_node, &ofconn->ofproto->ports) {
+ HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
}
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
return 0;
}
+/* Handles an OFPT_GET_CONFIG_REQUEST 'oh' by replying on 'ofconn' with the
+ * datapath's fragment-handling flags and this connection's miss send length.
+ * Returns 0. */
static int
handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofpbuf *buf;
    struct ofp_switch_config *osc;
    uint16_t flags;
    bool drop_frags;
    /* Figure out flags. */
-    dpif_get_drop_frags(ofconn->ofproto->dpif, &drop_frags);
+    dpif_get_drop_frags(ofproto->dpif, &drop_frags);
    flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
    /* Send reply. */
    osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
    osc->flags = htons(flags);
-    osc->miss_send_len = htons(ofconn->miss_send_len);
-    queue_tx(buf, ofconn, ofconn->reply_counter);
+    osc->miss_send_len = htons(ofconn_get_miss_send_len(ofconn));
+    ofconn_send_reply(ofconn, buf);
    return 0;
}
static int
handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
uint16_t flags = ntohs(osc->flags);
- if (ofconn->type == OFCONN_PRIMARY && ofconn->role != NX_ROLE_SLAVE) {
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY
+ && ofconn_get_role(ofconn) != NX_ROLE_SLAVE) {
switch (flags & OFPC_FRAG_MASK) {
case OFPC_FRAG_NORMAL:
- dpif_set_drop_frags(ofconn->ofproto->dpif, false);
+ dpif_set_drop_frags(ofproto->dpif, false);
break;
case OFPC_FRAG_DROP:
- dpif_set_drop_frags(ofconn->ofproto->dpif, true);
+ dpif_set_drop_frags(ofproto->dpif, true);
break;
default:
VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
}
}
- ofconn->miss_send_len = ntohs(osc->miss_send_len);
+ ofconn_set_miss_send_len(ofconn, ntohs(osc->miss_send_len));
return 0;
}
static int
reject_slave_controller(struct ofconn *ofconn, const const char *msg_type)
{
- if (ofconn->type == OFCONN_PRIMARY && ofconn->role == NX_ROLE_SLAVE) {
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY
+ && ofconn_get_role(ofconn) == NX_ROLE_SLAVE) {
static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller",
msg_type);
static int
handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct ofp_packet_out *opo;
struct ofpbuf payload, *buffer;
union ofp_action *ofp_actions;
static int
handle_port_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
const struct ofp_port_mod *opm = (const struct ofp_port_mod *) oh;
struct ofport *port;
int error;
struct ofp_stats_reply *reply = msg->data;
reply->flags = htons(OFPSF_REPLY_MORE);
*msgp = make_ofp_stats_reply(reply->header.xid, reply->type, nbytes);
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
}
return ofpbuf_put_uninit(*msgp, nbytes);
}
struct nicira_stats_msg *reply = msg->data;
reply->flags = htons(OFPSF_REPLY_MORE);
*msgp = make_nxstats_reply(reply->header.xid, reply->subtype, nbytes);
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
}
ofpbuf_prealloc_tailroom(*msgp, nbytes);
}
handle_desc_stats_request(struct ofconn *ofconn,
const struct ofp_header *request)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct ofp_desc_stats *ods;
struct ofpbuf *msg;
ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc);
ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num);
ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
return 0;
}
handle_table_stats_request(struct ofconn *ofconn,
                           const struct ofp_header *request)
{
-    struct ofproto *p = ofconn->ofproto;
+    struct ofproto *p = ofconn_get_ofproto(ofconn);
    struct ofp_table_stats *ots;
    struct ofpbuf *msg;
    ots = append_ofp_stats_reply(sizeof *ots, ofconn, &msg);
    memset(ots, 0, sizeof *ots);
    strcpy(ots->name, "classifier");
-    ots->wildcards = (ofconn->flow_format == NXFF_OPENFLOW10
+    /* Connections not using the plain OpenFlow 1.0 flow format are
+     * advertised the extended OVSFW_* wildcard bits. */
+    ots->wildcards = (ofconn_get_flow_format(ofconn) == NXFF_OPENFLOW10
                      ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL));
    ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. */
    ots->active_count = htonl(classifier_count(&p->cls));
    put_32aligned_be64(&ots->lookup_count, htonll(0)); /* XXX */
    put_32aligned_be64(&ots->matched_count, htonll(0)); /* XXX */
-    queue_tx(msg, ofconn, ofconn->reply_counter);
+    ofconn_send_reply(ofconn, msg);
    return 0;
}
static int
handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
const struct ofp_port_stats_request *psr = ofputil_stats_body(oh);
struct ofp_port_stats *ops;
struct ofpbuf *msg;
}
}
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
return 0;
}
+/* Computes the time elapsed since 'start' (a time_msec() value) and stores
+ * it as whole seconds in '*sec' and the remaining fraction, in nanoseconds,
+ * in '*nsec', both in host byte order. */
static void
-calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec)
+calc_flow_duration__(long long int start, uint32_t *sec, uint32_t *nsec)
{
    long long int msecs = time_msec() - start;
-    *sec = htonl(msecs / 1000);
-    *nsec = htonl((msecs % 1000) * (1000 * 1000));
+    *sec = msecs / 1000;
+    *nsec = (msecs % 1000) * (1000 * 1000);
+}
+
+/* Like calc_flow_duration__(), except that the results are stored in
+ * network byte order, ready for use in an OpenFlow message. */
+static void
+calc_flow_duration(long long int start, ovs_be32 *sec_be, ovs_be32 *nsec_be)
+{
+    uint32_t sec, nsec;
+
+    calc_flow_duration__(start, &sec, &nsec);
+    *sec_be = htonl(sec);
+    *nsec_be = htonl(nsec);
+}
static void
ofs->length = htons(len);
ofs->table_id = 0;
ofs->pad = 0;
- ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match,
- rule->flow_cookie, &cookie);
+ ofputil_cls_rule_to_match(&rule->cr, ofconn_get_flow_format(ofconn),
+ &ofs->match, rule->flow_cookie, &cookie);
put_32aligned_be64(&ofs->cookie, cookie);
calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec);
ofs->priority = htons(rule->cr.priority);
handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
const struct ofp_flow_stats_request *fsr = ofputil_stats_body(oh);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofpbuf *reply;
COVERAGE_INC(ofproto_flows_req);
ofputil_cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0,
&target);
- cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target);
+ cls_cursor_init(&cursor, &ofproto->cls, &target);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
put_ofp_flow_stats(ofconn, rule, fsr->out_port, &reply);
}
}
- queue_tx(reply, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, reply);
return 0;
}
static int
handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct nx_flow_stats_request *nfsr;
struct cls_rule target;
struct ofpbuf *reply;
struct cls_cursor cursor;
struct rule *rule;
- cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target);
+ cls_cursor_init(&cursor, &ofproto->cls, &target);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
put_nx_flow_stats(ofconn, rule, nfsr->out_port, &reply);
}
}
- queue_tx(reply, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, reply);
return 0;
}
const struct ofp_header *oh)
{
const struct ofp_aggregate_stats_request *request = ofputil_stats_body(oh);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofp_aggregate_stats_reply *reply;
struct cls_rule target;
struct ofpbuf *msg;
msg = start_ofp_stats_reply(oh, sizeof *reply);
reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg);
- query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
+ query_aggregate_stats(ofproto, &target, request->out_port,
request->table_id, reply);
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
return 0;
}
static int
handle_nxst_aggregate(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct nx_aggregate_stats_request *request;
struct ofp_aggregate_stats_reply *reply;
struct cls_rule target;
COVERAGE_INC(ofproto_flows_req);
buf = start_nxstats_reply(&request->nsm, sizeof *reply);
reply = ofpbuf_put_uninit(buf, sizeof *reply);
- query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
+ query_aggregate_stats(ofproto, &target, request->out_port,
request->table_id, reply);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
return 0;
}
static int
handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *ofproto = ofconn->ofproto;
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
const struct ofp_queue_stats_request *qsr;
struct queue_stats_cbdata cbdata;
struct ofport *port;
ofpbuf_delete(cbdata.msg);
return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
}
- queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, cbdata.msg);
return 0;
}
* in which no matching flow already exists in the flow table.
*
* Adds the flow specified by 'ofm', which is followed by 'n_actions'
- * ofp_actions, to ofconn->ofproto's flow table. Returns 0 on success or an
+ * ofp_actions, to the ofproto's flow table. Returns 0 on success or an
* OpenFlow error code as encoded by ofp_mkerr() on failure.
*
* 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
static int
add_flow(struct ofconn *ofconn, struct flow_mod *fm)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct ofpbuf *packet;
struct rule *rule;
uint16_t in_port;
send_buffered_packet(struct ofconn *ofconn,
struct rule *rule, uint32_t buffer_id)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofpbuf *packet;
uint16_t in_port;
int error;
return error;
}
- rule_execute(ofconn->ofproto, rule, in_port, packet);
+ rule_execute(ofproto, rule, in_port, packet);
return 0;
}
static int
modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct rule *match = NULL;
struct cls_cursor cursor;
struct rule *rule;
static int
modify_flow_strict(struct ofconn *ofconn, struct flow_mod *fm)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct rule *rule = find_flow_strict(p, fm);
if (rule && !rule_is_hidden(rule)) {
modify_flow(p, fm, rule);
static int
handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct flow_mod fm;
int error;
return error;
}
- error = ofputil_decode_flow_mod(&fm, oh, ofconn->flow_format);
+ error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_flow_format(ofconn));
if (error) {
return error;
}
{
const struct nxt_tun_id_cookie *msg
= (const struct nxt_tun_id_cookie *) oh;
+ enum nx_flow_format flow_format;
+
+ flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10;
+ ofconn_set_flow_format(ofconn, flow_format);
- ofconn->flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10;
return 0;
}
struct ofpbuf *buf;
uint32_t role;
- if (ofconn->type != OFCONN_PRIMARY) {
+ if (ofconn_get_type(ofconn) != OFCONN_PRIMARY) {
VLOG_WARN_RL(&rl, "ignoring role request on non-controller "
"connection");
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
}
if (role == NX_ROLE_MASTER) {
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofconn *other;
- HMAP_FOR_EACH (other, hmap_node, &ofconn->ofproto->controllers) {
- if (other->role == NX_ROLE_MASTER) {
- other->role = NX_ROLE_SLAVE;
+ HMAP_FOR_EACH (other, hmap_node, &ofproto->controllers) {
+ if (ofconn_get_role(other) == NX_ROLE_MASTER) {
+ ofconn_set_role(other, NX_ROLE_SLAVE);
}
}
}
- ofconn->role = role;
+ ofconn_set_role(ofconn, role);
reply = make_nxmsg_xid(sizeof *reply, NXT_ROLE_REPLY, oh->xid, &buf);
reply->role = htonl(role);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
return 0;
}
if (format == NXFF_OPENFLOW10
|| format == NXFF_TUN_ID_FROM_COOKIE
|| format == NXFF_NXM) {
- ofconn->flow_format = format;
+ ofconn_set_flow_format(ofconn, format);
return 0;
} else {
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
/* Currently, everything executes synchronously, so we can just
* immediately send the barrier reply. */
ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
return 0;
}
rule_remove(ofproto, rule);
}
\f
-static struct ofpbuf *
-compose_ofp_flow_removed(struct ofconn *ofconn, const struct rule *rule,
- uint8_t reason)
-{
- struct ofp_flow_removed *ofr;
- struct ofpbuf *buf;
-
- ofr = make_openflow_xid(sizeof *ofr, OFPT_FLOW_REMOVED, htonl(0), &buf);
- ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofr->match,
- rule->flow_cookie, &ofr->cookie);
- ofr->priority = htons(rule->cr.priority);
- ofr->reason = reason;
- calc_flow_duration(rule->created, &ofr->duration_sec, &ofr->duration_nsec);
- ofr->idle_timeout = htons(rule->idle_timeout);
- ofr->packet_count = htonll(rule->packet_count);
- ofr->byte_count = htonll(rule->byte_count);
-
- return buf;
-}
-
-static struct ofpbuf *
-compose_nx_flow_removed(const struct rule *rule, uint8_t reason)
-{
- struct nx_flow_removed *nfr;
- struct ofpbuf *buf;
- int match_len;
-
- make_nxmsg_xid(sizeof *nfr, NXT_FLOW_REMOVED, htonl(0), &buf);
- match_len = nx_put_match(buf, &rule->cr);
-
- nfr = buf->data;
- nfr->cookie = rule->flow_cookie;
- nfr->priority = htons(rule->cr.priority);
- nfr->reason = reason;
- calc_flow_duration(rule->created, &nfr->duration_sec, &nfr->duration_nsec);
- nfr->idle_timeout = htons(rule->idle_timeout);
- nfr->match_len = htons(match_len);
- nfr->packet_count = htonll(rule->packet_count);
- nfr->byte_count = htonll(rule->byte_count);
-
- return buf;
-}
-
static void
rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason)
{
+ struct ofputil_flow_removed fr;
struct ofconn *ofconn;
if (!rule->send_flow_removed) {
return;
}
+ fr.rule = rule->cr;
+ fr.cookie = rule->flow_cookie;
+ fr.reason = reason;
+ calc_flow_duration__(rule->created, &fr.duration_sec, &fr.duration_nsec);
+ fr.idle_timeout = rule->idle_timeout;
+ fr.packet_count = rule->packet_count;
+ fr.byte_count = rule->byte_count;
+
LIST_FOR_EACH (ofconn, node, &p->all_conns) {
struct ofpbuf *msg;
continue;
}
- msg = (ofconn->flow_format == NXFF_NXM
- ? compose_nx_flow_removed(rule, reason)
- : compose_ofp_flow_removed(ofconn, rule, reason));
-
- /* Account flow expirations under ofconn->reply_counter, the counter
- * for replies to OpenFlow requests. That works because preventing
- * OpenFlow requests from being processed also prevents new flows from
- * being added (and expiring). (It also prevents processing OpenFlow
- * requests that would not add new flows, so it is imperfect.) */
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ /* This accounts flow expirations as if they were replies to OpenFlow
+ * requests. That works because preventing OpenFlow requests from
+ * being processed also prevents new flows from being added (and
+ * expiring). (It also prevents processing OpenFlow requests that
+ * would not add new flows, so it is imperfect.) */
+ msg = ofputil_encode_flow_removed(&fr, ofconn_get_flow_format(ofconn));
+ ofconn_send_reply(ofconn, msg);
}
}
schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall,
                   const struct flow *flow, bool clone)
{
-    enum { OPI_SIZE = offsetof(struct ofp_packet_in, data) };
-    struct ofproto *ofproto = ofconn->ofproto;
-    struct ofp_packet_in *opi;
-    int total_len, send_len;
-    struct ofpbuf *packet;
-    uint32_t buffer_id;
-    int idx;
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+    struct ofputil_packet_in pin;
+    struct ofpbuf *msg;
+
+    /* Figure out the easy parts. */
+    pin.packet = upcall->packet;
+    pin.in_port = odp_port_to_ofp_port(flow->in_port);
+    pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
    /* Get OpenFlow buffer_id. */
    if (upcall->type == DPIF_UC_ACTION) {
-        buffer_id = UINT32_MAX;
+        /* UINT32_MAX signals "not buffered" to the controller. */
+        pin.buffer_id = UINT32_MAX;
    } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) {
-        buffer_id = pktbuf_get_null();
+        pin.buffer_id = pktbuf_get_null();
    } else if (!ofconn->pktbuf) {
-        buffer_id = UINT32_MAX;
+        pin.buffer_id = UINT32_MAX;
    } else {
-        buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet, flow->in_port);
+        pin.buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet,
+                                    flow->in_port);
    }
    /* Figure out how much of the packet to send. */
-    total_len = send_len = upcall->packet->size;
-    if (buffer_id != UINT32_MAX) {
-        send_len = MIN(send_len, ofconn->miss_send_len);
+    pin.send_len = upcall->packet->size;
+    if (pin.buffer_id != UINT32_MAX) {
+        /* Buffered packets are truncated to the connection's configured
+         * miss send length. */
+        pin.send_len = MIN(pin.send_len, ofconn->miss_send_len);
    }
    if (upcall->type == DPIF_UC_ACTION) {
-        send_len = MIN(send_len, upcall->userdata);
+        pin.send_len = MIN(pin.send_len, upcall->userdata);
    }
-    /* Copy or steal buffer for OFPT_PACKET_IN. */
-    if (clone) {
-        packet = ofpbuf_clone_data_with_headroom(upcall->packet->data,
-                                                 send_len, OPI_SIZE);
-    } else {
-        packet = upcall->packet;
-        packet->size = send_len;
-    }
-
-    /* Add OFPT_PACKET_IN. */
-    opi = ofpbuf_push_zeros(packet, OPI_SIZE);
-    opi->header.version = OFP_VERSION;
-    opi->header.type = OFPT_PACKET_IN;
-    opi->total_len = htons(total_len);
-    opi->in_port = htons(odp_port_to_ofp_port(flow->in_port));
-    opi->reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
-    opi->buffer_id = htonl(buffer_id);
-    update_openflow_length(packet);
-
-    /* Hand over to packet scheduler. It might immediately call into
-     * do_send_packet_in() or it might buffer it for a while (until a later
-     * call to pinsched_run()). */
-    idx = upcall->type == DPIF_UC_MISS ? 0 : 1;
-    pinsched_send(ofconn->schedulers[idx], flow->in_port,
-                  packet, do_send_packet_in, ofconn);
+    /* Make OFPT_PACKET_IN and hand over to packet scheduler.  It might
+     * immediately call into do_send_packet_in() or it might buffer it for a
+     * while (until a later call to pinsched_run()). */
+    msg = ofputil_encode_packet_in(&pin, clone ? NULL : upcall->packet);
+    pinsched_send(ofconn->schedulers[upcall->type == DPIF_UC_MISS ? 0 : 1],
+                  flow->in_port, msg, do_send_packet_in, ofconn);
}
/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an