#include <stdbool.h>
#include <stdlib.h>
#include "byte-order.h"
+#include "cfm.h"
#include "classifier.h"
#include "coverage.h"
-#include "discovery.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
struct netdev *netdev;
struct ofp_phy_port opp; /* In host byte order. */
uint16_t odp_port;
+ struct cfm *cfm; /* Connectivity Fault Management, if any. */
};
static void ofport_free(struct ofport *);
-static void hton_ofp_phy_port(struct ofp_phy_port *);
+static void ofport_run(struct ofproto *, struct ofport *);
+static void ofport_wait(struct ofport *);
struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */
/* type == OFCONN_PRIMARY only. */
enum nx_role role; /* Role. */
struct hmap_node hmap_node; /* In struct ofproto's "controllers" map. */
- struct discovery *discovery; /* Controller discovery object, if enabled. */
enum ofproto_band band; /* In-band or out-of-band? */
};
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *);
static void ofconn_wait(struct ofconn *);
+
static bool ofconn_receives_async_msgs(const struct ofconn *);
static char *ofconn_make_name(const struct ofproto *, const char *target);
static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst);
+static struct ofproto *ofconn_get_ofproto(struct ofconn *);
+
+static enum nx_flow_format ofconn_get_flow_format(struct ofconn *);
+static void ofconn_set_flow_format(struct ofconn *, enum nx_flow_format);
+
+static int ofconn_get_miss_send_len(const struct ofconn *);
+static void ofconn_set_miss_send_len(struct ofconn *, int miss_send_len);
+
+static enum ofconn_type ofconn_get_type(const struct ofconn *);
+
+static enum nx_role ofconn_get_role(const struct ofconn *);
+static void ofconn_set_role(struct ofconn *, enum nx_role);
+
static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
struct rconn_packet_counter *counter);
}
}
-static bool
-is_discovery_controller(const struct ofproto_controller *c)
-{
- return !strcmp(c->target, "discover");
-}
-
-static bool
-is_in_band_controller(const struct ofproto_controller *c)
-{
- return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND;
-}
-
/* Creates a new controller in 'ofproto'. Some of the settings are initially
* drawn from 'c', but update_controller() needs to be called later to finish
* the new ofconn's configuration. */
static void
add_controller(struct ofproto *ofproto, const struct ofproto_controller *c)
{
- struct discovery *discovery;
+ char *name = ofconn_make_name(ofproto, c->target);
struct ofconn *ofconn;
- if (is_discovery_controller(c)) {
- int error = discovery_create(c->accept_re, c->update_resolv_conf,
- ofproto->dpif, &discovery);
- if (error) {
- return;
- }
- } else {
- discovery = NULL;
- }
-
ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_PRIMARY);
ofconn->pktbuf = pktbuf_create();
ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
- if (discovery) {
- ofconn->discovery = discovery;
- } else {
- char *name = ofconn_make_name(ofproto, c->target);
- rconn_connect(ofconn->rconn, c->target, name);
- free(name);
- }
+ rconn_connect(ofconn->rconn, c->target, name);
hmap_insert(&ofproto->controllers, &ofconn->hmap_node,
hash_string(c->target, 0));
+
+ free(name);
}
/* Reconfigures 'ofconn' to match 'c'. This function cannot update an ofconn's
- * target or turn discovery on or off (these are done by creating new ofconns
- * and deleting old ones), but it can update the rest of an ofconn's
- * settings. */
+ * target (this is done by creating new ofconns and deleting old ones), but it
+ * can update the rest of an ofconn's settings. */
static void
update_controller(struct ofconn *ofconn, const struct ofproto_controller *c)
{
int probe_interval;
- ofconn->band = (is_in_band_controller(c)
- ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND);
+ ofconn->band = c->band;
rconn_set_max_backoff(ofconn->rconn, c->max_backoff);
probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0;
rconn_set_probe_interval(ofconn->rconn, probe_interval);
- if (ofconn->discovery) {
- discovery_set_update_resolv_conf(ofconn->discovery,
- c->update_resolv_conf);
- discovery_set_accept_controller_re(ofconn->discovery, c->accept_re);
- }
-
ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit);
}
static const char *
ofconn_get_target(const struct ofconn *ofconn)
{
- return ofconn->discovery ? "discover" : rconn_get_target(ofconn->rconn);
+ return rconn_get_target(ofconn->rconn);
}
static struct ofconn *
const struct ofconn *ofconn;
struct sockaddr_in *addrs;
size_t max_addrs, n_addrs;
- bool discovery;
size_t i;
/* Allocate enough memory for as many remotes as we could possibly have. */
n_addrs = 0;
/* Add all the remotes. */
- discovery = false;
HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) {
struct sockaddr_in *sin = &addrs[n_addrs];
sin->sin_port = rconn_get_remote_port(ofconn->rconn);
n_addrs++;
}
- if (ofconn->discovery) {
- discovery = true;
- }
}
for (i = 0; i < ofproto->n_extra_remotes; i++) {
addrs[n_addrs++] = ofproto->extra_in_band_remotes[i];
}
- /* Create or update or destroy in-band.
- *
- * Ordinarily we only enable in-band if there's at least one remote
- * address, but discovery needs the in-band rules for DHCP to be installed
- * even before we know any remote addresses. */
- if (n_addrs || discovery) {
+ /* Create or update or destroy in-band. */
+ if (n_addrs) {
if (!ofproto->in_band) {
in_band_create(ofproto, ofproto->dpif, &ofproto->in_band);
}
for (i = 0; i < n_controllers; i++) {
const struct ofproto_controller *c = &controllers[i];
- if (!vconn_verify_name(c->target) || !strcmp(c->target, "discover")) {
+ if (!vconn_verify_name(c->target)) {
if (!find_controller_by_target(p, c->target)) {
add_controller(p, c);
}
ofproto->sflow = NULL;
}
}
+\f
+/* Connectivity Fault Management configuration. */
+
+/* Clears the CFM configuration from 'port_no' on 'ofproto'. */
+void
+ofproto_iface_clear_cfm(struct ofproto *ofproto, uint32_t port_no)
+{
+    struct ofport *ofport = get_port(ofproto, port_no);
+
+    /* No-op if 'port_no' does not exist or has no CFM configured, so this is
+     * safe to call unconditionally (idempotent). */
+    if (ofport && ofport->cfm) {
+        cfm_destroy(ofport->cfm);
+        ofport->cfm = NULL;
+    }
+}
+/* Configures connectivity fault management on 'port_no' in 'ofproto'. Takes
+ * basic configuration from the configuration members in 'cfm', and the set of
+ * remote maintenance points from the 'n_remote_mps' elements in 'remote_mps'.
+ * Ignores the statistics members of 'cfm'.
+ *
+ * This function has no effect if 'ofproto' does not have a port 'port_no'. */
+void
+ofproto_iface_set_cfm(struct ofproto *ofproto, uint32_t port_no,
+                      const struct cfm *cfm,
+                      const uint16_t *remote_mps, size_t n_remote_mps)
+{
+    struct ofport *ofport;
+
+    ofport = get_port(ofproto, port_no);
+    if (!ofport) {
+        VLOG_WARN("%s: cannot configure CFM on nonexistent port %"PRIu32,
+                  dpif_name(ofproto->dpif), port_no);
+        return;
+    }
+
+    /* Reuse an existing CFM object when there is one; only create on first
+     * configuration.  (Presumably this preserves the existing object's
+     * accumulated state across reconfiguration — TODO confirm against
+     * cfm_create()/cfm_configure() semantics.) */
+    if (!ofport->cfm) {
+        ofport->cfm = cfm_create();
+    }
+
+    /* Copy only the basic configuration members; per the contract above, the
+     * statistics members of 'cfm' are ignored. */
+    ofport->cfm->mpid = cfm->mpid;
+    ofport->cfm->interval = cfm->interval;
+    memcpy(ofport->cfm->maid, cfm->maid, CCM_MAID_LEN);
+
+    cfm_update_remote_mps(ofport->cfm, remote_mps, n_remote_mps);
+
+    /* If the new configuration is rejected, tear CFM down entirely rather
+     * than leave a half-configured object on the port. */
+    if (!cfm_configure(ofport->cfm)) {
+        VLOG_WARN("%s: CFM configuration on port %"PRIu32" (%s) failed",
+                  dpif_name(ofproto->dpif), port_no,
+                  netdev_get_name(ofport->netdev));
+        cfm_destroy(ofport->cfm);
+        ofport->cfm = NULL;
+    }
+}
+
+/* Returns the connectivity fault management object associated with 'port_no'
+ * within 'ofproto', or a null pointer if 'ofproto' does not have a port
+ * 'port_no' or if that port does not have CFM configured. The caller must not
+ * modify or destroy the returned object. */
+const struct cfm *
+ofproto_iface_get_cfm(struct ofproto *ofproto, uint32_t port_no)
+{
+    const struct ofport *port = get_port(ofproto, port_no);
+
+    if (!port) {
+        return NULL;
+    }
+    /* 'port->cfm' is itself NULL when CFM is not configured on the port. */
+    return port->cfm;
+}
+\f
uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
static int
snoop_preference(const struct ofconn *ofconn)
{
- switch (ofconn->role) {
+ switch (ofconn_get_role(ofconn)) {
case NX_ROLE_MASTER:
return 3;
case NX_ROLE_OTHER:
/* Pick a controller for monitoring. */
best = NULL;
LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
- if (ofconn->type == OFCONN_PRIMARY
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY
&& (!best || snoop_preference(ofconn) > snoop_preference(best))) {
best = ofconn;
}
{
struct ofconn *ofconn, *next_ofconn;
struct ofservice *ofservice;
+ struct ofport *ofport;
char *devname;
int error;
int i;
process_port_change(p, error, devname);
}
+ HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
+ ofport_run(p, ofport);
+ }
+
if (p->in_band) {
if (time_msec() >= p->next_in_band_update) {
update_in_band_remotes(p);
{
struct ofservice *ofservice;
struct ofconn *ofconn;
+ struct ofport *ofport;
size_t i;
dpif_recv_wait(p->dpif);
dpif_port_poll_wait(p->dpif);
netdev_monitor_poll_wait(p->netdev_monitor);
+ HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
+ ofport_wait(ofport);
+ }
LIST_FOR_EACH (ofconn, node, &p->all_conns) {
ofconn_wait(ofconn);
}
shash_add(info, rconn_get_target(rconn), cinfo);
cinfo->is_connected = rconn_is_connected(rconn);
- cinfo->role = ofconn->role;
+ cinfo->role = ofconn_get_role(ofconn);
cinfo->pairs.n = 0;
return ofport && !(ofport->opp.config & OFPPC_NO_FLOOD);
}
+/* Sends 'packet' out of port 'port_no' within 'p'. If 'vlan_tci' is zero the
+ * packet will not have any 802.1Q header; if it is nonzero, then the packet
+ * will be sent with the VLAN TCI specified by 'vlan_tci & ~VLAN_CFI'.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. */
int
-ofproto_send_packet(struct ofproto *p, const struct flow *flow,
- const union ofp_action *actions, size_t n_actions,
+ofproto_send_packet(struct ofproto *ofproto,
+ uint32_t port_no, uint16_t vlan_tci,
const struct ofpbuf *packet)
{
- struct action_xlate_ctx ctx;
- struct ofpbuf *odp_actions;
-
- action_xlate_ctx_init(&ctx, p, flow, packet);
- /* Always xlate packets originated in this function. */
- ctx.check_special = false;
- odp_actions = xlate_actions(&ctx, actions, n_actions);
-
- /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
- * error code? */
- dpif_execute(p->dpif, odp_actions->data, odp_actions->size, packet);
+ struct ofpbuf odp_actions;
+ int error;
- ofpbuf_delete(odp_actions);
+ ofpbuf_init(&odp_actions, 32);
+ if (vlan_tci != 0) {
+ nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_SET_DL_TCI,
+ ntohs(vlan_tci & ~VLAN_CFI));
+ }
+ nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, port_no);
+ error = dpif_execute(ofproto->dpif, odp_actions.data, odp_actions.size,
+ packet);
+ ofpbuf_uninit(&odp_actions);
- return 0;
+ if (error) {
+ VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
+ dpif_name(ofproto->dpif), port_no, strerror(error));
+ }
+ return error;
}
/* Adds a flow to the OpenFlow flow table in 'p' that matches 'cls_rule' and
/* Primary controllers, even slaves, should always get port status
updates. Otherwise obey ofconn_receives_async_msgs(). */
- if (ofconn->type != OFCONN_PRIMARY
+ if (ofconn_get_type(ofconn) != OFCONN_PRIMARY
&& !ofconn_receives_async_msgs(ofconn)) {
continue;
}
}
}
+/* Performs periodic per-port processing: runs the port's CFM state machine,
+ * if any, and transmits a CCM heartbeat when one is due. */
+static void
+ofport_run(struct ofproto *ofproto, struct ofport *ofport)
+{
+    if (ofport->cfm) {
+        cfm_run(ofport->cfm);
+
+        if (cfm_should_send_ccm(ofport->cfm)) {
+            struct ofpbuf packet;
+            struct ccm *ccm;
+
+            /* Compose an Ethernet frame with the port's hardware address as
+             * source.  (eth_addr_ccm is presumably the CCM destination
+             * multicast address — TODO confirm.) */
+            ofpbuf_init(&packet, 0);
+            ccm = compose_packet(&packet, eth_addr_ccm, ofport->opp.hw_addr,
+                                 ETH_TYPE_CFM, sizeof *ccm);
+            cfm_compose_ccm(ofport->cfm, ccm);
+            /* vlan_tci of 0 sends the CCM untagged (see
+             * ofproto_send_packet()). */
+            ofproto_send_packet(ofproto, ofport->odp_port, 0, &packet);
+            ofpbuf_uninit(&packet);
+        }
+    }
+}
+
+/* Arranges for the poll loop to wake up when the port's CFM module (if any)
+ * needs to do work. */
+static void
+ofport_wait(struct ofport *ofport)
+{
+    if (ofport->cfm) {
+        cfm_wait(ofport->cfm);
+    }
+}
+
static void
ofport_free(struct ofport *ofport)
{
if (ofport) {
+ cfm_destroy(ofport->cfm);
netdev_close(ofport->netdev);
free(ofport);
}
static void
ofconn_destroy(struct ofconn *ofconn)
{
- if (ofconn->type == OFCONN_PRIMARY) {
- hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY) {
+ hmap_remove(&ofproto->controllers, &ofconn->hmap_node);
}
- discovery_destroy(ofconn->discovery);
list_remove(&ofconn->node);
rconn_destroy(ofconn->rconn);
static void
ofconn_run(struct ofconn *ofconn)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
int iteration;
size_t i;
- if (ofconn->discovery) {
- char *controller_name;
- if (rconn_is_connectivity_questionable(ofconn->rconn)) {
- discovery_question_connectivity(ofconn->discovery);
- }
- if (discovery_run(ofconn->discovery, &controller_name)) {
- if (controller_name) {
- char *ofconn_name = ofconn_make_name(p, controller_name);
- rconn_connect(ofconn->rconn, controller_name, ofconn_name);
- free(ofconn_name);
- free(controller_name);
- } else {
- rconn_disconnect(ofconn->rconn);
- }
- }
- }
-
for (i = 0; i < N_SCHEDULERS; i++) {
pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn);
}
}
}
- if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) {
+ if (!rconn_is_alive(ofconn->rconn)) {
ofconn_destroy(ofconn);
}
}
{
int i;
- if (ofconn->discovery) {
- discovery_wait(ofconn->discovery);
- }
for (i = 0; i < N_SCHEDULERS; i++) {
pinsched_wait(ofconn->schedulers[i]);
}
static bool
ofconn_receives_async_msgs(const struct ofconn *ofconn)
{
- if (ofconn->type == OFCONN_PRIMARY) {
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY) {
/* Primary controllers always get asynchronous messages unless they
* have configured themselves as "slaves". */
- return ofconn->role != NX_ROLE_SLAVE;
+ return ofconn_get_role(ofconn) != NX_ROLE_SLAVE;
} else {
/* Service connections don't get asynchronous messages unless they have
* explicitly asked for them by setting a nonzero miss send length. */
}
}
}
+
+/* Returns the ofproto that 'ofconn' is connected to. */
+static struct ofproto *
+ofconn_get_ofproto(struct ofconn *ofconn)
+{
+    return ofconn->ofproto;
+}
+
+/* Returns the flow format currently in use on 'ofconn'. */
+static enum nx_flow_format
+ofconn_get_flow_format(struct ofconn *ofconn)
+{
+    return ofconn->flow_format;
+}
+
+/* Sets the flow format used on 'ofconn' to 'flow_format'. */
+static void
+ofconn_set_flow_format(struct ofconn *ofconn, enum nx_flow_format flow_format)
+{
+    ofconn->flow_format = flow_format;
+}
+
+/* Returns the maximum number of bytes of a "packet in" sent to 'ofconn'. */
+static int
+ofconn_get_miss_send_len(const struct ofconn *ofconn)
+{
+    return ofconn->miss_send_len;
+}
+
+/* Sets 'ofconn''s "packet in" byte limit to 'miss_send_len'. */
+static void
+ofconn_set_miss_send_len(struct ofconn *ofconn, int miss_send_len)
+{
+    ofconn->miss_send_len = miss_send_len;
+}
+
+/* Returns 'ofconn''s type (OFCONN_PRIMARY or a service connection). */
+static enum ofconn_type
+ofconn_get_type(const struct ofconn *ofconn)
+{
+    return ofconn->type;
+}
+
+/* Returns 'ofconn''s controller role (master, slave, or other). */
+static enum nx_role
+ofconn_get_role(const struct ofconn *ofconn)
+{
+    return ofconn->role;
+}
+
+/* Sets 'ofconn''s controller role to 'role'. */
+static void
+ofconn_set_role(struct ofconn *ofconn, enum nx_role role)
+{
+    ofconn->role = role;
+}
\f
static void
ofservice_reconfigure(struct ofservice *ofservice,
}
}
+/* Sends 'msg' on 'ofconn', accounting it against the connection's reply
+ * counter (the counter used for replies to OpenFlow requests). */
+static void
+ofconn_send_reply(const struct ofconn *ofconn, struct ofpbuf *msg)
+{
+    queue_tx(msg, ofconn, ofconn->reply_counter);
+}
+
static void
send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
int error)
struct ofpbuf *buf = ofputil_encode_error_msg(error, oh);
if (buf) {
COVERAGE_INC(ofproto_error);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
}
}
-static void
-hton_ofp_phy_port(struct ofp_phy_port *opp)
-{
- opp->port_no = htons(opp->port_no);
- opp->config = htonl(opp->config);
- opp->state = htonl(opp->state);
- opp->curr = htonl(opp->curr);
- opp->advertised = htonl(opp->advertised);
- opp->supported = htonl(opp->supported);
- opp->peer = htonl(opp->peer);
-}
-
static int
handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
- queue_tx(make_echo_reply(oh), ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, make_echo_reply(oh));
return 0;
}
static int
handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofp_switch_features *osf;
struct ofpbuf *buf;
struct ofport *port;
osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
- osf->datapath_id = htonll(ofconn->ofproto->datapath_id);
+ osf->datapath_id = htonll(ofproto->datapath_id);
osf->n_buffers = htonl(pktbuf_capacity());
osf->n_tables = 2;
osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
(1u << OFPAT_SET_TP_DST) |
(1u << OFPAT_ENQUEUE));
- HMAP_FOR_EACH (port, hmap_node, &ofconn->ofproto->ports) {
+ HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
}
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
return 0;
}
static int
handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofpbuf *buf;
struct ofp_switch_config *osc;
uint16_t flags;
bool drop_frags;
/* Figure out flags. */
- dpif_get_drop_frags(ofconn->ofproto->dpif, &drop_frags);
+ dpif_get_drop_frags(ofproto->dpif, &drop_frags);
flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
/* Send reply. */
osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
osc->flags = htons(flags);
- osc->miss_send_len = htons(ofconn->miss_send_len);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ osc->miss_send_len = htons(ofconn_get_miss_send_len(ofconn));
+ ofconn_send_reply(ofconn, buf);
return 0;
}
static int
handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
uint16_t flags = ntohs(osc->flags);
- if (ofconn->type == OFCONN_PRIMARY && ofconn->role != NX_ROLE_SLAVE) {
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY
+ && ofconn_get_role(ofconn) != NX_ROLE_SLAVE) {
switch (flags & OFPC_FRAG_MASK) {
case OFPC_FRAG_NORMAL:
- dpif_set_drop_frags(ofconn->ofproto->dpif, false);
+ dpif_set_drop_frags(ofproto->dpif, false);
break;
case OFPC_FRAG_DROP:
- dpif_set_drop_frags(ofconn->ofproto->dpif, true);
+ dpif_set_drop_frags(ofproto->dpif, true);
break;
default:
VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
}
}
- ofconn->miss_send_len = ntohs(osc->miss_send_len);
+ ofconn_set_miss_send_len(ofconn, ntohs(osc->miss_send_len));
return 0;
}
ctx->check_special = true;
}
+/* Delivers the CFM heartbeat in 'packet', received on 'flow->in_port', to
+ * that port's CFM module.  Ignored if the port is unknown or has no CFM
+ * configured. */
+static void
+ofproto_process_cfm(struct ofproto *ofproto, const struct flow *flow,
+                    const struct ofpbuf *packet)
+{
+    struct ofport *ofport = get_port(ofproto, flow->in_port);
+
+    if (!ofport || !ofport->cfm) {
+        return;
+    }
+    cfm_process_heartbeat(ofport->cfm, packet);
+}
+
static struct ofpbuf *
xlate_actions(struct action_xlate_ctx *ctx,
const union ofp_action *in, size_t n_in)
ctx->recurse = 0;
ctx->last_pop_priority = -1;
- if (!ctx->check_special
- || !ctx->ofproto->ofhooks->special_cb
- || ctx->ofproto->ofhooks->special_cb(&ctx->flow, ctx->packet,
- ctx->ofproto->aux)) {
- do_xlate_actions(in, n_in, ctx);
- } else {
+ if (ctx->check_special && cfm_should_process_flow(&ctx->flow)) {
+ if (ctx->packet) {
+ ofproto_process_cfm(ctx->ofproto, &ctx->flow, ctx->packet);
+ }
+ ctx->may_set_up_flow = false;
+ } else if (ctx->check_special
+ && ctx->ofproto->ofhooks->special_cb
+ && !ctx->ofproto->ofhooks->special_cb(&ctx->flow, ctx->packet,
+ ctx->ofproto->aux)) {
ctx->may_set_up_flow = false;
+ } else {
+ do_xlate_actions(in, n_in, ctx);
}
remove_pop_action(ctx);
static int
reject_slave_controller(struct ofconn *ofconn, const const char *msg_type)
{
- if (ofconn->type == OFCONN_PRIMARY && ofconn->role == NX_ROLE_SLAVE) {
+ if (ofconn_get_type(ofconn) == OFCONN_PRIMARY
+ && ofconn_get_role(ofconn) == NX_ROLE_SLAVE) {
static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller",
msg_type);
static int
handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct ofp_packet_out *opo;
struct ofpbuf payload, *buffer;
union ofp_action *ofp_actions;
static int
handle_port_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
const struct ofp_port_mod *opm = (const struct ofp_port_mod *) oh;
struct ofport *port;
int error;
struct ofp_stats_reply *reply = msg->data;
reply->flags = htons(OFPSF_REPLY_MORE);
*msgp = make_ofp_stats_reply(reply->header.xid, reply->type, nbytes);
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
}
return ofpbuf_put_uninit(*msgp, nbytes);
}
struct nicira_stats_msg *reply = msg->data;
reply->flags = htons(OFPSF_REPLY_MORE);
*msgp = make_nxstats_reply(reply->header.xid, reply->subtype, nbytes);
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
}
ofpbuf_prealloc_tailroom(*msgp, nbytes);
}
handle_desc_stats_request(struct ofconn *ofconn,
const struct ofp_header *request)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct ofp_desc_stats *ods;
struct ofpbuf *msg;
ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc);
ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num);
ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
return 0;
}
handle_table_stats_request(struct ofconn *ofconn,
const struct ofp_header *request)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct ofp_table_stats *ots;
struct ofpbuf *msg;
ots = append_ofp_stats_reply(sizeof *ots, ofconn, &msg);
memset(ots, 0, sizeof *ots);
strcpy(ots->name, "classifier");
- ots->wildcards = (ofconn->flow_format == NXFF_OPENFLOW10
+ ots->wildcards = (ofconn_get_flow_format(ofconn) == NXFF_OPENFLOW10
? htonl(OFPFW_ALL) : htonl(OVSFW_ALL));
ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. */
ots->active_count = htonl(classifier_count(&p->cls));
put_32aligned_be64(&ots->lookup_count, htonll(0)); /* XXX */
put_32aligned_be64(&ots->matched_count, htonll(0)); /* XXX */
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
return 0;
}
static int
handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
const struct ofp_port_stats_request *psr = ofputil_stats_body(oh);
struct ofp_port_stats *ops;
struct ofpbuf *msg;
}
}
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
return 0;
}
static void
-calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec)
+calc_flow_duration__(long long int start, uint32_t *sec, uint32_t *nsec)
{
long long int msecs = time_msec() - start;
- *sec = htonl(msecs / 1000);
- *nsec = htonl((msecs % 1000) * (1000 * 1000));
+ *sec = msecs / 1000;
+ *nsec = (msecs % 1000) * (1000 * 1000);
+}
+
+static void
+calc_flow_duration(long long int start, ovs_be32 *sec_be, ovs_be32 *nsec_be)
+{
+ uint32_t sec, nsec;
+
+ calc_flow_duration__(start, &sec, &nsec);
+ *sec_be = htonl(sec);
+ *nsec_be = htonl(nsec);
}
static void
ofs->length = htons(len);
ofs->table_id = 0;
ofs->pad = 0;
- ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match,
- rule->flow_cookie, &cookie);
+ ofputil_cls_rule_to_match(&rule->cr, ofconn_get_flow_format(ofconn),
+ &ofs->match, rule->flow_cookie, &cookie);
put_32aligned_be64(&ofs->cookie, cookie);
calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec);
ofs->priority = htons(rule->cr.priority);
handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
const struct ofp_flow_stats_request *fsr = ofputil_stats_body(oh);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofpbuf *reply;
COVERAGE_INC(ofproto_flows_req);
ofputil_cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0,
&target);
- cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target);
+ cls_cursor_init(&cursor, &ofproto->cls, &target);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
put_ofp_flow_stats(ofconn, rule, fsr->out_port, &reply);
}
}
- queue_tx(reply, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, reply);
return 0;
}
static int
handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct nx_flow_stats_request *nfsr;
struct cls_rule target;
struct ofpbuf *reply;
struct cls_cursor cursor;
struct rule *rule;
- cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target);
+ cls_cursor_init(&cursor, &ofproto->cls, &target);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
put_nx_flow_stats(ofconn, rule, nfsr->out_port, &reply);
}
}
- queue_tx(reply, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, reply);
return 0;
}
const struct ofp_header *oh)
{
const struct ofp_aggregate_stats_request *request = ofputil_stats_body(oh);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofp_aggregate_stats_reply *reply;
struct cls_rule target;
struct ofpbuf *msg;
msg = start_ofp_stats_reply(oh, sizeof *reply);
reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg);
- query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
+ query_aggregate_stats(ofproto, &target, request->out_port,
request->table_id, reply);
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, msg);
return 0;
}
static int
handle_nxst_aggregate(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct nx_aggregate_stats_request *request;
struct ofp_aggregate_stats_reply *reply;
struct cls_rule target;
COVERAGE_INC(ofproto_flows_req);
buf = start_nxstats_reply(&request->nsm, sizeof *reply);
reply = ofpbuf_put_uninit(buf, sizeof *reply);
- query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
+ query_aggregate_stats(ofproto, &target, request->out_port,
request->table_id, reply);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
return 0;
}
static int
handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *ofproto = ofconn->ofproto;
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
const struct ofp_queue_stats_request *qsr;
struct queue_stats_cbdata cbdata;
struct ofport *port;
ofpbuf_delete(cbdata.msg);
return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
}
- queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, cbdata.msg);
return 0;
}
* in which no matching flow already exists in the flow table.
*
* Adds the flow specified by 'ofm', which is followed by 'n_actions'
- * ofp_actions, to ofconn->ofproto's flow table. Returns 0 on success or an
+ * ofp_actions, to the ofproto's flow table. Returns 0 on success or an
* OpenFlow error code as encoded by ofp_mkerr() on failure.
*
* 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
static int
add_flow(struct ofconn *ofconn, struct flow_mod *fm)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct ofpbuf *packet;
struct rule *rule;
uint16_t in_port;
send_buffered_packet(struct ofconn *ofconn,
struct rule *rule, uint32_t buffer_id)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofpbuf *packet;
uint16_t in_port;
int error;
return error;
}
- rule_execute(ofconn->ofproto, rule, in_port, packet);
+ rule_execute(ofproto, rule, in_port, packet);
return 0;
}
static int
modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct rule *match = NULL;
struct cls_cursor cursor;
struct rule *rule;
static int
modify_flow_strict(struct ofconn *ofconn, struct flow_mod *fm)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct rule *rule = find_flow_strict(p, fm);
if (rule && !rule_is_hidden(rule)) {
modify_flow(p, fm, rule);
static int
handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *p = ofconn->ofproto;
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
struct flow_mod fm;
int error;
return error;
}
- error = ofputil_decode_flow_mod(&fm, oh, ofconn->flow_format);
+ error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_flow_format(ofconn));
if (error) {
return error;
}
{
const struct nxt_tun_id_cookie *msg
= (const struct nxt_tun_id_cookie *) oh;
+ enum nx_flow_format flow_format;
+
+ flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10;
+ ofconn_set_flow_format(ofconn, flow_format);
- ofconn->flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10;
return 0;
}
struct ofpbuf *buf;
uint32_t role;
- if (ofconn->type != OFCONN_PRIMARY) {
+ if (ofconn_get_type(ofconn) != OFCONN_PRIMARY) {
VLOG_WARN_RL(&rl, "ignoring role request on non-controller "
"connection");
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
}
if (role == NX_ROLE_MASTER) {
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofconn *other;
- HMAP_FOR_EACH (other, hmap_node, &ofconn->ofproto->controllers) {
- if (other->role == NX_ROLE_MASTER) {
- other->role = NX_ROLE_SLAVE;
+ HMAP_FOR_EACH (other, hmap_node, &ofproto->controllers) {
+ if (ofconn_get_role(other) == NX_ROLE_MASTER) {
+ ofconn_set_role(other, NX_ROLE_SLAVE);
}
}
}
- ofconn->role = role;
+ ofconn_set_role(ofconn, role);
reply = make_nxmsg_xid(sizeof *reply, NXT_ROLE_REPLY, oh->xid, &buf);
reply->role = htonl(role);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
return 0;
}
if (format == NXFF_OPENFLOW10
|| format == NXFF_TUN_ID_FROM_COOKIE
|| format == NXFF_NXM) {
- ofconn->flow_format = format;
+ ofconn_set_flow_format(ofconn, format);
return 0;
} else {
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
/* Currently, everything executes synchronously, so we can just
* immediately send the barrier reply. */
ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
- queue_tx(buf, ofconn, ofconn->reply_counter);
+ ofconn_send_reply(ofconn, buf);
return 0;
}
/* Set header pointers in 'flow'. */
flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow);
- if (p->ofhooks->special_cb
- && !p->ofhooks->special_cb(&flow, upcall->packet, p->aux)) {
+ if (cfm_should_process_flow(&flow)) {
+ ofproto_process_cfm(p, &flow, upcall->packet);
+ ofpbuf_delete(upcall->packet);
+ return;
+ } else if (p->ofhooks->special_cb
+ && !p->ofhooks->special_cb(&flow, upcall->packet, p->aux)) {
ofpbuf_delete(upcall->packet);
return;
}
/* Check with in-band control to see if this packet should be sent
* to the local port regardless of the flow table. */
if (in_band_msg_in_hook(p->in_band, &flow, upcall->packet)) {
- struct ofpbuf odp_actions;
-
- ofpbuf_init(&odp_actions, 32);
- nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, ODPP_LOCAL);
- dpif_execute(p->dpif, odp_actions.data, odp_actions.size,
- upcall->packet);
- ofpbuf_uninit(&odp_actions);
+ ofproto_send_packet(p, ODPP_LOCAL, 0, upcall->packet);
}
facet = facet_lookup_valid(p, &flow);
rule_remove(ofproto, rule);
}
\f
-static struct ofpbuf *
-compose_ofp_flow_removed(struct ofconn *ofconn, const struct rule *rule,
- uint8_t reason)
-{
- struct ofp_flow_removed *ofr;
- struct ofpbuf *buf;
-
- ofr = make_openflow_xid(sizeof *ofr, OFPT_FLOW_REMOVED, htonl(0), &buf);
- ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofr->match,
- rule->flow_cookie, &ofr->cookie);
- ofr->priority = htons(rule->cr.priority);
- ofr->reason = reason;
- calc_flow_duration(rule->created, &ofr->duration_sec, &ofr->duration_nsec);
- ofr->idle_timeout = htons(rule->idle_timeout);
- ofr->packet_count = htonll(rule->packet_count);
- ofr->byte_count = htonll(rule->byte_count);
-
- return buf;
-}
-
-static struct ofpbuf *
-compose_nx_flow_removed(const struct rule *rule, uint8_t reason)
-{
- struct nx_flow_removed *nfr;
- struct ofpbuf *buf;
- int match_len;
-
- make_nxmsg_xid(sizeof *nfr, NXT_FLOW_REMOVED, htonl(0), &buf);
- match_len = nx_put_match(buf, &rule->cr);
-
- nfr = buf->data;
- nfr->cookie = rule->flow_cookie;
- nfr->priority = htons(rule->cr.priority);
- nfr->reason = reason;
- calc_flow_duration(rule->created, &nfr->duration_sec, &nfr->duration_nsec);
- nfr->idle_timeout = htons(rule->idle_timeout);
- nfr->match_len = htons(match_len);
- nfr->packet_count = htonll(rule->packet_count);
- nfr->byte_count = htonll(rule->byte_count);
-
- return buf;
-}
-
static void
rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason)
{
+ struct ofputil_flow_removed fr;
struct ofconn *ofconn;
if (!rule->send_flow_removed) {
return;
}
+ fr.rule = rule->cr;
+ fr.cookie = rule->flow_cookie;
+ fr.reason = reason;
+ calc_flow_duration__(rule->created, &fr.duration_sec, &fr.duration_nsec);
+ fr.idle_timeout = rule->idle_timeout;
+ fr.packet_count = rule->packet_count;
+ fr.byte_count = rule->byte_count;
+
LIST_FOR_EACH (ofconn, node, &p->all_conns) {
struct ofpbuf *msg;
continue;
}
- msg = (ofconn->flow_format == NXFF_NXM
- ? compose_nx_flow_removed(rule, reason)
- : compose_ofp_flow_removed(ofconn, rule, reason));
-
- /* Account flow expirations under ofconn->reply_counter, the counter
- * for replies to OpenFlow requests. That works because preventing
- * OpenFlow requests from being processed also prevents new flows from
- * being added (and expiring). (It also prevents processing OpenFlow
- * requests that would not add new flows, so it is imperfect.) */
- queue_tx(msg, ofconn, ofconn->reply_counter);
+ /* This accounts flow expirations as if they were replies to OpenFlow
+ * requests. That works because preventing OpenFlow requests from
+ * being processed also prevents new flows from being added (and
+ * expiring). (It also prevents processing OpenFlow requests that
+ * would not add new flows, so it is imperfect.) */
+ msg = ofputil_encode_flow_removed(&fr, ofconn_get_flow_format(ofconn));
+ ofconn_send_reply(ofconn, msg);
}
}
schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall,
const struct flow *flow, bool clone)
{
- enum { OPI_SIZE = offsetof(struct ofp_packet_in, data) };
- struct ofproto *ofproto = ofconn->ofproto;
- struct ofp_packet_in *opi;
- int total_len, send_len;
- struct ofpbuf *packet;
- uint32_t buffer_id;
- int idx;
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+ struct ofputil_packet_in pin;
+ struct ofpbuf *msg;
+
+ /* Figure out the easy parts. */
+ pin.packet = upcall->packet;
+ pin.in_port = odp_port_to_ofp_port(flow->in_port);
+ pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
/* Get OpenFlow buffer_id. */
if (upcall->type == DPIF_UC_ACTION) {
- buffer_id = UINT32_MAX;
+ pin.buffer_id = UINT32_MAX;
} else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) {
- buffer_id = pktbuf_get_null();
+ pin.buffer_id = pktbuf_get_null();
} else if (!ofconn->pktbuf) {
- buffer_id = UINT32_MAX;
+ pin.buffer_id = UINT32_MAX;
} else {
- buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet, flow->in_port);
+ pin.buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet,
+ flow->in_port);
}
/* Figure out how much of the packet to send. */
- total_len = send_len = upcall->packet->size;
- if (buffer_id != UINT32_MAX) {
- send_len = MIN(send_len, ofconn->miss_send_len);
+ pin.send_len = upcall->packet->size;
+ if (pin.buffer_id != UINT32_MAX) {
+ pin.send_len = MIN(pin.send_len, ofconn->miss_send_len);
}
if (upcall->type == DPIF_UC_ACTION) {
- send_len = MIN(send_len, upcall->userdata);
- }
-
- /* Copy or steal buffer for OFPT_PACKET_IN. */
- if (clone) {
- packet = ofpbuf_clone_data_with_headroom(upcall->packet->data,
- send_len, OPI_SIZE);
- } else {
- packet = upcall->packet;
- packet->size = send_len;
+ pin.send_len = MIN(pin.send_len, upcall->userdata);
}
- /* Add OFPT_PACKET_IN. */
- opi = ofpbuf_push_zeros(packet, OPI_SIZE);
- opi->header.version = OFP_VERSION;
- opi->header.type = OFPT_PACKET_IN;
- opi->total_len = htons(total_len);
- opi->in_port = htons(odp_port_to_ofp_port(flow->in_port));
- opi->reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
- opi->buffer_id = htonl(buffer_id);
- update_openflow_length(packet);
-
- /* Hand over to packet scheduler. It might immediately call into
- * do_send_packet_in() or it might buffer it for a while (until a later
- * call to pinsched_run()). */
- idx = upcall->type == DPIF_UC_MISS ? 0 : 1;
- pinsched_send(ofconn->schedulers[idx], flow->in_port,
- packet, do_send_packet_in, ofconn);
+ /* Make OFPT_PACKET_IN and hand over to packet scheduler. It might
+ * immediately call into do_send_packet_in() or it might buffer it for a
+ * while (until a later call to pinsched_run()). */
+ msg = ofputil_encode_packet_in(&pin, clone ? NULL : upcall->packet);
+ pinsched_send(ofconn->schedulers[upcall->type == DPIF_UC_MISS ? 0 : 1],
+ flow->in_port, msg, do_send_packet_in, ofconn);
}
/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an
goto exit;
}
- tun_id = htonll(strtoull(tun_id_s, NULL, 10));
+ tun_id = htonll(strtoull(tun_id_s, NULL, 0));
in_port = ofp_port_to_odp_port(atoi(in_port_s));
packet_s = ofpbuf_put_hex(&packet, packet_s, NULL);
uint16_t *nf_output_iface, void *ofproto_)
{
struct ofproto *ofproto = ofproto_;
- int out_port;
+ struct mac_entry *dst_mac;
/* Drop frames for reserved multicast addresses. */
if (eth_addr_is_reserved(flow->dl_dst)) {
}
/* Learn source MAC (but don't try to learn from revalidation). */
- if (packet != NULL) {
- tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
- 0, flow->in_port,
- GRAT_ARP_LOCK_NONE);
- if (rev_tag) {
+ if (packet != NULL
+ && mac_learning_may_learn(ofproto->ml, flow->dl_src, 0)) {
+ struct mac_entry *src_mac;
+
+ src_mac = mac_learning_insert(ofproto->ml, flow->dl_src, 0);
+ if (mac_entry_is_new(src_mac) || src_mac->port.i != flow->in_port) {
/* The log messages here could actually be useful in debugging,
* so keep the rate limit relatively high. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
- ofproto_revalidate(ofproto, rev_tag);
+
+ ofproto_revalidate(ofproto,
+ mac_learning_changed(ofproto->ml, src_mac));
+ src_mac->port.i = flow->in_port;
}
}
/* Determine output port. */
- out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags,
- NULL);
- if (out_port < 0) {
+ dst_mac = mac_learning_lookup(ofproto->ml, flow->dl_dst, 0, tags);
+ if (!dst_mac) {
flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD,
nf_output_iface, odp_actions);
- } else if (out_port != flow->in_port) {
- nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, out_port);
- *nf_output_iface = out_port;
} else {
- /* Drop. */
+ int out_port = dst_mac->port.i;
+ if (out_port != flow->in_port) {
+ nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, out_port);
+ *nf_output_iface = out_port;
+ } else {
+ /* Drop. */
+ }
}
return true;