Once you have installed all the prerequisites listed above in the Base
Prerequisites section, follow the procedure below to build.
-1. In the top source directory, configure the package by running the
+1. If you pulled the sources directly from an Open vSwitch Git tree,
+ run boot.sh in the top source directory:
+
+ % ./boot.sh
+
+2. In the top source directory, configure the package by running the
configure script. You can usually invoke configure without any
arguments:
additional environment variables. For a full list, invoke
configure with the --help option.
-2. Run make in the top source directory:
+3. Run make in the top source directory:
% make
-3. Become root by running "su" or another program.
+4. Become root by running "su" or another program.
-4. Run "make install" to install the executables and manpages into the
+5. Run "make install" to install the executables and manpages into the
running system, by default under /usr/local.
-5. If you built kernel modules, you may load them with "insmod", e.g.:
+6. If you built kernel modules, you may load them with "insmod", e.g.:
% insmod datapath/linux-2.6/openvswitch_mod.ko
if (!dp->table)
goto err_free_dp;
- /* Setup our datapath device */
+ /* Set up our datapath device. */
dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
err = PTR_ERR(dp_dev);
if (IS_ERR(dp_dev))
* we get completely accurate stats, but that blows our performance,
* badly. */
dp->n_flows--;
- error = answer_query(flow, uf.flags, ufp);
+ error = answer_query(flow, 0, ufp);
flow_deferred_free(flow);
error:
if (!flow)
error = __put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow, 0, ufp);
+ error = answer_query(flow, uf.flags, ufp);
if (error)
return -EFAULT;
}
static void cmd_shell(const struct dict *);
static void cmd_show_version(const struct dict *);
static void cmd_configure(const struct dict *);
-static void cmd_setup_pki(const struct dict *);
+static void cmd_set_up_pki(const struct dict *);
static void cmd_browse_status(const struct dict *);
static void cmd_show_motto(const struct dict *);
menu_add_item(&menu, "Exit");
menu_add_item(&menu, "Show Version")->f = cmd_show_version;
menu_add_item(&menu, "Configure")->f = cmd_configure;
- menu_add_item(&menu, "Setup PKI")->f = cmd_setup_pki;
+ menu_add_item(&menu, "Set up PKI")->f = cmd_set_up_pki;
if (debug_mode) {
menu_add_item(&menu, "Browse Status")->f = cmd_browse_status;
menu_add_item(&menu, "Shell")->f = cmd_shell;
}
static void
-cmd_setup_pki(const struct dict *dict UNUSED)
+cmd_set_up_pki(const struct dict *dict UNUSED)
{
static const char def_privkey_file[]
= "/etc/openflow-switch/of0-privkey.pem";
for (line_number = 1; fgets(line, sizeof line, f); line_number++) {
if (strstr(line, "[stack]")) {
uintptr_t end;
- if (sscanf(line, "%*"SCNxPTR"-%"SCNxPTR, &end) != 1) {
+ if (sscanf(line, "%*x-%"SCNxPTR, &end) != 1) {
VLOG_WARN("%s:%d: parse error", file_name, line_number);
continue;
}
uintptr_t low;
asm("movl %%esp,%0" : "=g" (low));
return low;
+#elif __x86_64__
+ uintptr_t low;
+ asm("movq %%rsp,%0" : "=g" (low));
+ return low;
#else
/* This causes a warning in GCC that cannot be disabled, so use it only on
* non-x86. */
slash = strrchr(file_name, '/');
if (slash) {
lock_name = xasprintf("%.*s/.%s.~lock~",
- slash - file_name, file_name, slash + 1);
+ (int) (slash - file_name), file_name, slash + 1);
} else {
lock_name = xasprintf(".%s.~lock~", file_name);
}
return 0;
} else {
VLOG_WARN_RL(&error_rl, "%s: discarding message truncated "
- "from %zu bytes to %d",
+ "from %"PRIu32" bytes to %d",
dpif_name(dpif_), msg->length, retval);
error = ERANGE;
}
if (!dladdr(frame[1], &addrinfo) || !addrinfo.dli_sname) {
fprintf(stderr, " 0x%08"PRIxPTR"\n", (uintptr_t) frame[1]);
} else {
- fprintf(stderr, " 0x%08"PRIxPTR" (%s+0x%x)\n",
+ fprintf(stderr, " 0x%08"PRIxPTR" (%s+0x%tx)\n",
(uintptr_t) frame[1], addrinfo.dli_sname,
(char *) frame[1] - (char *) addrinfo.dli_saddr);
}
void
flow_format(struct ds *ds, const flow_t *flow)
{
- ds_put_format(ds, "port%04x:vlan%d mac"ETH_ADDR_FMT"->"ETH_ADDR_FMT" "
+ ds_put_format(ds, "in_port%04x:vlan%d mac"ETH_ADDR_FMT"->"ETH_ADDR_FMT" "
"type%04x proto%"PRId8" ip"IP_FMT"->"IP_FMT" port%d->%d",
flow->in_port, ntohs(flow->dl_vlan),
ETH_ADDR_ARGS(flow->dl_src), ETH_ADDR_ARGS(flow->dl_dst),
putc(':', file);
backtrace_capture(&backtrace);
for (i = 0; i < backtrace.n_frames; i++) {
- fprintf(file, " 0x%x", backtrace.frames[i]);
+ fprintf(file, " 0x%"PRIxPTR, backtrace.frames[i]);
}
putc('\n', file);
}
#include <inttypes.h>
#include <stdlib.h>
+#include "bitmap.h"
#include "coverage.h"
#include "hash.h"
#include "list.h"
list_push_front(&ml->free, &s->lru_node);
}
ml->secret = random_uint32();
+ ml->non_learning_vlans = NULL;
return ml;
}
void
mac_learning_destroy(struct mac_learning *ml)
{
+ if (ml) {
+ bitmap_free(ml->non_learning_vlans);
+ }
free(ml);
}
+/* Provides a bitmap of VLANs which have learning disabled. It takes
+ * ownership of the bitmap. Returns true if the set has changed from
+ * the previous value. */
+bool
+mac_learning_set_disabled_vlans(struct mac_learning *ml, unsigned long *bitmap)
+{
+ bool ret = (bitmap == NULL
+ ? ml->non_learning_vlans != NULL
+ : (ml->non_learning_vlans == NULL
+ || !bitmap_equal(bitmap, ml->non_learning_vlans, 4096)));
+
+ bitmap_free(ml->non_learning_vlans);
+ ml->non_learning_vlans = bitmap;
+
+ return ret;
+}
+
+static bool
+is_learning_vlan(const struct mac_learning *ml, uint16_t vlan)
+{
+ return !(ml->non_learning_vlans
+ && bitmap_is_set(ml->non_learning_vlans, vlan));
+}
+
/* Attempts to make 'ml' learn from the fact that a frame from 'src_mac' was
* just observed arriving from 'src_port' on the given 'vlan'.
*
struct mac_entry *e;
struct list *bucket;
+ if (!is_learning_vlan(ml, vlan)) {
+ return 0;
+ }
+
if (eth_addr_is_multicast(src_mac)) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 30);
VLOG_DBG_RL(&rl, "multicast packet source "ETH_ADDR_FMT,
const uint8_t dst[ETH_ADDR_LEN], uint16_t vlan,
tag_type *tag)
{
- if (eth_addr_is_multicast(dst)) {
+ if (eth_addr_is_multicast(dst) || !is_learning_vlan(ml, vlan)) {
return -1;
} else {
struct mac_entry *e = search_bucket(mac_table_bucket(ml, dst, vlan),
struct list table[MAC_HASH_SIZE]; /* Hash table. */
struct mac_entry entries[MAC_MAX]; /* All entries. */
uint32_t secret; /* Secret for randomizing hash table. */
+ unsigned long *non_learning_vlans; /* Bitmap of learning disabled VLANs. */
};
struct mac_learning *mac_learning_create(void);
void mac_learning_destroy(struct mac_learning *);
+bool mac_learning_set_disabled_vlans(struct mac_learning *,
+ unsigned long *bitmap);
tag_type mac_learning_learn(struct mac_learning *,
const uint8_t src[ETH_ADDR_LEN], uint16_t vlan,
uint16_t src_port);
void
format_odp_flow_stats(struct ds *ds, const struct odp_flow_stats *s)
{
- ds_put_format(ds, "packets:%"PRIu64", bytes:%"PRIu64", used:",
- s->n_packets, s->n_bytes);
+ ds_put_format(ds, "packets:%llu, bytes:%llu, used:",
+ (unsigned long long int) s->n_packets,
+ (unsigned long long int) s->n_bytes);
if (s->used_sec) {
long long int used = s->used_sec * 1000 + s->used_nsec / 1000000;
ds_put_format(ds, "%.3fs", (time_msec() - used) / 1000.0);
((len & 0x0000ff00) << 8) |
((len & 0x000000ff) << 24));
if (swapped_len > 0xffff) {
- VLOG_WARN("bad packet length %"PRIu32" or %"PRIu32" "
+ VLOG_WARN("bad packet length %zu or %"PRIu32" "
"reading pcap file",
len, swapped_len);
return EPROTO;
#include "poll-loop.h"
#include <assert.h>
#include <errno.h>
+#include <inttypes.h>
#include <poll.h>
#include <stdlib.h>
#include <string.h>
ds_put_char(&ds, ':');
for (i = 0; i < backtrace->n_frames; i++) {
- ds_put_format(&ds, " 0x%x", backtrace->frames[i]);
+ ds_put_format(&ds, " 0x%"PRIxPTR, backtrace->frames[i]);
}
}
VLOG_DBG("%s", ds_cstr(&ds));
/* Time at which to die with SIGALRM (if not TIME_MIN). */
static time_t deadline = TIME_MIN;
-static void setup_timer(void);
+static void set_up_timer(void);
static void sigalrm_handler(int);
static void refresh_if_ticked(void);
static time_t time_add(time_t, time_t);
}
/* Set up periodic signal. */
- setup_timer();
+ set_up_timer();
}
static void
-setup_timer(void)
+set_up_timer(void)
{
struct itimerval itimer;
void
time_postfork(void)
{
- setup_timer();
+ set_up_timer();
}
/* Forces a refresh of the current time from the kernel. It is not usually
if (got_size != size) {
char *type_name = ofp_message_type_to_string(type);
VLOG_WARN_RL(&bad_ofmsg_rl,
- "received %s message of length %"PRIu16" (expected %zu)",
+ "received %s message of length %zu (expected %zu)",
type_name, got_size, size);
free(type_name);
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
got_size = ntohs(msg->length);
if (got_size < min_size) {
char *type_name = ofp_message_type_to_string(type);
- VLOG_WARN_RL(&bad_ofmsg_rl, "received %s message of length %"PRIu16" "
+ VLOG_WARN_RL(&bad_ofmsg_rl, "received %s message of length %zu "
"(expected at least %zu)",
type_name, got_size, min_size);
free(type_name);
if ((got_size - min_size) % array_elt_size) {
char *type_name = ofp_message_type_to_string(type);
VLOG_WARN_RL(&bad_ofmsg_rl,
- "received %s message of bad length %"PRIu16": the "
+ "received %s message of bad length %zu: the "
"excess over %zu (%zu) is not evenly divisible by %zu "
"(remainder is %zu)",
type_name, got_size, min_size, got_size - min_size,
actions_len = ntohs(opo->actions_len);
if (actions_len > extra) {
- VLOG_WARN_RL(&bad_ofmsg_rl, "packet-out claims %zu bytes of actions "
+ VLOG_WARN_RL(&bad_ofmsg_rl, "packet-out claims %u bytes of actions "
"but message has room for only %zu bytes",
actions_len, extra);
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
}
if (actions_len % sizeof(union ofp_action)) {
- VLOG_WARN_RL(&bad_ofmsg_rl, "packet-out claims %zu bytes of actions, "
+ VLOG_WARN_RL(&bad_ofmsg_rl, "packet-out claims %u bytes of actions, "
"which is not a multiple of %zu",
actions_len, sizeof(union ofp_action));
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
if (n_slots > slots_left) {
VLOG_DBG_RL(&bad_ofmsg_rl,
- "action requires %u slots but only %td remain",
+ "action requires %u slots but only %u remain",
n_slots, slots_left);
return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_BAD_LEN);
}
if (wc & OFPFW_DL_TYPE) {
m->dl_type = 0;
- /* Can't sensibly m on network or transport headers if the
+ /* Can't sensibly match on network or transport headers if the
* data link type is unknown. */
wc |= OFPFW_NW | OFPFW_TP;
m->nw_src = m->nw_dst = m->nw_proto = 0;
if (wc & OFPFW_NW_PROTO) {
m->nw_proto = 0;
- /* Can't sensibly m on transport headers if the network
+ /* Can't sensibly match on transport headers if the network
* protocol is unknown. */
wc |= OFPFW_TP;
m->tp_src = m->tp_dst = 0;
}
} else {
/* Transport layer fields will always be extracted as zeros, so we
- * can do an exact-m on those values. */
+ * can do an exact-match on those values. */
wc &= ~OFPFW_TP;
m->tp_src = m->tp_dst = 0;
}
}
} else {
/* Network and transport layer fields will always be extracted as
- * zeros, so we can do an exact-m on those values. */
+ * zeros, so we can do an exact-match on those values. */
wc &= ~(OFPFW_NW | OFPFW_TP);
m->nw_proto = m->nw_src = m->nw_dst = 0;
m->tp_src = m->tp_dst = 0;
*
* In Open vSwitch, in-band control is implemented as "hidden" flows (in
* that they are not visible through OpenFlow) and at a higher priority
- * than wildcarded flows can be setup by the controller. This is done
+ * than wildcarded flows can be set up by the controller. This is done
* so that the controller cannot interfere with them and possibly break
* connectivity with its switches. It is possible to see all flows,
* including in-band ones, with the ovs-appctl "bridge/dump-flows"
*
* - Differing Controllers for Switches. All switches must know
* the L3 addresses for all the controllers that other switches
- * may use, since rules need to be setup to allow traffic related
+ * may use, since rules need to be set up to allow traffic related
* to those controllers through. See rules (f), (g), (h), and (i).
*
* - Differing Routes for Switches. In order for the switch to
/* out_port and fixed_fields are assumed never to change. */
static void
-setup_flow(struct in_band *in_band, int rule_idx, const flow_t *flow,
- uint32_t fixed_fields, uint16_t out_port)
+set_up_flow(struct in_band *in_band, int rule_idx, const flow_t *flow,
+ uint32_t fixed_fields, uint16_t out_port)
{
struct ib_rule *rule = &in_band->rules[rule_idx];
flow.nw_proto = IP_TYPE_UDP;
flow.tp_src = htons(DHCP_CLIENT_PORT);
flow.tp_dst = htons(DHCP_SERVER_PORT);
- setup_flow(in_band, IBR_FROM_LOCAL_DHCP, &flow,
- (OFPFW_IN_PORT | OFPFW_DL_TYPE | OFPFW_DL_SRC
- | OFPFW_NW_PROTO | OFPFW_TP_SRC | OFPFW_TP_DST),
- OFPP_NORMAL);
+ set_up_flow(in_band, IBR_FROM_LOCAL_DHCP, &flow,
+ (OFPFW_IN_PORT | OFPFW_DL_TYPE | OFPFW_DL_SRC
+ | OFPFW_NW_PROTO | OFPFW_TP_SRC | OFPFW_TP_DST),
+ OFPP_NORMAL);
/* Allow the connection's interface to receive directed ARP traffic. */
memset(&flow, 0, sizeof flow);
flow.dl_type = htons(ETH_TYPE_ARP);
memcpy(flow.dl_dst, local_mac, ETH_ADDR_LEN);
flow.nw_proto = ARP_OP_REPLY;
- setup_flow(in_band, IBR_TO_LOCAL_ARP, &flow,
- (OFPFW_DL_TYPE | OFPFW_DL_DST | OFPFW_NW_PROTO),
- OFPP_NORMAL);
+ set_up_flow(in_band, IBR_TO_LOCAL_ARP, &flow,
+ (OFPFW_DL_TYPE | OFPFW_DL_DST | OFPFW_NW_PROTO),
+ OFPP_NORMAL);
/* Allow the connection's interface to be the source of ARP traffic. */
memset(&flow, 0, sizeof flow);
flow.dl_type = htons(ETH_TYPE_ARP);
memcpy(flow.dl_src, local_mac, ETH_ADDR_LEN);
flow.nw_proto = ARP_OP_REQUEST;
- setup_flow(in_band, IBR_FROM_LOCAL_ARP, &flow,
- (OFPFW_DL_TYPE | OFPFW_DL_SRC | OFPFW_NW_PROTO),
- OFPP_NORMAL);
+ set_up_flow(in_band, IBR_FROM_LOCAL_ARP, &flow,
+ (OFPFW_DL_TYPE | OFPFW_DL_SRC | OFPFW_NW_PROTO),
+ OFPP_NORMAL);
} else {
drop_flow(in_band, IBR_TO_LOCAL_ARP);
drop_flow(in_band, IBR_FROM_LOCAL_ARP);
flow.dl_type = htons(ETH_TYPE_ARP);
memcpy(flow.dl_dst, remote_mac, ETH_ADDR_LEN);
flow.nw_proto = ARP_OP_REPLY;
- setup_flow(in_band, IBR_TO_REMOTE_ARP, &flow,
- (OFPFW_DL_TYPE | OFPFW_DL_DST | OFPFW_NW_PROTO),
- OFPP_NORMAL);
+ set_up_flow(in_band, IBR_TO_REMOTE_ARP, &flow,
+ (OFPFW_DL_TYPE | OFPFW_DL_DST | OFPFW_NW_PROTO),
+ OFPP_NORMAL);
/* Allow ARP requests from the remote side's MAC. */
memset(&flow, 0, sizeof flow);
flow.dl_type = htons(ETH_TYPE_ARP);
memcpy(flow.dl_src, remote_mac, ETH_ADDR_LEN);
flow.nw_proto = ARP_OP_REQUEST;
- setup_flow(in_band, IBR_FROM_REMOTE_ARP, &flow,
- (OFPFW_DL_TYPE | OFPFW_DL_SRC | OFPFW_NW_PROTO),
- OFPP_NORMAL);
+ set_up_flow(in_band, IBR_FROM_REMOTE_ARP, &flow,
+ (OFPFW_DL_TYPE | OFPFW_DL_SRC | OFPFW_NW_PROTO),
+ OFPP_NORMAL);
} else {
drop_flow(in_band, IBR_TO_REMOTE_ARP);
drop_flow(in_band, IBR_FROM_REMOTE_ARP);
flow.dl_type = htons(ETH_TYPE_ARP);
flow.nw_proto = ARP_OP_REPLY;
flow.nw_dst = controller_ip;
- setup_flow(in_band, IBR_TO_CTL_ARP, &flow,
- (OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_DST_MASK),
- OFPP_NORMAL);
+ set_up_flow(in_band, IBR_TO_CTL_ARP, &flow,
+ (OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_DST_MASK),
+ OFPP_NORMAL);
/* Allow ARP requests from the controller's IP. */
memset(&flow, 0, sizeof flow);
flow.dl_type = htons(ETH_TYPE_ARP);
flow.nw_proto = ARP_OP_REQUEST;
flow.nw_src = controller_ip;
- setup_flow(in_band, IBR_FROM_CTL_ARP, &flow,
- (OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK),
- OFPP_NORMAL);
+ set_up_flow(in_band, IBR_FROM_CTL_ARP, &flow,
+ (OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK),
+ OFPP_NORMAL);
/* OpenFlow traffic to or from the controller.
*
flow.nw_dst = controller_ip;
flow.tp_src = htons(OFP_TCP_PORT);
flow.tp_dst = htons(OFP_TCP_PORT);
- setup_flow(in_band, IBR_TO_CTL_OFP, &flow,
- (OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_DST_MASK
- | OFPFW_TP_DST), OFPP_NORMAL);
- setup_flow(in_band, IBR_FROM_CTL_OFP, &flow,
- (OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK
- | OFPFW_TP_SRC), OFPP_NORMAL);
+ set_up_flow(in_band, IBR_TO_CTL_OFP, &flow,
+ (OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_DST_MASK
+ | OFPFW_TP_DST), OFPP_NORMAL);
+ set_up_flow(in_band, IBR_FROM_CTL_OFP, &flow,
+ (OFPFW_DL_TYPE | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK
+ | OFPFW_TP_SRC), OFPP_NORMAL);
} else {
drop_flow(in_band, IBR_TO_CTL_ARP);
drop_flow(in_band, IBR_FROM_CTL_ARP);
#define NETFLOW_V5_VERSION 5
+static const int ACTIVE_TIMEOUT_DEFAULT = 600;
+
/* Every NetFlow v5 message contains the header that follows. This is
* followed by up to thirty records that describe a terminating flow.
* We only send a single record per NetFlow message.
* bits of the interface fields. */
uint32_t netflow_cnt; /* Flow sequence number for NetFlow. */
struct ofpbuf packet; /* NetFlow packet being accumulated. */
+ long long int active_timeout; /* Timeout for flows that are still active. */
+ long long int reconfig_time; /* When we reconfigured the timeouts. */
};
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
}
void
-netflow_expire(struct netflow *nf, const struct ofexpired *expired)
+netflow_expire(struct netflow *nf, struct netflow_flow *nf_flow,
+ struct ofexpired *expired)
{
struct netflow_v5_header *nf_hdr;
struct netflow_v5_record *nf_rec;
struct timeval now;
- /* NetFlow only reports on IP packets. */
- if (expired->flow.dl_type != htons(ETH_TYPE_IP)) {
+ nf_flow->last_expired += nf->active_timeout;
+
+ /* NetFlow only reports on IP packets and we should only report flows
+ * that actually have traffic. */
+ if (expired->flow.dl_type != htons(ETH_TYPE_IP) ||
+ expired->packet_count - nf_flow->packet_count_off == 0) {
return;
}
if (nf->add_id_to_iface) {
uint16_t iface = (nf->engine_id & 0x7f) << 9;
nf_rec->input = htons(iface | (expired->flow.in_port & 0x1ff));
- nf_rec->output = htons(iface);
+ nf_rec->output = htons(iface | (nf_flow->output_iface & 0x1ff));
} else {
nf_rec->input = htons(expired->flow.in_port);
- nf_rec->output = htons(0);
+ nf_rec->output = htons(nf_flow->output_iface);
}
- nf_rec->packet_count = htonl(MIN(expired->packet_count, UINT32_MAX));
- nf_rec->byte_count = htonl(MIN(expired->byte_count, UINT32_MAX));
- nf_rec->init_time = htonl(expired->created - nf->boot_time);
- nf_rec->used_time = htonl(MAX(expired->created, expired->used)
+ nf_rec->packet_count = htonl(MIN(expired->packet_count -
+ nf_flow->packet_count_off, UINT32_MAX));
+ nf_rec->byte_count = htonl(MIN(expired->byte_count -
+ nf_flow->byte_count_off, UINT32_MAX));
+ nf_rec->init_time = htonl(nf_flow->created - nf->boot_time);
+ nf_rec->used_time = htonl(MAX(nf_flow->created, expired->used)
- nf->boot_time);
if (expired->flow.nw_proto == IP_TYPE_ICMP) {
/* In NetFlow, the ICMP type and code are concatenated and
nf_rec->src_port = expired->flow.tp_src;
nf_rec->dst_port = expired->flow.tp_dst;
}
- nf_rec->tcp_flags = expired->tcp_flags;
+ nf_rec->tcp_flags = nf_flow->tcp_flags;
nf_rec->ip_proto = expired->flow.nw_proto;
- nf_rec->ip_tos = expired->ip_tos;
+ nf_rec->ip_tos = nf_flow->ip_tos;
+
+ /* Update flow tracking data. */
+ nf_flow->created = 0;
+ nf_flow->packet_count_off = expired->packet_count;
+ nf_flow->byte_count_off = expired->byte_count;
+ nf_flow->tcp_flags = 0;
/* NetFlow messages are limited to 30 records. */
if (ntohs(nf_hdr->count) >= 30) {
}
int
-netflow_set_collectors(struct netflow *nf, const struct svec *collectors_)
+netflow_set_options(struct netflow *nf,
+ const struct netflow_options *nf_options)
{
struct svec collectors;
int error = 0;
size_t i;
+ long long int old_timeout;
+
+ nf->engine_type = nf_options->engine_type;
+ nf->engine_id = nf_options->engine_id;
+ nf->add_id_to_iface = nf_options->add_id_to_iface;
clear_collectors(nf);
- svec_clone(&collectors, collectors_);
+ svec_clone(&collectors, &nf_options->collectors);
svec_sort_unique(&collectors);
nf->fds = xmalloc(sizeof *nf->fds * collectors.n);
}
svec_destroy(&collectors);
- return error;
-}
-void
-netflow_set_engine(struct netflow *nf, uint8_t engine_type,
- uint8_t engine_id, bool add_id_to_iface)
-{
- nf->engine_type = engine_type;
- nf->engine_id = engine_id;
- nf->add_id_to_iface = add_id_to_iface;
+ old_timeout = nf->active_timeout;
+ if (nf_options->active_timeout != -1) {
+ nf->active_timeout = nf_options->active_timeout;
+ } else {
+ nf->active_timeout = ACTIVE_TIMEOUT_DEFAULT;
+ }
+ nf->active_timeout *= 1000;
+ if (old_timeout != nf->active_timeout) {
+ nf->reconfig_time = time_msec();
+ }
+
+ return error;
}
struct netflow *
free(nf);
}
}
+
+void
+netflow_flow_clear(struct netflow_flow *nf_flow)
+{
+ uint16_t output_iface = nf_flow->output_iface;
+
+ memset(nf_flow, 0, sizeof *nf_flow);
+ nf_flow->output_iface = output_iface;
+}
+
+void
+netflow_flow_update_time(struct netflow *nf, struct netflow_flow *nf_flow,
+ long long int used)
+{
+ if (!nf_flow->created) {
+ nf_flow->created = used;
+ }
+
+ if (!nf || !nf->active_timeout || !nf_flow->last_expired ||
+ nf->reconfig_time > nf_flow->last_expired) {
+ /* Keep the time updated to prevent a flood of expiration in
+ * the future. */
+ nf_flow->last_expired = time_msec();
+ }
+}
+
+void
+netflow_flow_update_flags(struct netflow_flow *nf_flow, uint8_t ip_tos,
+ uint8_t tcp_flags)
+{
+ nf_flow->ip_tos = ip_tos;
+ nf_flow->tcp_flags |= tcp_flags;
+}
+
+bool
+netflow_active_timeout_expired(struct netflow *nf, struct netflow_flow *nf_flow)
+{
+ if (nf->active_timeout) {
+ return time_msec() > nf_flow->last_expired + nf->active_timeout;
+ }
+
+ return false;
+}
#define NETFLOW_H 1
#include "flow.h"
+#include "svec.h"
struct ofexpired;
-struct svec;
+
+struct netflow_options {
+ struct svec collectors;
+ uint8_t engine_type;
+ uint8_t engine_id;
+ int active_timeout;
+ bool add_id_to_iface;
+};
+
+enum netflow_output_ports {
+ NF_OUT_FLOOD = UINT16_MAX,
+ NF_OUT_MULTI = UINT16_MAX - 1,
+ NF_OUT_DROP = UINT16_MAX - 2
+};
+
+struct netflow_flow {
+ long long int last_expired; /* Time this flow last timed out. */
+ long long int created; /* Time the flow was created, reset at each active timeout. */
+
+ uint64_t packet_count_off; /* Packet count at last time out. */
+ uint64_t byte_count_off; /* Byte count at last time out. */
+
+ uint16_t output_iface; /* Output interface index. */
+ uint8_t ip_tos; /* Last-seen IP type-of-service. */
+ uint8_t tcp_flags; /* Bitwise-OR of all TCP flags seen. */
+};
struct netflow *netflow_create(void);
void netflow_destroy(struct netflow *);
-int netflow_set_collectors(struct netflow *, const struct svec *collectors);
-void netflow_set_engine(struct netflow *nf, uint8_t engine_type,
- uint8_t engine_id, bool add_id_to_iface);
-void netflow_expire(struct netflow *, const struct ofexpired *);
+int netflow_set_options(struct netflow *, const struct netflow_options *);
+void netflow_expire(struct netflow *, struct netflow_flow *,
+ struct ofexpired *);
void netflow_run(struct netflow *);
+void netflow_flow_clear(struct netflow_flow *);
+void netflow_flow_update_time(struct netflow *, struct netflow_flow *,
+ long long int used);
+void netflow_flow_update_flags(struct netflow_flow *, uint8_t ip_tos,
+ uint8_t tcp_flags);
+bool netflow_active_timeout_expired(struct netflow *, struct netflow_flow *);
+
#endif /* netflow.h */
const flow_t *flow, struct ofproto *ofproto,
const struct ofpbuf *packet,
struct odp_actions *out, tag_type *tags,
- bool *may_setup_flow);
+ bool *may_set_up_flow, uint16_t *nf_output_iface);
struct rule {
struct cls_rule cr;
uint64_t packet_count; /* Number of packets received. */
uint64_t byte_count; /* Number of bytes received. */
uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */
- uint8_t tcp_flags; /* Bitwise-OR of all TCP flags seen. */
- uint8_t ip_tos; /* Last-seen IP type-of-service. */
tag_type tags; /* Tags (set only by hooks). */
+ struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
/* If 'super' is non-NULL, this rule is a subrule, that is, it is an
* exact-match rule (having cr.wc.wildcards of 0) generated from the
return false;
}
-static struct rule *rule_create(struct rule *super, const union ofp_action *,
- size_t n_actions, uint16_t idle_timeout,
- uint16_t hard_timeout);
+static struct rule *rule_create(struct ofproto *, struct rule *super,
+ const union ofp_action *, size_t n_actions,
+ uint16_t idle_timeout, uint16_t hard_timeout);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
static void send_packet_in_miss(struct ofpbuf *, void *ofproto);
static void send_packet_in_action(struct ofpbuf *, void *ofproto);
static void update_used(struct ofproto *);
-static void update_stats(struct rule *, const struct odp_flow_stats *);
+static void update_stats(struct ofproto *, struct rule *,
+ const struct odp_flow_stats *);
static void expire_rule(struct cls_rule *, void *ofproto);
+static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);
}
int
-ofproto_set_netflow(struct ofproto *ofproto, const struct svec *collectors,
- uint8_t engine_type, uint8_t engine_id, bool add_id_to_iface)
+ofproto_set_netflow(struct ofproto *ofproto,
+ const struct netflow_options *nf_options)
{
- if (collectors && collectors->n) {
+ if (nf_options->collectors.n) {
if (!ofproto->netflow) {
ofproto->netflow = netflow_create();
}
- netflow_set_engine(ofproto->netflow, engine_type, engine_id,
- add_id_to_iface);
- return netflow_set_collectors(ofproto->netflow, collectors);
+ return netflow_set_options(ofproto->netflow, nf_options);
} else {
netflow_destroy(ofproto->netflow);
ofproto->netflow = NULL;
int error;
error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
- NULL, NULL);
+ NULL, NULL, NULL);
if (error) {
return error;
}
int idle_timeout)
{
struct rule *rule;
- rule = rule_create(NULL, actions, n_actions,
+ rule = rule_create(p, NULL, actions, n_actions,
idle_timeout >= 0 ? idle_timeout : 5 /* XXX */, 0);
cls_rule_from_flow(&rule->cr, flow, wildcards, priority);
rule_insert(p, rule, NULL, 0);
/* Caller is responsible for initializing the 'cr' member of the returned
* rule. */
static struct rule *
-rule_create(struct rule *super,
+rule_create(struct ofproto *ofproto, struct rule *super,
const union ofp_action *actions, size_t n_actions,
uint16_t idle_timeout, uint16_t hard_timeout)
{
}
rule->n_actions = n_actions;
rule->actions = xmemdup(actions, n_actions * sizeof *actions);
+ netflow_flow_clear(&rule->nf_flow);
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
+
return rule;
}
if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
struct rule *super = rule->super ? rule->super : rule;
if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
- packet, &a, NULL, 0)) {
+ packet, &a, NULL, 0, NULL)) {
return;
}
actions = a.actions;
actions, n_actions, packet)) {
struct odp_flow_stats stats;
flow_extract_stats(flow, packet, &stats);
- update_stats(rule, &stats);
+ update_stats(ofproto, rule, &stats);
rule->used = time_msec();
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
}
}
rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
const flow_t *flow)
{
- struct rule *subrule = rule_create(rule, NULL, 0,
+ struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
rule->idle_timeout, rule->hard_timeout);
COVERAGE_INC(ofproto_subrule_create);
cls_rule_from_flow(&subrule->cr, flow, 0,
super = rule->super ? rule->super : rule;
rule->tags = 0;
xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
- packet, &a, &rule->tags, &rule->may_install);
+ packet, &a, &rule->tags, &rule->may_install,
+ &rule->nf_flow.output_iface);
actions_len = a.n_actions * sizeof *a.actions;
if (rule->n_odp_actions != a.n_actions
&put)) {
rule->installed = true;
if (displaced_rule) {
- update_stats(rule, &put.flow.stats);
+ update_stats(p, rule, &put.flow.stats);
rule_post_uninstall(p, displaced_rule);
}
}
odp_flow.actions = NULL;
odp_flow.n_actions = 0;
if (!dpif_flow_del(p->dpif, &odp_flow)) {
- update_stats(rule, &odp_flow.stats);
+ update_stats(p, rule, &odp_flow.stats);
}
rule->installed = false;
}
}
+static bool
+is_controller_rule(struct rule *rule)
+{
+ /* If the only action is send to the controller then don't report
+ * NetFlow expiration messages since it is just part of the control
+ * logic for the network and not real traffic. */
+
+ if (rule && rule->super) {
+ struct rule *super = rule->super;
+
+ return super->n_actions == 1 &&
+ super->actions[0].type == htons(OFPAT_OUTPUT) &&
+ super->actions[0].output.port == htons(OFPP_CONTROLLER);
+ }
+
+ return false;
+}
+
static void
rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
{
struct rule *super = rule->super;
rule_account(ofproto, rule, 0);
- if (ofproto->netflow && rule->byte_count) {
+
+ if (ofproto->netflow && !is_controller_rule(rule)) {
struct ofexpired expired;
expired.flow = rule->cr.flow;
expired.packet_count = rule->packet_count;
expired.byte_count = rule->byte_count;
expired.used = rule->used;
- expired.created = rule->created;
- expired.tcp_flags = rule->tcp_flags;
- expired.ip_tos = rule->ip_tos;
- netflow_expire(ofproto->netflow, &expired);
+ netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
}
if (super) {
super->packet_count += rule->packet_count;
super->byte_count += rule->byte_count;
- super->tcp_flags |= rule->tcp_flags;
- if (rule->packet_count) {
- super->ip_tos = rule->ip_tos;
- }
- }
- /* Reset counters to prevent double counting if the rule ever gets
- * reinstalled. */
- rule->packet_count = 0;
- rule->byte_count = 0;
- rule->accounted_bytes = 0;
- rule->tcp_flags = 0;
- rule->ip_tos = 0;
+ /* Reset counters to prevent double counting if the rule ever gets
+ * reinstalled. */
+ rule->packet_count = 0;
+ rule->byte_count = 0;
+ rule->accounted_bytes = 0;
+
+ netflow_flow_clear(&rule->nf_flow);
+ }
}
\f
static void
}
static void
-add_output_group_action(struct odp_actions *actions, uint16_t group)
+add_output_group_action(struct odp_actions *actions, uint16_t group,
+ uint16_t *nf_output_iface)
{
odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group;
+
+ if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
+ *nf_output_iface = NF_OUT_FLOOD;
+ }
}
static void
/* Output. */
struct odp_actions *out; /* Datapath actions. */
tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
- bool may_setup_flow; /* True ordinarily; false if the actions must
+ bool may_set_up_flow; /* True ordinarily; false if the actions must
* be reassessed for every packet. */
+ uint16_t nf_output_iface; /* Output interface index for NetFlow. */
};
static void do_xlate_actions(const union ofp_action *in, size_t n_in,
}
odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
+ ctx->nf_output_iface = port;
}
static struct rule *
const struct ofp_action_output *oao)
{
uint16_t odp_port;
+ uint16_t prev_nf_output_iface = ctx->nf_output_iface;
+
+ ctx->nf_output_iface = NF_OUT_DROP;
switch (ntohs(oao->port)) {
case OFPP_IN_PORT:
case OFPP_NORMAL:
if (!ctx->ofproto->ofhooks->normal_cb(ctx->flow, ctx->packet,
ctx->out, ctx->tags,
+ &ctx->nf_output_iface,
ctx->ofproto->aux)) {
COVERAGE_INC(ofproto_uninstallable);
- ctx->may_setup_flow = false;
+ ctx->may_set_up_flow = false;
}
break;
case OFPP_FLOOD:
- add_output_group_action(ctx->out, DP_GROUP_FLOOD);
+ add_output_group_action(ctx->out, DP_GROUP_FLOOD,
+ &ctx->nf_output_iface);
break;
case OFPP_ALL:
- add_output_group_action(ctx->out, DP_GROUP_ALL);
+ add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
break;
case OFPP_CONTROLLER:
add_controller_action(ctx->out, oao);
}
break;
}
+
+ if (prev_nf_output_iface == NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_FLOOD;
+ } else if (ctx->nf_output_iface == NF_OUT_DROP) {
+ ctx->nf_output_iface = prev_nf_output_iface;
+ } else if (prev_nf_output_iface != NF_OUT_DROP &&
+ ctx->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_MULTI;
+ }
}
static void
xlate_actions(const union ofp_action *in, size_t n_in,
const flow_t *flow, struct ofproto *ofproto,
const struct ofpbuf *packet,
- struct odp_actions *out, tag_type *tags, bool *may_setup_flow)
+ struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
+ uint16_t *nf_output_iface)
{
tag_type no_tags = 0;
struct action_xlate_ctx ctx;
ctx.packet = packet;
ctx.out = out;
ctx.tags = tags ? tags : &no_tags;
- ctx.may_setup_flow = true;
+ ctx.may_set_up_flow = true;
+ ctx.nf_output_iface = NF_OUT_DROP;
do_xlate_actions(in, n_in, &ctx);
- /* Check with in-band control to see if we're allowed to setup this
+ /* Check with in-band control to see if we're allowed to set up this
* flow. */
if (!in_band_rule_check(ofproto->in_band, flow, out)) {
- ctx.may_setup_flow = false;
+ ctx.may_set_up_flow = false;
}
- if (may_setup_flow) {
- *may_setup_flow = ctx.may_setup_flow;
+ if (may_set_up_flow) {
+ *may_set_up_flow = ctx.may_set_up_flow;
+ }
+ if (nf_output_iface) {
+ *nf_output_iface = ctx.nf_output_iface;
}
if (odp_actions_overflow(out)) {
odp_actions_init(out);
flow_extract(&payload, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
- &flow, p, &payload, &actions, NULL, NULL);
+ &flow, p, &payload, &actions, NULL, NULL, NULL);
if (error) {
return error;
}
}
static void
-update_time(struct rule *rule, const struct odp_flow_stats *stats)
+update_time(struct ofproto *ofproto, struct rule *rule,
+ const struct odp_flow_stats *stats)
{
long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
if (used > rule->used) {
rule->used = used;
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
}
}
static void
-update_stats(struct rule *rule, const struct odp_flow_stats *stats)
+update_stats(struct ofproto *ofproto, struct rule *rule,
+ const struct odp_flow_stats *stats)
{
- update_time(rule, stats);
- rule->packet_count += stats->n_packets;
- rule->byte_count += stats->n_bytes;
- rule->tcp_flags |= stats->tcp_flags;
if (stats->n_packets) {
- rule->ip_tos = stats->ip_tos;
+ update_time(ofproto, rule, stats);
+ rule->packet_count += stats->n_packets;
+ rule->byte_count += stats->n_bytes;
+ netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
+ stats->tcp_flags);
}
}
uint16_t in_port;
int error;
- rule = rule_create(NULL, (const union ofp_action *) ofm->actions,
+ rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
n_actions, ntohs(ofm->idle_timeout),
ntohs(ofm->hard_timeout));
cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
{
size_t msg_len = ntohs(ofmph->header.header.length);
if (msg_len < sizeof(*ofmph)) {
- VLOG_WARN_RL(&rl, "dropping short managment message: %d\n", msg_len);
+ VLOG_WARN_RL(&rl, "dropping short managment message: %zu\n", msg_len);
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
}
struct ofmp_capability_request *ofmpcr;
if (msg_len < sizeof(struct ofmp_capability_request)) {
- VLOG_WARN_RL(&rl, "dropping short capability request: %d\n",
+ VLOG_WARN_RL(&rl, "dropping short capability request: %zu\n",
msg_len);
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
}
flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofe->match);
ofe->priority = htons(rule->cr.priority);
ofe->reason = reason;
- ofe->duration = (now - rule->created) / 1000;
- ofe->packet_count = rule->packet_count;
- ofe->byte_count = rule->byte_count;
+ ofe->duration = htonl((now - rule->created) / 1000);
+ ofe->packet_count = htonll(rule->packet_count);
+ ofe->byte_count = htonll(rule->byte_count);
return buf;
}
? rule->used + rule->idle_timeout * 1000
: LLONG_MAX);
expire = MIN(hard_expire, idle_expire);
- if (expire == LLONG_MAX) {
- if (rule->installed && time_msec() >= rule->used + 5000) {
- uninstall_idle_flow(p, rule);
- }
- return;
- }
now = time_msec();
if (now < expire) {
if (rule->installed && now >= rule->used + 5000) {
uninstall_idle_flow(p, rule);
+ } else if (!rule->cr.wc.wildcards) {
+ active_timeout(p, rule);
}
+
return;
}
rule_remove(p, rule);
}
+static void
+active_timeout(struct ofproto *ofproto, struct rule *rule)
+{
+ /* Emits a NetFlow record for 'rule' if its active timeout has expired.
+ * Callers only invoke this for exact-match (non-wildcarded) flows; rules
+ * for which is_controller_rule() returns true are skipped. */
+ if (ofproto->netflow && !is_controller_rule(rule) &&
+ netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
+ struct ofexpired expired;
+ struct odp_flow odp_flow;
+
+ /* Get updated flow stats. */
+ memset(&odp_flow, 0, sizeof odp_flow);
+ if (rule->installed) {
+ odp_flow.key = rule->cr.flow;
+ /* NOTE(review): ODPFF_ZERO_TCP_FLAGS presumably makes the datapath
+ * clear its accumulated TCP flags after this read, so the next
+ * interval reports only newly seen flags -- confirm in dpif docs. */
+ odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
+ dpif_flow_get(ofproto->dpif, &odp_flow);
+
+ if (odp_flow.stats.n_packets) {
+ update_time(ofproto, rule, &odp_flow.stats);
+ netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
+ odp_flow.stats.tcp_flags);
+ }
+ }
+
+ /* 'odp_flow' was zeroed above, so the additions below are no-ops
+ * when the rule is not installed in the datapath. */
+ expired.flow = rule->cr.flow;
+ expired.packet_count = rule->packet_count +
+ odp_flow.stats.n_packets;
+ expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
+ expired.used = rule->used;
+
+ netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
+
+ /* Schedule us to send the accumulated records once we have
+ * collected all of them. */
+ poll_immediate_wake();
+ }
+}
+
static void
update_used(struct ofproto *p)
{
continue;
}
- update_time(rule, &f->stats);
+ update_time(p, rule, &f->stats);
rule_account(p, rule, f->stats.n_bytes);
}
free(flows);
static bool
default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
struct odp_actions *actions, tag_type *tags,
- void *ofproto_)
+ uint16_t *nf_output_iface, void *ofproto_)
{
struct ofproto *ofproto = ofproto_;
int out_port;
/* Determine output port. */
out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags);
if (out_port < 0) {
- add_output_group_action(actions, DP_GROUP_FLOOD);
+ add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
} else if (out_port != flow->in_port) {
odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
+ *nf_output_iface = out_port;
} else {
/* Drop. */
}
#include <stddef.h>
#include <stdint.h>
#include "flow.h"
+#include "netflow.h"
#include "tag.h"
struct odp_actions;
struct ofexpired {
flow_t flow;
- uint64_t packet_count; /* Packets from *expired* subrules. */
- uint64_t byte_count; /* Bytes from *expired* subrules. */
+ uint64_t packet_count; /* Packets from subrules. */
+ uint64_t byte_count; /* Bytes from subrules. */
long long int used; /* Last-used time (0 if never used). */
- long long int created; /* Creation time. */
- uint8_t tcp_flags; /* Bitwise-OR of all TCP flags seen. */
- uint8_t ip_tos; /* Last-seen IP type-of-service. */
};
int ofproto_create(const char *datapath, const struct ofhooks *, void *aux,
int ofproto_set_controller(struct ofproto *, const char *controller);
int ofproto_set_listeners(struct ofproto *, const struct svec *listeners);
int ofproto_set_snoops(struct ofproto *, const struct svec *snoops);
-int ofproto_set_netflow(struct ofproto *, const struct svec *collectors,
- uint8_t engine_type, uint8_t engine_id, bool add_id_to_iface);
+int ofproto_set_netflow(struct ofproto *,
+ const struct netflow_options *nf_options);
void ofproto_set_failure(struct ofproto *, bool fail_open);
void ofproto_set_rate_limit(struct ofproto *, int rate_limit, int burst_limit);
int ofproto_set_stp(struct ofproto *, bool enable_stp);
void (*port_changed_cb)(enum ofp_port_reason, const struct ofp_phy_port *,
void *aux);
bool (*normal_cb)(const flow_t *, const struct ofpbuf *packet,
- struct odp_actions *, tag_type *, void *aux);
+ struct odp_actions *, tag_type *,
+ uint16_t *nf_output_iface, void *aux);
void (*account_flow_cb)(const flow_t *, const union odp_action *,
size_t n_actions, unsigned long long int n_bytes,
void *aux);
static bool learn_macs = true;
/* Set up flows? (If not, every packet is processed at the controller.) */
-static bool setup_flows = true;
+static bool set_up_flows = true;
/* --max-idle: Maximum idle time, in seconds, before flows expire. */
static int max_idle = 60;
{
sw->rconn = rconn_new_from_vconn(name, vconn);
sw->lswitch = lswitch_create(sw->rconn, learn_macs,
- setup_flows ? max_idle : -1);
+ set_up_flows ? max_idle : -1);
}
static int
break;
case 'n':
- setup_flows = false;
+ set_up_flows = false;
break;
case OPT_MUTE:
printf("\tports: cur:%"PRIu32", max:%"PRIu32"\n",
stats.n_ports, stats.max_ports);
printf("\tgroups: max:%"PRIu16"\n", stats.max_groups);
- printf("\tlookups: frags:%"PRIu64", hit:%"PRIu64", missed:%"PRIu64", "
- "lost:%"PRIu64"\n",
- stats.n_frags, stats.n_hit, stats.n_missed, stats.n_lost);
+ printf("\tlookups: frags:%llu, hit:%llu, missed:%llu, lost:%llu\n",
+ (unsigned long long int) stats.n_frags,
+ (unsigned long long int) stats.n_hit,
+ (unsigned long long int) stats.n_missed,
+ (unsigned long long int) stats.n_lost);
printf("\tqueues: max-miss:%"PRIu16", max-action:%"PRIu16"\n",
stats.max_miss_queue, stats.max_action_queue);
}
printf("Reply:\n");
ofp_print(stdout, reply, reply->size, 2);
}
- printf("%d bytes from %s: xid=%08"PRIx32" time=%.1f ms\n",
+ printf("%zu bytes from %s: xid=%08"PRIx32" time=%.1f ms\n",
reply->size - sizeof *rpy_hdr, argv[1], rpy_hdr->xid,
(1000*(double)(end.tv_sec - start.tv_sec))
+ (.001*(end.tv_usec - start.tv_usec)));
struct ofproto *ofproto;
struct ofsettings s;
int error;
+ struct netflow_options nf_options;
set_program_name(argv[0]);
register_fault_handlers();
ovs_fatal(error,
"failed to configure controller snooping connections");
}
- error = ofproto_set_netflow(ofproto, &s.netflow, 0, 0, false);
+ memset(&nf_options, 0, sizeof nf_options);
+ nf_options.collectors = s.netflow;
+ error = ofproto_set_netflow(ofproto, &nf_options);
if (error) {
ovs_fatal(error, "failed to configure NetFlow collectors");
}
#include "odp-util.h"
#include "ofp-print.h"
#include "ofpbuf.h"
+#include "ofproto/netflow.h"
#include "ofproto/ofproto.h"
#include "packets.h"
#include "poll-loop.h"
struct bridge {
struct list node; /* Node in global list of bridges. */
char *name; /* User-specified arbitrary name. */
- struct mac_learning *ml; /* MAC learning table, or null not to learn. */
+ struct mac_learning *ml; /* MAC learning table. */
bool sent_config_request; /* Successfully sent config request? */
uint8_t default_ea[ETH_ADDR_LEN]; /* Default MAC. */
static void bond_wait(struct bridge *);
static void bond_rebalance_port(struct port *);
static void bond_send_learning_packets(struct port *);
+static void bond_enable_slave(struct iface *iface, bool enable);
static void port_create(struct bridge *, const char *name);
static void port_reconfigure(struct port *);
uint64_t dpid;
struct iface *local_iface;
struct iface *hw_addr_iface;
- uint8_t engine_type, engine_id;
- bool add_id_to_iface = false;
- struct svec nf_hosts;
+ struct netflow_options nf_options;
bridge_fetch_dp_ifaces(br);
iterate_and_prune_ifaces(br, init_iface_netdev, NULL);
ofproto_set_datapath_id(br->ofproto, dpid);
/* Set NetFlow configuration on this bridge. */
- dpif_get_netflow_ids(br->dpif, &engine_type, &engine_id);
+ memset(&nf_options, 0, sizeof nf_options);
+ dpif_get_netflow_ids(br->dpif, &nf_options.engine_type,
+ &nf_options.engine_id);
+ nf_options.active_timeout = -1;
+
if (cfg_has("netflow.%s.engine-type", br->name)) {
- engine_type = cfg_get_int(0, "netflow.%s.engine-type",
+ nf_options.engine_type = cfg_get_int(0, "netflow.%s.engine-type",
br->name);
}
if (cfg_has("netflow.%s.engine-id", br->name)) {
- engine_id = cfg_get_int(0, "netflow.%s.engine-id", br->name);
+ nf_options.engine_id = cfg_get_int(0, "netflow.%s.engine-id",
+ br->name);
+ }
+ if (cfg_has("netflow.%s.active-timeout", br->name)) {
+ nf_options.active_timeout = cfg_get_int(0,
+ "netflow.%s.active-timeout",
+ br->name);
}
if (cfg_has("netflow.%s.add-id-to-iface", br->name)) {
- add_id_to_iface = cfg_get_bool(0, "netflow.%s.add-id-to-iface",
- br->name);
+ nf_options.add_id_to_iface = cfg_get_bool(0,
+ "netflow.%s.add-id-to-iface",
+ br->name);
}
- if (add_id_to_iface && engine_id > 0x7f) {
+ if (nf_options.add_id_to_iface && nf_options.engine_id > 0x7f) {
VLOG_WARN("bridge %s: netflow port mangling may conflict with "
"another vswitch, choose an engine id less than 128",
br->name);
}
- if (add_id_to_iface && br->n_ports > 0x1ff) {
+ if (nf_options.add_id_to_iface && br->n_ports > 508) {
VLOG_WARN("bridge %s: netflow port mangling will conflict with "
- "another port when 512 or more ports are used",
+ "another port when more than 508 ports are used",
br->name);
}
- svec_init(&nf_hosts);
- cfg_get_all_keys(&nf_hosts, "netflow.%s.host", br->name);
- if (ofproto_set_netflow(br->ofproto, &nf_hosts, engine_type,
- engine_id, add_id_to_iface)) {
+ svec_init(&nf_options.collectors);
+ cfg_get_all_keys(&nf_options.collectors, "netflow.%s.host", br->name);
+ if (ofproto_set_netflow(br->ofproto, &nf_options)) {
VLOG_ERR("bridge %s: problem setting netflow collectors",
br->name);
}
- svec_destroy(&nf_hosts);
+ svec_destroy(&nf_options.collectors);
/* Update the controller and related settings. It would be more
* straightforward to call this from bridge_reconfigure_one(), but we
continue;
}
- if (br->ml) {
- mac_learning_wait(br->ml);
- }
+ mac_learning_wait(br->ml);
bond_wait(br);
brstp_wait(br);
}
{
COVERAGE_INC(bridge_flush);
br->flush = true;
- if (br->ml) {
- mac_learning_flush(br->ml);
- }
+ mac_learning_flush(br->ml);
}
/* Returns the 'br' interface for the ODPP_LOCAL port, or null if 'br' has no
{
struct ds ds = DS_EMPTY_INITIALIZER;
const struct bridge *br;
+ const struct mac_entry *e;
br = bridge_lookup(args);
if (!br) {
}
ds_put_cstr(&ds, " port VLAN MAC Age\n");
- if (br->ml) {
- const struct mac_entry *e;
- LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
- if (e->port < 0 || e->port >= br->n_ports) {
- continue;
- }
- ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
- br->ports[e->port]->ifaces[0]->dp_ifidx,
- e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
+ LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
+ if (e->port < 0 || e->port >= br->n_ports) {
+ continue;
}
+ ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
+ br->ports[e->port]->ifaces[0]->dp_ifidx,
+ e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
}
unixctl_command_reply(conn, 200, ds_cstr(&ds));
ds_destroy(&ds);
return error;
}
- if (br->ml) {
- mac_learning_run(br->ml, ofproto_get_revalidate_set(br->ofproto));
- }
+ mac_learning_run(br->ml, ofproto_get_revalidate_set(br->ofproto));
bond_run(br);
brstp_run(br);
static int
bond_choose_iface(const struct port *port)
{
- size_t i;
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+ size_t i, best_down_slave = -1;
+ long long next_delay_expiration = LLONG_MAX;
+
for (i = 0; i < port->n_ifaces; i++) {
- if (port->ifaces[i]->enabled) {
+ struct iface *iface = port->ifaces[i];
+
+ if (iface->enabled) {
return i;
+ } else if (iface->delay_expires < next_delay_expiration) {
+ best_down_slave = i;
+ next_delay_expiration = iface->delay_expires;
}
}
- return -1;
+
+ if (best_down_slave != -1) {
+ struct iface *iface = port->ifaces[best_down_slave];
+
+ VLOG_INFO_RL(&rl, "interface %s: skipping remaining %lli ms updelay "
+ "since no other interface is up", iface->name,
+ iface->delay_expires - time_msec());
+ bond_enable_slave(iface, true);
+ }
+
+ return best_down_slave;
}
static bool
iface->delay_expires = LLONG_MAX;
VLOG_INFO_RL(&rl, "interface %s: will not be %s",
iface->name, carrier ? "disabled" : "enabled");
- } else if (carrier && port->updelay && port->active_iface < 0) {
- iface->delay_expires = time_msec();
- VLOG_INFO_RL(&rl, "interface %s: skipping %d ms updelay since no "
- "other interface is up", iface->name, port->updelay);
+ } else if (carrier && port->active_iface < 0) {
+ bond_enable_slave(iface, true);
+ if (port->updelay) {
+ VLOG_INFO_RL(&rl, "interface %s: skipping %d ms updelay since no "
+ "other interface is up", iface->name, port->updelay);
+ }
} else {
int delay = carrier ? port->updelay : port->downdelay;
iface->delay_expires = time_msec() + delay;
struct port *port = iface->port;
struct bridge *br = port->bridge;
+ /* This acts as a recursion check. If the act of disabling a slave
+ * causes a different slave to be enabled, the flag will allow us to
+ * skip redundant work when we reenter this function. It must be
+ * cleared on exit to keep things safe with multiple bonds. */
+ static bool moving_active_iface = false;
+
iface->delay_expires = LLONG_MAX;
if (enable == iface->enabled) {
return;
if (iface->port_ifidx == port->active_iface) {
ofproto_revalidate(br->ofproto,
port->active_iface_tag);
+
+ /* Disabling a slave can lead to another slave being immediately
+ * enabled if there will be no active slaves but one is waiting
+ * on an updelay. In this case we do not need to run most of the
+ * code for the newly enabled slave since there was no period
+ * without an active slave and it is redundant with the disabling
+ * path. */
+ moving_active_iface = true;
bond_choose_active_iface(port);
}
bond_send_learning_packets(port);
} else {
VLOG_WARN("interface %s: enabled", iface->name);
- if (port->active_iface < 0) {
+ if (port->active_iface < 0 && !moving_active_iface) {
ofproto_revalidate(br->ofproto, port->no_ifaces_tag);
bond_choose_active_iface(port);
bond_send_learning_packets(port);
}
iface->tag = tag_create_random();
}
- port_update_bond_compat(port);
+
+ moving_active_iface = false;
+ port->bond_compat_is_stale = true;
}
static void
for (i = 0; i < br->n_ports; i++) {
struct port *port = br->ports[i];
+ if (port->n_ifaces >= 2) {
+ for (j = 0; j < port->n_ifaces; j++) {
+ struct iface *iface = port->ifaces[j];
+ if (time_msec() >= iface->delay_expires) {
+ bond_enable_slave(iface, !iface->enabled);
+ }
+ }
+ }
+
if (port->bond_compat_is_stale) {
port->bond_compat_is_stale = false;
port_update_bond_compat(port);
}
-
- if (port->n_ifaces < 2) {
- continue;
- }
- for (j = 0; j < port->n_ifaces; j++) {
- struct iface *iface = port->ifaces[j];
- if (time_msec() >= iface->delay_expires) {
- bond_enable_slave(iface, !iface->enabled);
- }
- }
}
}
static size_t
compose_dsts(const struct bridge *br, const flow_t *flow, uint16_t vlan,
const struct port *in_port, const struct port *out_port,
- struct dst dsts[], tag_type *tags)
+ struct dst dsts[], tag_type *tags, uint16_t *nf_output_iface)
{
mirror_mask_t mirrors = in_port->src_mirrors;
struct dst *dst = dsts;
dst++;
}
}
+ *nf_output_iface = NF_OUT_FLOOD;
} else if (out_port && set_dst(dst, flow, in_port, out_port, tags)) {
+ *nf_output_iface = dst->dp_ifidx;
mirrors |= out_port->dst_mirrors;
dst++;
}
static void
compose_actions(struct bridge *br, const flow_t *flow, uint16_t vlan,
const struct port *in_port, const struct port *out_port,
- tag_type *tags, struct odp_actions *actions)
+ tag_type *tags, struct odp_actions *actions,
+ uint16_t *nf_output_iface)
{
struct dst dsts[DP_MAX_PORTS * (MAX_MIRRORS + 1)];
size_t n_dsts;
const struct dst *p;
uint16_t cur_vlan;
- n_dsts = compose_dsts(br, flow, vlan, in_port, out_port, dsts, tags);
+ n_dsts = compose_dsts(br, flow, vlan, in_port, out_port, dsts, tags,
+ nf_output_iface);
cur_vlan = ntohs(flow->dl_vlan);
for (p = dsts; p < &dsts[n_dsts]; p++) {
}
static bool
-is_bcast_arp_reply(const flow_t *flow, const struct ofpbuf *packet)
+is_bcast_arp_reply(const flow_t *flow)
{
- struct arp_eth_header *arp = (struct arp_eth_header *) packet->data;
return (flow->dl_type == htons(ETH_TYPE_ARP)
- && eth_addr_is_broadcast(flow->dl_dst)
- && packet->size >= sizeof(struct arp_eth_header)
- && arp->ar_op == ARP_OP_REQUEST);
+ && flow->nw_proto == ARP_OP_REPLY
+ && eth_addr_is_broadcast(flow->dl_dst));
}
/* If the composed actions may be applied to any packet in the given 'flow',
static bool
process_flow(struct bridge *br, const flow_t *flow,
const struct ofpbuf *packet, struct odp_actions *actions,
- tag_type *tags)
+ tag_type *tags, uint16_t *nf_output_iface)
{
struct iface *in_iface;
struct port *in_port;
struct port *out_port = NULL; /* By default, drop the packet/flow. */
int vlan;
+ int out_port_idx;
/* Find the interface and port structure for the received packet. */
in_iface = iface_from_dp_ifidx(br, flow->in_port);
* to this rule: the host has moved to another switch. */
src_idx = mac_learning_lookup(br->ml, flow->dl_src, vlan);
if (src_idx != -1 && src_idx != in_port->port_idx &&
- (!packet || !is_bcast_arp_reply(flow, packet))) {
+ !is_bcast_arp_reply(flow)) {
goto done;
}
}
/* MAC learning. */
out_port = FLOOD_PORT;
- if (br->ml) {
- int out_port_idx;
-
- /* Learn source MAC (but don't try to learn from revalidation). */
- if (packet) {
- tag_type rev_tag = mac_learning_learn(br->ml, flow->dl_src,
- vlan, in_port->port_idx);
- if (rev_tag) {
- /* The log messages here could actually be useful in debugging,
- * so keep the rate limit relatively high. */
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30,
- 300);
- VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
- "on port %s in VLAN %d",
- br->name, ETH_ADDR_ARGS(flow->dl_src),
- in_port->name, vlan);
- ofproto_revalidate(br->ofproto, rev_tag);
- }
- }
-
- /* Determine output port. */
- out_port_idx = mac_learning_lookup_tag(br->ml, flow->dl_dst, vlan,
- tags);
- if (out_port_idx >= 0 && out_port_idx < br->n_ports) {
- out_port = br->ports[out_port_idx];
- } else if (!packet) {
- /* If we are revalidating but don't have a learning entry then
- * eject the flow. Installing a flow that floods packets will
- * prevent us from seeing future packets and learning properly. */
- return false;
- }
+ /* Learn source MAC (but don't try to learn from revalidation). */
+ if (packet) {
+ tag_type rev_tag = mac_learning_learn(br->ml, flow->dl_src,
+ vlan, in_port->port_idx);
+ if (rev_tag) {
+ /* The log messages here could actually be useful in debugging,
+ * so keep the rate limit relatively high. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30,
+ 300);
+ VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
+ "on port %s in VLAN %d",
+ br->name, ETH_ADDR_ARGS(flow->dl_src),
+ in_port->name, vlan);
+ ofproto_revalidate(br->ofproto, rev_tag);
+ }
+ }
+
+ /* Determine output port. */
+ out_port_idx = mac_learning_lookup_tag(br->ml, flow->dl_dst, vlan,
+ tags);
+ if (out_port_idx >= 0 && out_port_idx < br->n_ports) {
+ out_port = br->ports[out_port_idx];
+ } else if (!packet) {
+ /* If we are revalidating but don't have a learning entry then
+ * eject the flow. Installing a flow that floods packets will
+ * prevent us from seeing future packets and learning properly. */
+ return false;
}
/* Don't send packets out their input ports. Don't forward frames that STP
}
done:
- compose_actions(br, flow, vlan, in_port, out_port, tags, actions);
+ compose_actions(br, flow, vlan, in_port, out_port, tags, actions,
+ nf_output_iface);
- /*
- * We send out only a single packet, instead of setting up a flow, if the
- * packet is an ARP directed to broadcast that arrived on a bonded
- * interface. In such a situation ARP requests and replies must be handled
- * differently, but OpenFlow unfortunately can't distinguish them.
- */
- return (in_port->n_ifaces < 2
- || flow->dl_type != htons(ETH_TYPE_ARP)
- || !eth_addr_is_broadcast(flow->dl_dst));
+ return true;
}
/* Careful: 'opp' is in host byte order and opp->port_no is an OFP port
static bool
bridge_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
- struct odp_actions *actions, tag_type *tags, void *br_)
+ struct odp_actions *actions, tag_type *tags,
+ uint16_t *nf_output_iface, void *br_)
{
struct bridge *br = br_;
#endif
COVERAGE_INC(bridge_process_flow);
- return process_flow(br, flow, packet, actions, tags);
+ return process_flow(br, flow, packet, actions, tags, nf_output_iface);
}
static void
struct ofpbuf packet;
int error, n_packets, n_errors;
- if (!port->n_ifaces || port->active_iface < 0 || !br->ml) {
+ if (!port->n_ifaces || port->active_iface < 0) {
return;
}
continue;
}
- ds_put_format(&ds, "\thash %d: %lld kB load\n",
+ ds_put_format(&ds, "\thash %d: %"PRIu64" kB load\n",
hash, be->tx_bytes / 1024);
/* MACs. */
- if (!port->bridge->ml) {
- break;
- }
-
LIST_FOR_EACH (me, struct mac_entry, lru_node,
&port->bridge->ml->lrus) {
uint16_t dp_ifidx;
mirror_reconfigure(struct bridge *br)
{
struct svec old_mirrors, new_mirrors;
- size_t i;
+ size_t i, n_rspan_vlans;
+ unsigned long *rspan_vlans;
/* Collect old and new mirrors. */
svec_init(&old_mirrors);
m->out_port->is_mirror_output_port = true;
}
}
+
+ /* Update learning disabled vlans (for RSPAN). */
+ rspan_vlans = NULL;
+ n_rspan_vlans = cfg_count("vlan.%s.disable-learning", br->name);
+ if (n_rspan_vlans) {
+ rspan_vlans = bitmap_allocate(4096);
+
+ for (i = 0; i < n_rspan_vlans; i++) {
+ int vlan = cfg_get_vlan(i, "vlan.%s.disable-learning", br->name);
+ if (vlan >= 0) {
+ bitmap_set1(rspan_vlans, vlan);
+ } else {
+ VLOG_ERR("bridge %s: invalid value '%s' for learning disabled "
+ "VLAN", br->name,
+ cfg_get_string(i, "vlan.%s.disable-learning", br->name));
+ }
+ }
+ }
+ if (mac_learning_set_disabled_vlans(br->ml, rspan_vlans)) {
+ bridge_flush(br);
+ }
}
static void
* OpenFlow message. */
new_oh = ofpbuf_at(&ext_data_buffer, 0, 65536);
if (!new_oh) {
- VLOG_WARN_RL(&rl, "received short embedded message: %d\n",
+ VLOG_WARN_RL(&rl, "received short embedded message: %zu\n",
ext_data_buffer.size);
return -EINVAL;
}
Lists each MAC address/VLAN pair learned by the specified \fIbridge\fR,
along with the port on which it was learned and the age of the entry,
in seconds.
+.
+.IP "\fBbridge/dump-flows\fR \fIbridge\fR"
+Lists all flows in \fIbridge\fR, including those normally hidden from
+commands such as \fBovs-ofctl dump-flows\fR. Flows set up by mechanisms
+such as in-band control and fail-open are hidden from the controller
+since it is not allowed to modify or override them.
.SS "BOND COMMANDS"
These commands manage bonded ports on an Open vSwitch's bridges. To
understand some of these commands, it is important to understand a
in this scenario, then the physical switch must be replaced by one
that learns Ethernet addresses on a per-VLAN basis. In addition,
learning should be disabled on the VLAN containing mirrored traffic.
-If this is not done then the intermediate switch will learn the MAC
+If this is not done then intermediate switches will learn the MAC
address of each end host from the mirrored traffic. If packets being
sent to that end host are also mirrored, then they will be dropped
since the switch will attempt to send them out the input port.
Disabling learning for the VLAN will cause the switch to correctly
-send the packet out all ports configured for that VLAN.
send the packet out all ports configured for that VLAN. If Open
+vSwitch is being used as an intermediate switch, learning can be disabled
+by setting the key \fBvlan.\fIbrname\fB.disable-learning=\fIvid\fR
+to the mirrored VLAN.
.ST "Example"
The following \fBovs\-vswitchd\fR configuration copies all frames received
on \fBeth1\fR or \fBeth2\fR to \fBeth3\fR.
.fi
.SS "NetFlow v5 Flow Logging"
-NetFlow is a protocol that exports a number of details about terminating
-IP flows, such as the principals involved and duration. A bridge may be
-configured to send NetFlow v5 records to NetFlow collectors when flows
-end. To enable, define the key \fBnetflow.\fIbridge\fB.host\fR for each
-collector in the form \fIip\fB:\fIport\fR. Records from \fIbridge\fR
+NetFlow is a protocol that exports a number of details about terminating
+IP flows, such as the principals involved and duration. A bridge may be
+configured to send NetFlow v5 records to NetFlow collectors when flows
+end. To enable, define the key \fBnetflow.\fIbridge\fB.host\fR for each
+collector in the form \fIip\fB:\fIport\fR. Records from \fIbridge\fR
will be sent to each \fIip\fR on UDP \fIport\fR. The \fIip\fR must
be specified numerically, not as a DNS name.
-The NetFlow messages will use the datapath index for the engine type and id.
-This can be overridden with the \fBnetflow.\fIbridge\fB.engine-type\fR and
+In addition to terminating flows, NetFlow can also send records at a set
+interval for flows that are still active. This interval can be configured
+by defining the key \fBnetflow.\fIbridge\fB.active-timeout\fR. The value
+is in seconds. An active timeout of 0 will disable this functionality.
+By default the timeout value is 600 seconds.
+
+The NetFlow messages will use the datapath index for the engine type and id.
+This can be overridden with the \fBnetflow.\fIbridge\fB.engine-type\fR and
\fBnetflow.\fIbridge\fB.engine-id\fR, respectively. Each takes a value
-between 0 and 255, inclusive.
+between 0 and 255, inclusive.
Many NetFlow collectors do not expect multiple switches to be
sending messages from the same host, and they do not store the engine
flows from multiple switches appearing as if they came on the interface,
add \fBnetflow.\fIbridge\fB.add-id-to-iface=true\fR to the configuration
file. This will place the least significant 7 bits of the engine id
-into the most significant bits of the ingress and egress interface fields
-of flow records. By default, this behavior is disabled.
+into the most significant bits of the ingress and egress interface fields
+of flow records. When this option is enabled, a maximum of 508 ports are
+supported. By default, this behavior is disabled.
+
+The egress interface field normally contains the OpenFlow port number;
+however, certain port values have special meaning: 0xFFFF indicates
+flooding, 0xFFFE indicates multiple controller-specified output
+interfaces, and 0xFFFD means that packets from the flow were dropped.
+If add-id-to-iface is enabled then these values become 0x1FF, 0x1FE,
+and 0x1FD respectively.
The following syntax sends NetFlow records for \fBmybr\fR to the NetFlow
collector \fBnflow.example.com\fR on UDP port \fB9995\fR:
ifdown(ipdev)
if dp:
- #nw = db.get_pif_record(pif)['network']
- #nwrec = db.get_network_record(nw)
- #cfgmod_argv += ['# deconfigure xs-network-uuids']
- #cfgmod_argv += ['--del-entry=bridge.%s.xs-network-uuids=%s' % (bridge,nwrec['uuid'])]
+ nw = db.get_pif_record(pif)['network']
+ nwrec = db.get_network_record(nw)
+ cfgmod_argv += ['# deconfigure xs-network-uuids']
+ cfgmod_argv += ['--del-entry=bridge.%s.xs-network-uuids=%s' % (bridge,nwrec['uuid'])]
log("deconfigure ipdev %s on %s" % (ipdev,bridge))
cfgmod_argv += ["# deconfigure ipdev %s" % ipdev]
# Query XAPI for the information we need using the vif's opaque reference
def dump_vif_info(domid, devid, vif_ref):
+ vif_info = []
+ session = XenAPI.xapi_local()
+ session.xenapi.login_with_password("root", "")
try:
- session = XenAPI.xapi_local()
- session.xenapi.login_with_password("root", "")
vif_rec = session.xenapi.VIF.get_record(vif_ref)
net_rec = session.xenapi.network.get_record(vif_rec["network"])
- vm_rec = session.xenapi.VM.get_record(vif_rec["VM"])
+ vm_uuid = session.xenapi.VM.get_uuid(vif_rec["VM"])
# Data to allow vNetManager to associate VIFs with xapi data
- sys.stdout.write('--add=port.vif%s.%s.net-uuid=%s '
- % (domid, devid, net_rec["uuid"]))
- sys.stdout.write('--add=port.vif%s.%s.vif-mac=%s '
- % (domid, devid, vif_rec["MAC"]))
- sys.stdout.write('--add=port.vif%s.%s.vif-uuid=%s '
- % (domid, devid, vif_rec["uuid"]))
- sys.stdout.write('--add=port.vif%s.%s.vm-uuid=%s '
- % (domid, devid, vm_rec["uuid"]))
+ add_port = '--add=port.vif%s.%s' % (domid, devid)
+ vif_info.append('%s.net-uuid=%s' % (add_port, net_rec["uuid"]))
+ vif_info.append('%s.vif-mac=%s' % (add_port, vif_rec["MAC"]))
+ vif_info.append('%s.vif-uuid=%s' % (add_port, vif_rec["uuid"]))
+ vif_info.append('%s.vm-uuid=%s' % (add_port, vm_uuid))
# vNetManager needs to know the network UUID(s) associated with
# each datapath. Normally interface-reconfigure adds them, but
# There may still be a brief delay between the initial
# ovs-vswitchd connection to vNetManager and setting this
# configuration variable, but vNetManager can tolerate that.
- if len(net_rec['PIFs']) == 0:
+ if not net_rec['PIFs']:
key = 'bridge.%s.xs-network-uuids' % net_rec['bridge']
value = net_rec['uuid']
- sys.stdout.write('--del-match=%s=* ' % key)
- sys.stdout.write('--add=%s=%s ' % (key, value))
+ vif_info.append('--del-match=%s=*' % key)
+ vif_info.append('--add=%s=%s' % (key, value))
finally:
session.xenapi.session.logout()
+ print ' '.join(vif_info)
if __name__ == '__main__':
- if (len(sys.argv) != 3):
- sys.stderr.write("ERROR: %s <domid> <devid>\n")
+ if len(sys.argv) != 3:
+ sys.stderr.write("ERROR: %s <domid> <devid>\n" % sys.argv[0])
sys.exit(1)
domid = sys.argv[1]
vif_ref = get_vif_ref(domid, devid)
if not vif_ref:
sys.stderr.write("ERROR: Could not find interface vif%s.%s\n"
- % (domid, devid))
+ % (domid, devid))
sys.exit(1)
dump_vif_info(domid, devid, vif_ref)