ovs_fatal(error,
"failed to configure controller snooping connections");
}
- error = ofproto_set_netflow(ofproto, &s.netflow);
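+    /* No engine configuration here: use an engine type and id of 0 and do
+     * not embed the engine id in the interface numbers. */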
+ error = ofproto_set_netflow(ofproto, &s.netflow, 0, 0, false);
if (error) {
ovs_fatal(error, "failed to configure NetFlow collectors");
}
after epoch seconds. */
uint32_t flow_seq; /* Number of flows since sending
messages began. */
- uint8_t engine_type; /* Set to zero. */
- uint8_t engine_id; /* Set to zero. */
+ uint8_t engine_type; /* Type of flow-switching engine. */
+ uint8_t engine_id; /* Slot number of the flow-switching engine. */
uint16_t sampling_interval; /* Set to zero. */
};
BUILD_ASSERT_DECL(sizeof(struct netflow_v5_header) == 24);
BUILD_ASSERT_DECL(sizeof(struct netflow_v5_record) == 48);
struct netflow {
+ uint8_t engine_type; /* Value of engine_type to use. */
+ uint8_t engine_id; /* Value of engine_id to use. */
long long int boot_time; /* Time when netflow_create() was called. */
int *fds; /* Sockets for NetFlow collectors. */
size_t n_fds; /* Number of Netflow collectors. */
+ bool add_id_to_iface; /* Put the 7 least significant bits of
+ * 'engine_id' into the most significant
+ * bits of the interface fields. */
uint32_t netflow_cnt; /* Flow sequence number for NetFlow. */
struct ofpbuf packet; /* NetFlow packet being accumulated. */
};
nf_hdr->unix_secs = htonl(now.tv_sec);
nf_hdr->unix_nsecs = htonl(now.tv_usec * 1000);
nf_hdr->flow_seq = htonl(nf->netflow_cnt++);
- nf_hdr->engine_type = 0;
- nf_hdr->engine_id = 0;
+ nf_hdr->engine_type = nf->engine_type;
+ nf_hdr->engine_id = nf->engine_id;
nf_hdr->sampling_interval = htons(0);
}
nf_rec->src_addr = expired->flow.nw_src;
nf_rec->dst_addr = expired->flow.nw_dst;
nf_rec->nexthop = htons(0);
- nf_rec->input = htons(expired->flow.in_port);
- nf_rec->output = htons(0);
+ if (nf->add_id_to_iface) {
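+ /* Pack the low 7 bits of the engine id into the top 7 bits of the
+ * 16-bit interface fields, leaving 9 bits for the port number.
+ * For example, engine id 5 and input port 3 yield input 0x0a03. */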
+ uint16_t iface = (nf->engine_id & 0x7f) << 9;
+ nf_rec->input = htons(iface | (expired->flow.in_port & 0x1ff));
+ nf_rec->output = htons(iface);
+ } else {
+ nf_rec->input = htons(expired->flow.in_port);
+ nf_rec->output = htons(0);
+ }
nf_rec->packet_count = htonl(MIN(expired->packet_count, UINT32_MAX));
nf_rec->byte_count = htonl(MIN(expired->byte_count, UINT32_MAX));
nf_rec->init_time = htonl(expired->created - nf->boot_time);
return error;
}
+void
+netflow_set_engine(struct netflow *nf, uint8_t engine_type,
+ uint8_t engine_id, bool add_id_to_iface)
+{
+ nf->engine_type = engine_type;
+ nf->engine_id = engine_id;
+ nf->add_id_to_iface = add_id_to_iface;
+}
+
struct netflow *
netflow_create(void)
{
struct netflow *nf = xmalloc(sizeof *nf);
+ nf->engine_type = 0;
+ nf->engine_id = 0;
nf->boot_time = time_msec();
nf->fds = NULL;
nf->n_fds = 0;
+ nf->add_id_to_iface = false;
nf->netflow_cnt = 0;
ofpbuf_init(&nf->packet, 1500);
return nf;
struct netflow *netflow_create(void);
void netflow_destroy(struct netflow *);
int netflow_set_collectors(struct netflow *, const struct svec *collectors);
+void netflow_set_engine(struct netflow *nf, uint8_t engine_type,
+ uint8_t engine_id, bool add_id_to_iface);
void netflow_expire(struct netflow *, const struct ofexpired *);
void netflow_run(struct netflow *);
}
int
-ofproto_set_netflow(struct ofproto *ofproto, const struct svec *collectors)
+ofproto_set_netflow(struct ofproto *ofproto, const struct svec *collectors,
+ uint8_t engine_type, uint8_t engine_id, bool add_id_to_iface)
{
if (collectors && collectors->n) {
if (!ofproto->netflow) {
ofproto->netflow = netflow_create();
}
+ netflow_set_engine(ofproto->netflow, engine_type, engine_id,
+ add_id_to_iface);
return netflow_set_collectors(ofproto->netflow, collectors);
} else {
netflow_destroy(ofproto->netflow);
int ofproto_set_controller(struct ofproto *, const char *controller);
int ofproto_set_listeners(struct ofproto *, const struct svec *listeners);
int ofproto_set_snoops(struct ofproto *, const struct svec *snoops);
-int ofproto_set_netflow(struct ofproto *, const struct svec *collectors);
+int ofproto_set_netflow(struct ofproto *, const struct svec *collectors,
+ uint8_t engine_type, uint8_t engine_id, bool add_id_to_iface);
void ofproto_set_failure(struct ofproto *, bool fail_open);
void ofproto_set_rate_limit(struct ofproto *, int rate_limit, int burst_limit);
int ofproto_set_stp(struct ofproto *, bool enable_stp);
uint64_t dpid;
struct iface *local_iface = NULL;
const char *devname;
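+ /* By default, use the datapath's minor number as the NetFlow engine
+ * type and id, so that each bridge reports a distinct engine. */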
+ uint8_t engine_type = br->dpif.minor;
+ uint8_t engine_id = br->dpif.minor;
+ bool add_id_to_iface = false;
struct svec nf_hosts;
+
bridge_fetch_dp_ifaces(br);
for (i = 0; i < br->n_ports; ) {
struct port *port = br->ports[i];
ofproto_set_datapath_id(br->ofproto, dpid);
/* Set NetFlow configuration on this bridge. */
+ if (cfg_has("netflow.%s.engine-type", br->name)) {
+ engine_type = cfg_get_int(0, "netflow.%s.engine-type",
+ br->name);
+ }
+ if (cfg_has("netflow.%s.engine-id", br->name)) {
+ engine_id = cfg_get_int(0, "netflow.%s.engine-id", br->name);
+ }
+ if (cfg_has("netflow.%s.add-id-to-iface", br->name)) {
+ add_id_to_iface = cfg_get_bool(0, "netflow.%s.add-id-to-iface",
+ br->name);
+ }
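+ /* Only the low 7 bits of the engine id and the low 9 bits of the port
+ * number fit into the 16-bit NetFlow interface fields, so warn about
+ * configurations that cannot be represented faithfully. */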
+ if (add_id_to_iface && engine_id > 0x7f) {
+ VLOG_WARN("bridge %s: netflow port mangling may conflict with "
+ "another vswitch; choose an engine id less than 128",
+ br->name);
+ }
+ if (add_id_to_iface && br->n_ports > 0x1ff) {
+ VLOG_WARN("bridge %s: netflow port mangling will conflict with "
+ "another port when 512 or more ports are used",
+ br->name);
+ }
svec_init(&nf_hosts);
cfg_get_all_keys(&nf_hosts, "netflow.%s.host", br->name);
- if (ofproto_set_netflow(br->ofproto, &nf_hosts)) {
+ if (ofproto_set_netflow(br->ofproto, &nf_hosts, engine_type,
+ engine_id, add_id_to_iface)) {
VLOG_ERR("bridge %s: problem setting netflow collectors",
br->name);
}
.fi
.SS "NetFlow v5 Flow Logging"
NetFlow is a protocol that exports a number of details about terminating
-flows, such as the principals involved and duration. A bridge may be
+IP flows, such as the principals involved and duration. A bridge may be
configured to send NetFlow v5 records to NetFlow collectors when flows
end. To enable, define the key \fBnetflow.\fIbridge\fB.host\fR for each
collector in the form \fIhost\fB:\fIport\fR. Records from \fIbridge\fR
will be sent to each \fIhost\fR on UDP \fIport\fR.
-.PP
+
+The NetFlow messages will use the datapath index for the engine type and id.
+This can be overridden with the \fBnetflow.\fIbridge\fB.engine-type\fR and
+\fBnetflow.\fIbridge\fB.engine-id\fR keys, respectively. Each takes a value
+between 0 and 255, inclusive.
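+For example, the following would set both the engine type and engine id
+of \fBmybr\fR to \fB5\fR:
+.PP
+.nf
+netflow.mybr.engine-type=5
+netflow.mybr.engine-id=5
+.fi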
+
+Many NetFlow collectors do not expect multiple virtual switches to be
+sending messages from the same host, and they do not store the engine
+information, which could be used to disambiguate the traffic. To prevent
+flows from multiple switches appearing as if they came on the same interface,
+add \fBnetflow.\fIbridge\fB.add-id-to-iface=true\fR to the configuration
+file. This will place the least significant 7 bits of the engine id
+into the most significant bits of the ingress and egress interface fields
+of flow records. By default, this behavior is disabled.
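+When enabled, a flow received on port \fB3\fR of a bridge with engine id
+\fB5\fR would, for example, be reported with an ingress interface of
+\fB2563\fR, i.e. (5 << 9) | 3.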
+
The following syntax sends NetFlow records for \fBmybr\fR to the NetFlow
collector \fBnflow.example.com\fR on UDP port \fB9995\fR:
.PP