#include "random.h"
#include "shash.h"
#include "sset.h"
+#include "timeval.h"
#include "unaligned.h"
#include "util.h"
#include "vlog.h"
VLOG_DEFINE_THIS_MODULE(dpif_linux);
enum { MAX_PORTS = USHRT_MAX };
-enum { N_UPCALL_SOCKS = 17 };
-BUILD_ASSERT_DECL(IS_POW2(N_UPCALL_SOCKS - 1));
-BUILD_ASSERT_DECL(N_UPCALL_SOCKS > 1);
-BUILD_ASSERT_DECL(N_UPCALL_SOCKS <= 32); /* We use a 32-bit word as a mask. */
+/* Number of kernel-to-user Netlink channels per dpif.  Channel 0 is used
+ * when no specific port applies; otherwise a port maps to channel
+ * 1 + (port_no & (N_CHANNELS - 2)) (see dpif_linux_port_get_pid()), which
+ * is why N_CHANNELS - 1 must be a power of 2. */
+enum { N_CHANNELS = 17 };
+BUILD_ASSERT_DECL(IS_POW2(N_CHANNELS - 1));
+BUILD_ASSERT_DECL(N_CHANNELS > 1);
+BUILD_ASSERT_DECL(N_CHANNELS <= 32); /* We use a 32-bit word as a mask. */
/* This ethtool flag was introduced in Linux 2.6.24, so it might be
* missing if we have old headers. */
static void dpif_linux_flow_get_stats(const struct dpif_linux_flow *,
struct dpif_flow_stats *);
+/* Packet drop monitoring.
+ *
+ * When kernel-to-user Netlink buffers overflow, the kernel notifies us that
+ * one or more packets were dropped, but it doesn't tell us anything about
+ * those packets. However, the administrator really wants to know. So we do
+ * the next best thing, and keep track of the top sources of packets received
+ * on each kernel-to-user channel, since the top sources are those that will
+ * cause the buffers to overflow.
+ *
+ * We use a variation on the "Space-Saving" algorithm in Metwally et al.,
+ * "Efficient Computation of Frequent and Top-k Elements in Data Streams", ACM
+ * Transactions on Database Systems 31:3 (2006). This algorithm yields
+ * perfectly accurate results when the data stream's unique values (in this
+ * case, port numbers) fit into our data structure, and degrades gracefully
+ * even for challenging distributions (e.g. Zipf).
+ *
+ * Our implementation is very simple, without any of the special flourishes
+ * described in the paper. It avoids the need to use a hash for lookup by
+ * keeping the constant factor (N_SKETCHES) very small. The error calculations
+ * in the paper make it sound like the results should still be satisfactory.
+ *
+ * "space-saving" and "Metwally" seem like awkward names for data structures,
+ * so we call this a "sketch" even though technically that's a different sort
+ * of summary structure.
+ */
+
+/* One counting element in our variation of the Metwally "space-saving"
+ * algorithm: the number of packets observed from one input port on one
+ * channel.  Each channel keeps N_SKETCHES of these, ordered from max to
+ * min 'hits'. */
+enum { N_SKETCHES = 8 };        /* Number of elements per channel. */
+struct dpif_sketch {
+    uint32_t port_no;           /* Port number. */
+    unsigned int hits;          /* Number of hits. */
+    unsigned int error;         /* Upper bound on overcount in 'hits': the
+                                 * true count is at least hits - error. */
+};
+
+/* One of N_CHANNELS channels per dpif between the kernel and userspace. */
+struct dpif_channel {
+    struct nl_sock *sock;       /* Netlink socket. */
+    struct dpif_sketch sketches[N_SKETCHES]; /* From max to min 'hits'. */
+    long long int last_poll;    /* Last time this channel was polled, in
+                                 * msec, or LLONG_MIN if never polled. */
+};
+
+static void update_sketch(struct dpif_channel *, uint32_t port_no);
+static void scale_sketches(struct dpif *);
+static void report_loss(struct dpif *, struct dpif_channel *);
+
+/* Interval, in milliseconds, at which to scale down the sketch values by a
+ * factor of 2. The Metwally algorithm doesn't do this, which makes sense in
+ * the context it assumes, but in our situation we ought to weight recent data
+ * more heavily than old data, so in my opinion this is reasonable. */
+#define SCALE_INTERVAL (60 * 1000)
+
/* Datapath interface for the openvswitch Linux kernel module. */
struct dpif_linux {
struct dpif dpif;
int dp_ifindex;
/* Upcall messages. */
- struct nl_sock *upcall_socks[N_UPCALL_SOCKS];
+ struct dpif_channel channels[N_CHANNELS];
uint32_t ready_mask; /* 1-bit for each sock with unread messages. */
- int epoll_fd; /* epoll fd that includes the upcall socks. */
+ int epoll_fd; /* epoll fd that includes channel socks. */
+ long long int next_scale; /* Next time to scale down the sketches. */
/* Change notification. */
struct sset changed_ports; /* Ports that have changed. */
bool change_error;
/* Port number allocation. */
- uint16_t alloc_port_no;
+ uint32_t alloc_port_no;
};
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
static void open_dpif(const struct dpif_linux_dp *, struct dpif **);
static bool dpif_linux_nln_parse(struct ofpbuf *, void *);
static void dpif_linux_port_changed(const void *vport, void *dpif);
-static uint32_t dpif_linux_port_get_pid(const struct dpif *, uint16_t port_no);
+static uint32_t dpif_linux_port_get_pid(const struct dpif *, uint32_t port_no);
static void dpif_linux_vport_to_ofpbuf(const struct dpif_linux_vport *,
struct ofpbuf *);
dpif_init(&dpif->dpif, &dpif_linux_class, dp->name,
dp->dp_ifindex, dp->dp_ifindex);
+ dpif->next_scale = LLONG_MAX;
+
dpif->dp_ifindex = dp->dp_ifindex;
sset_init(&dpif->changed_ports);
*dpifp = &dpif->dpif;
}
+/* Closes 'dpif''s epoll fd (if it is open) and destroys each channel's
+ * Netlink socket.  Also sets 'next_scale' to LLONG_MAX, which suppresses
+ * sketch scaling in dpif_linux_run() until channels are created again. */
static void
-destroy_upcall_socks(struct dpif_linux *dpif)
+destroy_channels(struct dpif_linux *dpif)
{
-    int i;
+    struct dpif_channel *ch;
    if (dpif->epoll_fd >= 0) {
        close(dpif->epoll_fd);
        dpif->epoll_fd = -1;
    }
-    for (i = 0; i < N_UPCALL_SOCKS; i++) {
-        nl_sock_destroy(dpif->upcall_socks[i]);
-        dpif->upcall_socks[i] = NULL;
+    for (ch = dpif->channels; ch < &dpif->channels[N_CHANNELS]; ch++) {
+        nl_sock_destroy(ch->sock);
+        ch->sock = NULL;
    }
+    dpif->next_scale = LLONG_MAX;
}
static void
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
nln_notifier_destroy(dpif->port_notifier);
- destroy_upcall_socks(dpif);
+ destroy_channels(dpif);
sset_destroy(&dpif->changed_ports);
free(dpif);
}
}
static void
-dpif_linux_run(struct dpif *dpif OVS_UNUSED)
+dpif_linux_run(struct dpif *dpif_)
{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ if (time_msec() >= dpif->next_scale) {
+ dpif->next_scale = time_msec() + SCALE_INTERVAL;
+ scale_sketches(dpif_);
+ }
+
if (nln) {
nln_run(nln);
}
static int
dpif_linux_port_add(struct dpif *dpif_, struct netdev *netdev,
- uint16_t *port_nop)
+ uint32_t *port_nop)
{
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
const char *name = netdev_get_name(netdev);
netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
}
- /* Loop until we find a port that isn't used. */
+ /* Unless a specific port was requested, loop until we find a port
+ * that isn't used. */
do {
uint32_t upcall_pid;
- request.port_no = ++dpif->alloc_port_no;
+ request.port_no = *port_nop != UINT32_MAX ? *port_nop
+ : ++dpif->alloc_port_no;
upcall_pid = dpif_linux_port_get_pid(dpif_, request.port_no);
request.upcall_pid = &upcall_pid;
error = dpif_linux_vport_transact(&request, &reply, &buf);
/* Older datapath has lower limit. */
max_ports = dpif->alloc_port_no;
dpif->alloc_port_no = 0;
+ } else if (error == EBUSY && *port_nop != UINT32_MAX) {
+ VLOG_INFO("%s: requested port %"PRIu32" is in use",
+ dpif_name(dpif_), *port_nop);
}
ofpbuf_delete(buf);
- } while ((i++ < max_ports)
+ } while ((*port_nop == UINT32_MAX) && (i++ < max_ports)
&& (error == EBUSY || error == EFBIG));
return error;
}
static int
-dpif_linux_port_del(struct dpif *dpif_, uint16_t port_no)
+dpif_linux_port_del(struct dpif *dpif_, uint32_t port_no)
{
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
struct dpif_linux_vport vport;
/* A query by name reported that 'port_name' is in some datapath
* other than 'dpif', but the caller wants to know about 'dpif'. */
error = ENODEV;
- } else {
+ } else if (dpif_port) {
dpif_port->name = xstrdup(reply.name);
dpif_port->type = xstrdup(netdev_vport_get_netdev_type(&reply));
dpif_port->port_no = reply.port_no;
}
static int
-dpif_linux_port_query_by_number(const struct dpif *dpif, uint16_t port_no,
+dpif_linux_port_query_by_number(const struct dpif *dpif, uint32_t port_no,
struct dpif_port *dpif_port)
{
return dpif_linux_port_query__(dpif, port_no, NULL, dpif_port);
}
static uint32_t
-dpif_linux_port_get_pid(const struct dpif *dpif_, uint16_t port_no)
+dpif_linux_port_get_pid(const struct dpif *dpif_, uint32_t port_no)
{
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
} else {
int idx;
- idx = (port_no != UINT16_MAX
- ? 1 + (port_no & (N_UPCALL_SOCKS - 2))
+ idx = (port_no != UINT32_MAX
+ ? 1 + (port_no & (N_CHANNELS - 2))
: 0);
- return nl_sock_pid(dpif->upcall_socks[idx]);
+ return nl_sock_pid(dpif->channels[idx].sock);
}
}
return error;
}
- dpif_port->name = (char *) vport.name;
- dpif_port->type = (char *) netdev_vport_get_netdev_type(&vport);
+ dpif_port->name = CONST_CAST(char *, vport.name);
+ dpif_port->type = CONST_CAST(char *, netdev_vport_get_netdev_type(&vport));
dpif_port->port_no = vport.port_no;
return 0;
}
dpif_linux_flow_get_stats(&reply, stats);
}
if (actionsp) {
- buf->data = (void *) reply.actions;
+ buf->data = CONST_CAST(struct nlattr *, reply.actions);
buf->size = reply.actions_len;
*actionsp = buf;
} else {
switch (op->type) {
case DPIF_OP_FLOW_PUT:
put = &op->u.flow_put;
- if (!op->error && put->stats) {
- struct dpif_linux_flow reply;
-
- op->error = dpif_linux_flow_from_ofpbuf(&reply, txn->reply);
+ if (put->stats) {
if (!op->error) {
- dpif_linux_flow_get_stats(&reply, put->stats);
+ struct dpif_linux_flow reply;
+
+ op->error = dpif_linux_flow_from_ofpbuf(&reply,
+ txn->reply);
+ if (!op->error) {
+ dpif_linux_flow_get_stats(&reply, put->stats);
+ }
+ }
+
+ if (op->error) {
+ memset(put->stats, 0, sizeof *put->stats);
}
}
break;
case DPIF_OP_FLOW_DEL:
del = &op->u.flow_del;
- if (!op->error && del->stats) {
- struct dpif_linux_flow reply;
-
- op->error = dpif_linux_flow_from_ofpbuf(&reply, txn->reply);
+ if (del->stats) {
if (!op->error) {
- dpif_linux_flow_get_stats(&reply, del->stats);
+ struct dpif_linux_flow reply;
+
+ op->error = dpif_linux_flow_from_ofpbuf(&reply,
+ txn->reply);
+ if (!op->error) {
+ dpif_linux_flow_get_stats(&reply, del->stats);
+ }
+ }
+
+ if (op->error) {
+ memset(del->stats, 0, sizeof *del->stats);
}
}
break;
}
if (!enable) {
- destroy_upcall_socks(dpif);
+ destroy_channels(dpif);
} else {
- int i;
+ struct dpif_channel *ch;
int error;
- dpif->epoll_fd = epoll_create(N_UPCALL_SOCKS);
+ dpif->epoll_fd = epoll_create(N_CHANNELS);
if (dpif->epoll_fd < 0) {
return errno;
}
- for (i = 0; i < N_UPCALL_SOCKS; i++) {
+ for (ch = dpif->channels; ch < &dpif->channels[N_CHANNELS]; ch++) {
+ int indx = ch - dpif->channels;
struct epoll_event event;
- error = nl_sock_create(NETLINK_GENERIC, &dpif->upcall_socks[i]);
+ error = nl_sock_create(NETLINK_GENERIC, &ch->sock);
if (error) {
- destroy_upcall_socks(dpif);
+ destroy_channels(dpif);
return error;
}
memset(&event, 0, sizeof event);
event.events = EPOLLIN;
- event.data.u32 = i;
- if (epoll_ctl(dpif->epoll_fd, EPOLL_CTL_ADD,
- nl_sock_fd(dpif->upcall_socks[i]), &event) < 0) {
+ event.data.u32 = indx;
+ if (epoll_ctl(dpif->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(ch->sock),
+ &event) < 0) {
error = errno;
- destroy_upcall_socks(dpif);
+ destroy_channels(dpif);
return error;
}
+
+ memset(ch->sketches, 0, sizeof ch->sketches);
+ ch->last_poll = LLONG_MIN;
}
dpif->ready_mask = 0;
+ dpif->next_scale = time_msec() + SCALE_INTERVAL;
}
set_upcall_pids(dpif_);
memset(upcall, 0, sizeof *upcall);
upcall->type = type;
upcall->packet = buf;
- upcall->packet->data = (void *) nl_attr_get(a[OVS_PACKET_ATTR_PACKET]);
+ upcall->packet->data = CONST_CAST(struct nlattr *,
+ nl_attr_get(a[OVS_PACKET_ATTR_PACKET]));
upcall->packet->size = nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]);
- upcall->key = (void *) nl_attr_get(a[OVS_PACKET_ATTR_KEY]);
+ upcall->key = CONST_CAST(struct nlattr *,
+ nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
upcall->userdata = (a[OVS_PACKET_ATTR_USERDATA]
? nl_attr_get_u64(a[OVS_PACKET_ATTR_USERDATA])
}
if (!dpif->ready_mask) {
- struct epoll_event events[N_UPCALL_SOCKS];
+ struct epoll_event events[N_CHANNELS];
int retval;
int i;
do {
- retval = epoll_wait(dpif->epoll_fd, events, N_UPCALL_SOCKS, 0);
+ retval = epoll_wait(dpif->epoll_fd, events, N_CHANNELS, 0);
} while (retval < 0 && errno == EINTR);
if (retval < 0) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
while (dpif->ready_mask) {
int indx = ffs(dpif->ready_mask) - 1;
- struct nl_sock *upcall_sock = dpif->upcall_socks[indx];
+ struct dpif_channel *ch = &dpif->channels[indx];
dpif->ready_mask &= ~(1u << indx);
return EAGAIN;
}
- error = nl_sock_recv(upcall_sock, buf, false);
+ error = nl_sock_recv(ch->sock, buf, false);
+ if (error == ENOBUFS) {
+ /* ENOBUFS typically means that we've received so many
+ * packets that the buffer overflowed. Try again
+ * immediately because there's almost certainly a packet
+ * waiting for us. */
+ report_loss(dpif_, ch);
+ continue;
+ }
+
+ ch->last_poll = time_msec();
if (error) {
if (error == EAGAIN) {
break;
error = parse_odp_packet(buf, upcall, &dp_ifindex);
if (!error && dp_ifindex == dpif->dp_ifindex) {
+ const struct nlattr *in_port;
+
+ in_port = nl_attr_find__(upcall->key, upcall->key_len,
+ OVS_KEY_ATTR_IN_PORT);
+ if (in_port) {
+ update_sketch(ch, nl_attr_get_u32(in_port));
+ }
return 0;
}
if (error) {
dpif_linux_recv_purge(struct dpif *dpif_)
{
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
- int i;
+ struct dpif_channel *ch;
if (dpif->epoll_fd < 0) {
return;
}
- for (i = 0; i < N_UPCALL_SOCKS; i++) {
- nl_sock_drain(dpif->upcall_socks[i]);
+ for (ch = dpif->channels; ch < &dpif->channels[N_CHANNELS]; ch++) {
+ nl_sock_drain(ch->sock);
}
}
uint64_t action;
ofpbuf_use_const(&packet, data, size);
- flow_extract(&packet, 0, htonll(0), 0, &flow);
+ flow_extract(&packet, 0, NULL, 0, &flow);
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, &flow);
+ odp_flow_key_from_flow(&key, &flow, OVSP_NONE);
ofpbuf_use_stack(&actions, &action, sizeof action);
nl_msg_put_u32(&actions, OVS_ACTION_ATTR_OUTPUT, port_no);
stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
}
+\f
+/* Metwally "space-saving" algorithm implementation. */
+
+/* Updates 'ch' to record that a packet was received on 'port_no'.
+ *
+ * 'ch->sketches' is kept ordered from max to min 'hits', so the last
+ * element always has the fewest hits and is the one the algorithm evicts
+ * when 'port_no' is not already being counted. */
+static void
+update_sketch(struct dpif_channel *ch, uint32_t port_no)
+{
+    struct dpif_sketch *sk;
+
+    /* Find an existing counting element for 'port_no' or, if none, replace the
+     * counting element with the fewest hits by 'port_no'. */
+    for (sk = ch->sketches; ; sk++) {
+        if (port_no == sk->port_no) {
+            break;
+        } else if (sk == &ch->sketches[N_SKETCHES - 1]) {
+            /* Evict the minimum element.  Its accumulated count becomes the
+             * upper bound on the error in the new element's count. */
+            sk->port_no = port_no;
+            sk->error = sk->hits;
+            break;
+        }
+    }
+
+    /* Increment the hit count, then re-sort the counting elements (usually
+     * nothing needs to be done).  Since the array is ordered by decreasing
+     * 'hits', after the increment 'sk' can only need to move toward the
+     * front, past neighbors that now have strictly fewer hits. */
+    sk->hits++;
+    while (sk > ch->sketches && sk[-1].hits < sk->hits) {
+        struct dpif_sketch tmp = sk[-1];
+        sk[-1] = *sk;
+        *sk = tmp;
+        sk--;
+    }
+}
+
+/* Halves the counts in every counting element of every channel in 'dpif'.
+ * See the comment on SCALE_INTERVAL for why we age old data this way. */
+static void
+scale_sketches(struct dpif *dpif_)
+{
+    struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+    int i, j;
+
+    for (i = 0; i < N_CHANNELS; i++) {
+        for (j = 0; j < N_SKETCHES; j++) {
+            struct dpif_sketch *sk = &dpif->channels[i].sketches[j];
+
+            sk->hits /= 2;
+            sk->error /= 2;
+        }
+    }
+}
+
+/* Logs information about a packet that was recently lost in 'ch' (in
+ * 'dpif_'): how long ago the channel was last polled, plus the top packet
+ * sources recorded in the channel's sketches, so that the administrator can
+ * see which ports are most likely overflowing the kernel-to-user buffers.
+ *
+ * NOTE(review): the rate limiter is checked at ERR level but the message is
+ * logged at WARN -- confirm this mismatch is intentional. */
+static void
+report_loss(struct dpif *dpif_, struct dpif_channel *ch)
+{
+    struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
+    struct dpif_sketch *sk;
+    struct ds s;
+
+    /* Drops tend to arrive in bursts, so rate-limit the log messages. */
+    if (VLOG_DROP_ERR(&rl)) {
+        return;
+    }
+
+    ds_init(&s);
+    if (ch->last_poll != LLONG_MIN) {   /* LLONG_MIN means "never polled". */
+        ds_put_format(&s, " (last polled %lld ms ago)",
+                      time_msec() - ch->last_poll);
+    }
+    ds_put_cstr(&s, ", most frequent sources are");
+    for (sk = ch->sketches; sk < &ch->sketches[N_SKETCHES]; sk++) {
+        if (sk->hits) {
+            struct dpif_port port;
+
+            ds_put_format(&s, " %"PRIu32, sk->port_no);
+            if (!dpif_port_query_by_number(dpif_, sk->port_no, &port)) {
+                ds_put_format(&s, "(%s)", port.name);
+                dpif_port_destroy(&port);
+            }
+            if (sk->error) {
+                /* 'error' bounds the overcount, so the true number of hits
+                 * is somewhere in [hits - error, hits]. */
+                ds_put_format(&s, ": %u to %u,",
+                              sk->hits - sk->error, sk->hits);
+            } else {
+                ds_put_format(&s, ": %u,", sk->hits);
+            }
+        }
+    }
+    ds_chomp(&s, ',');          /* Drop the trailing comma, if any. */
+
+    VLOG_WARN("%s: lost packet on channel %td%s",
+              dpif_name(dpif_), ch - dpif->channels, ds_cstr(&s));
+    ds_destroy(&s);
+}