/* Upper bound on datapath port numbers: they must fit in 16 bits. */
enum { MAX_PORTS = USHRT_MAX };

/* Number of kernel-to-user Netlink channels per dpif.  N_CHANNELS - 1 must be
 * a power of 2 — presumably one reserved channel plus a power-of-two set onto
 * which traffic is hashed with a mask; confirm against the code that selects
 * a channel. */
enum { N_CHANNELS = 17 };
BUILD_ASSERT_DECL(IS_POW2(N_CHANNELS - 1));
BUILD_ASSERT_DECL(N_CHANNELS > 1);
BUILD_ASSERT_DECL(N_CHANNELS <= 32); /* We use a 32-bit word as a mask. */
+
/* This ethtool flag was introduced in Linux 2.6.24, so it might be
 * missing if we have old headers.  Newer kernel headers define it
 * themselves, so only define it when the headers have not already done
 * so, to avoid a macro redefinition clash. */
#ifndef ETH_FLAG_LRO
#define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
#endif
+
/* Datapath-level Generic Netlink message, used both as the request sent to
 * the kernel and as the reply parsed from it (see
 * dpif_linux_dp_transact()). */
struct dpif_linux_dp {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    int dp_ifindex;

    /* Attributes. */
    const char *name;                  /* OVS_DP_ATTR_NAME. */
    const uint32_t *upcall_pid;        /* OVS_DP_ATTR_UPCALL_PID. */
    struct ovs_dp_stats stats;         /* OVS_DP_ATTR_STATS. */
};
+
/* Helpers for initializing, parsing, dumping, and executing datapath-level
 * Netlink requests (struct dpif_linux_dp).  *bufp, where present, receives
 * the reply buffer that the parsed reply points into. */
static void dpif_linux_dp_init(struct dpif_linux_dp *);
static int dpif_linux_dp_from_ofpbuf(struct dpif_linux_dp *,
                                     const struct ofpbuf *);
static void dpif_linux_dp_dump_start(struct nl_dump *);
static int dpif_linux_dp_transact(const struct dpif_linux_dp *request,
                                  struct dpif_linux_dp *reply,
                                  struct ofpbuf **bufp);
static int dpif_linux_dp_get(const struct dpif *, struct dpif_linux_dp *reply,
                             struct ofpbuf **bufp);
+
/* Flow-level Generic Netlink message, used both for composing requests to the
 * kernel and for representing parsed replies (see
 * dpif_linux_flow_transact()). */
struct dpif_linux_flow {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* Netlink message flags — presumably NLM_F_* values for the nlmsghdr,
     * not part of struct ovs_header despite its position here; confirm at
     * the call sites. */
    unsigned int nlmsg_flags;

    /* struct ovs_header. */
    int dp_ifindex;

    /* Attributes.
     *
     * The 'stats' member points to 64-bit data that might only be aligned on
     * 32-bit boundaries, so get_unaligned_u64() should be used to access its
     * values.
     *
     * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
     * the Netlink version of the command, even if actions_len is zero. */
    const struct nlattr *key;           /* OVS_FLOW_ATTR_KEY. */
    size_t key_len;
    const struct nlattr *actions;       /* OVS_FLOW_ATTR_ACTIONS. */
    size_t actions_len;
    const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
    const uint8_t *tcp_flags;           /* OVS_FLOW_ATTR_TCP_FLAGS. */
    const ovs_32aligned_u64 *used;      /* OVS_FLOW_ATTR_USED. */
    bool clear;                         /* OVS_FLOW_ATTR_CLEAR. */
};
+
/* Helpers for initializing, serializing, parsing, and executing flow-level
 * Netlink requests (struct dpif_linux_flow). */
static void dpif_linux_flow_init(struct dpif_linux_flow *);
static int dpif_linux_flow_from_ofpbuf(struct dpif_linux_flow *,
                                       const struct ofpbuf *);
static void dpif_linux_flow_to_ofpbuf(const struct dpif_linux_flow *,
                                      struct ofpbuf *);
static int dpif_linux_flow_transact(struct dpif_linux_flow *request,
                                    struct dpif_linux_flow *reply,
                                    struct ofpbuf **bufp);
static void dpif_linux_flow_get_stats(const struct dpif_linux_flow *,
                                      struct dpif_flow_stats *);
+
+/* Packet drop monitoring.
+ *
+ * When kernel-to-user Netlink buffers overflow, the kernel notifies us that
+ * one or more packets were dropped, but it doesn't tell us anything about
+ * those packets. However, the administrator really wants to know. So we do
+ * the next best thing, and keep track of the top sources of packets received
+ * on each kernel-to-user channel, since the top sources are those that will
+ * cause the buffers to overflow.
+ *
+ * We use a variation on the "Space-Saving" algorithm in Metwally et al.,
+ * "Efficient Computation of Frequent and Top-k Elements in Data Streams", ACM
+ * Transactions on Database Systems 31:3 (2006). This algorithm yields
+ * perfectly accurate results when the data stream's unique values (in this
+ * case, port numbers) fit into our data structure, and degrades gracefully
+ * even for challenging distributions (e.g. Zipf).
+ *
+ * Our implementation is very simple, without any of the special flourishes
+ * described in the paper. It avoids the need to use a hash for lookup by
+ * keeping the constant factor (N_SKETCHES) very small. The error calculations
+ * in the paper make it sound like the results should still be satisfactory.
+ *
+ * "space-saving" and "Metwally" seem like awkward names for data structures,
+ * so we call this a "sketch" even though technically that's a different sort
+ * of summary structure.
+ */
+
/* One of N_SKETCHES counting elements per channel in the Metwally
 * "space-saving" algorithm.  A channel's sketches[] array is kept ordered
 * from maximum to minimum 'hits' (see struct dpif_channel), which is what
 * lets lookup avoid a hash table. */
enum { N_SKETCHES = 8 };        /* Number of elements per channel. */
struct dpif_sketch {
    uint32_t port_no;           /* Port number. */
    unsigned int hits;          /* Number of hits. */
    unsigned int error;         /* Upper bound on error in 'hits'. */
};
+
/* One of N_CHANNELS channels per dpif between the kernel and userspace. */
struct dpif_channel {
    struct nl_sock *sock;       /* Netlink socket. */
    struct dpif_sketch sketches[N_SKETCHES]; /* From max to min 'hits'. */
    long long int last_poll;    /* Last time this channel was polled —
                                 * presumably in msec, matching
                                 * SCALE_INTERVAL; confirm at the call
                                 * sites. */
};
+
/* Packet drop monitoring helpers; see the "Packet drop monitoring" comment
 * above for the algorithm these implement. */
static void update_sketch(struct dpif_channel *, uint32_t port_no);
static void scale_sketches(struct dpif *);
static void report_loss(struct dpif *, struct dpif_channel *);
+
/* Interval, in milliseconds, at which to scale down the sketch values by a
 * factor of 2.  The Metwally algorithm doesn't do this, which makes sense in
 * the context it assumes, but in our situation we ought to weight recent data
 * more heavily than old data, so in my opinion this is reasonable.
 * (Presumably consumed by scale_sketches() — confirm.) */
#define SCALE_INTERVAL (60 * 1000)