+ return make_hook(fail_open_packet_cb, fail_open_periodic_cb, NULL,
+ fail_open);
+}
+\f
/* State for rate-limiting OFPT_PACKET_IN messages: packets that exceed the
 * configured rate are queued per-port and drained out by the periodic
 * callback, paced by a token bucket. */
struct rate_limiter {
    const struct settings *s;       /* Rate-limit settings (rate_limit, burst_limit). */
    struct rconn *remote_rconn;     /* Connection that queued packet_ins are sent on. */

    /* One queue per physical port, drained round-robin so that one busy
     * port cannot monopolize the packet_in bandwidth. */
    struct queue queues[OFPP_MAX];
    int n_queued;               /* Sum over queues[*].n. */
    int next_tx_port;           /* Next port to check in round-robin. */

    /* Token bucket.
     *
     * It costs 1000 tokens to send a single packet_in message.  A single token
     * per message would be more straightforward, but this choice lets us avoid
     * round-off error in refill_bucket()'s calculation of how many tokens to
     * add to the bucket, since no division step is needed. */
    long long int last_fill;    /* Time at which we last added tokens. */
    int tokens;                 /* Current number of tokens. */

    /* Transmission queue. */
    int n_txq;                  /* No. of packets waiting in rconn for tx. */
};
+
+/* Drop a packet from the longest queue in 'rl'. */
+static void
+drop_packet(struct rate_limiter *rl)
+{
+ struct queue *longest; /* Queue currently selected as longest. */
+ int n_longest; /* # of queues of same length as 'longest'. */
+ struct queue *q;
+
+ longest = &rl->queues[0];
+ n_longest = 1;
+ for (q = &rl->queues[0]; q < &rl->queues[OFPP_MAX]; q++) {
+ if (longest->n < q->n) {
+ longest = q;
+ n_longest = 1;
+ } else if (longest->n == q->n) {
+ n_longest++;
+
+ /* Randomly select one of the longest queues, with a uniform
+ * distribution (Knuth algorithm 3.4.2R). */
+ if (!random_range(n_longest)) {
+ longest = q;
+ }
+ }
+ }
+
+ /* FIXME: do we want to pop the tail instead? */
+ buffer_delete(queue_pop_head(longest));
+ rl->n_queued--;
+}
+
+/* Remove and return the next packet to transmit (in round-robin order). */
+static struct buffer *
+dequeue_packet(struct rate_limiter *rl)
+{
+ unsigned int i;
+
+ for (i = 0; i < OFPP_MAX; i++) {
+ unsigned int port = (rl->next_tx_port + i) % OFPP_MAX;
+ struct queue *q = &rl->queues[port];
+ if (q->n) {
+ rl->next_tx_port = (port + 1) % OFPP_MAX;
+ rl->n_queued--;
+ return queue_pop_head(q);
+ }
+ }
+ NOT_REACHED();
+}
+
+/* Add tokens to the bucket based on elapsed time. */
+static void
+refill_bucket(struct rate_limiter *rl)
+{
+ const struct settings *s = rl->s;
+ long long int now = time_msec();
+ long long int tokens = (now - rl->last_fill) * s->rate_limit + rl->tokens;
+ if (tokens >= 1000) {
+ rl->last_fill = now;
+ rl->tokens = MIN(tokens, s->burst_limit * 1000);
+ }
+}
+
+/* Attempts to remove enough tokens from 'rl' to transmit a packet. Returns
+ * true if successful, false otherwise. (In the latter case no tokens are
+ * removed.) */
+static bool
+get_token(struct rate_limiter *rl)
+{
+ if (rl->tokens >= 1000) {
+ rl->tokens -= 1000;
+ return true;
+ } else {
+ return false;
+ }
+}
+
/* Relay hook for rate-limiting packet_in messages.  Returns true to consume
 * the message (it has been queued or dropped here), false to let it take the
 * relay's normal forwarding path. */
static bool
rate_limit_packet_cb(struct relay *r, int half, void *rl_)
{
    struct rate_limiter *rl = rl_;
    const struct settings *s = rl->s;
    struct buffer *msg = r->halves[HALF_LOCAL].rxbuf;
    struct ofp_header *oh;

    /* Only messages arriving on the local half are rate-limited. */
    if (half == HALF_REMOTE) {
        return false;
    }

    /* Only OFPT_PACKET_IN messages are rate-limited. */
    oh = msg->data;
    if (oh->type != OFPT_PACKET_IN) {
        return false;
    }
    /* Length check must precede the 'in_port' read below; a truncated message
     * is passed through untouched (with a warning) rather than parsed. */
    if (msg->size < offsetof(struct ofp_packet_in, data)) {
        VLOG_WARN("packet too short (%zu bytes) for packet_in", msg->size);
        return false;
    }

    if (!rl->n_queued && get_token(rl)) {
        /* In the common case where we are not constrained by the rate limit,
         * let the packet take the normal path. */
        return false;
    } else {
        /* Otherwise queue it up for the periodic callback to drain out. */
        struct ofp_packet_in *opi = msg->data;
        /* '% OFPP_MAX' guards the queue-array index against out-of-range
         * port numbers (e.g. reserved OFPP_* values). */
        int port = ntohs(opi->in_port) % OFPP_MAX;
        /* Drop before enqueueing so n_queued never exceeds burst_limit. */
        if (rl->n_queued >= s->burst_limit) {
            drop_packet(rl);
        }
        queue_push_tail(&rl->queues[port], buffer_clone(msg));
        rl->n_queued++;
        return true;
    }
}
+
+static void
+rate_limit_periodic_cb(void *rl_)
+{
+ struct rate_limiter *rl = rl_;
+ int i;
+
+ /* Drain some packets out of the bucket if possible, but limit the number
+ * of iterations to allow other code to get work done too. */
+ refill_bucket(rl);
+ for (i = 0; rl->n_queued && get_token(rl) && i < 50; i++) {
+ /* Use a small, arbitrary limit for the amount of queuing to do here,
+ * because the TCP connection is responsible for buffering and there is
+ * no point in trying to transmit faster than the TCP connection can
+ * handle. */
+ struct buffer *b = dequeue_packet(rl);
+ rconn_send_with_limit(rl->remote_rconn, b, &rl->n_txq, 10);
+ }
+}
+
+static void
+rate_limit_wait_cb(void *rl_)
+{
+ struct rate_limiter *rl = rl_;
+ if (rl->n_queued) {
+ if (rl->tokens >= 1000) {
+ /* We can transmit more packets as soon as we're called again. */
+ poll_immediate_wake();
+ } else {
+ /* We have to wait for the bucket to re-fill. We could calculate
+ * the exact amount of time here for increased smoothness. */
+ poll_timer_wait(TIME_UPDATE_INTERVAL / 2);
+ }
+ }
+}
+
+static struct hook
+rate_limit_hook_create(const struct settings *s,
+ struct rconn *local,
+ struct rconn *remote)
+{
+ struct rate_limiter *rl;
+ size_t i;
+
+ rl = xcalloc(1, sizeof *rl);
+ rl->s = s;
+ rl->remote_rconn = remote;
+ for (i = 0; i < ARRAY_SIZE(rl->queues); i++) {
+ queue_init(&rl->queues[i]);
+ }
+ rl->last_fill = time_msec();
+ rl->tokens = s->rate_limit * 100;
+ return make_hook(rate_limit_packet_cb, rate_limit_periodic_cb,
+ rate_limit_wait_cb, rl);