struct mac_learning *ml; /* NULL to act as hub instead of switch. */
/* Number of outgoing queued packets on the rconn. */
- int n_queued;
+ struct rconn_packet_counter *queued;
/* Spanning tree protocol implementation.
*
sw->datapath_id = 0;
sw->last_features_request = time_now() - 1;
sw->ml = learn_macs ? mac_learning_create() : NULL;
+ sw->queued = rconn_packet_counter_create();
sw->next_query = LLONG_MIN;
sw->last_query = LLONG_MIN;
sw->last_reply = LLONG_MIN;
{
if (sw) {
mac_learning_destroy(sw->ml);
+ rconn_packet_counter_destroy(sw->queued);
free(sw);
}
}
static void
queue_tx(struct lswitch *sw, struct rconn *rconn, struct ofpbuf *b)
{
- int retval = rconn_send_with_limit(rconn, b, &sw->n_queued, 10);
+ int retval = rconn_send_with_limit(rconn, b, sw->queued, 10);
if (retval && retval != ENOTCONN) {
if (retval == EAGAIN) {
VLOG_INFO_RL(&rl, "%012llx: %s: tx queue overflow",
* destroyed), or ENOTCONN if 'rc' is not currently connected (in which case
* the caller retains ownership of 'b').
*
- * If 'n_queued' is non-null, then '*n_queued' will be incremented while the
+ * If 'counter' is non-null, then 'counter' will be incremented while the
* packet is in flight, then decremented when it has been sent (or discarded
* due to disconnection). Because 'b' may be sent (or discarded) before this
* function returns, the caller may not be able to observe any change in
- * '*n_queued'.
+ * 'counter'.
*
* There is no rconn_send_wait() function: an rconn has a send queue that it
* takes care of sending if you call rconn_run(), which will have the side
* effect of waking up poll_block(). */
int
-rconn_send(struct rconn *rc, struct ofpbuf *b, int *n_queued)
+rconn_send(struct rconn *rc, struct ofpbuf *b,
+ struct rconn_packet_counter *counter)
{
if (rconn_is_connected(rc)) {
copy_to_monitor(rc, b);
- b->private = n_queued;
- if (n_queued) {
- ++*n_queued;
+ b->private = counter;
+ if (counter) {
+ rconn_packet_counter_inc(counter);
}
queue_push_tail(&rc->txq, b);
}
}
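
To make the counter semantics documented above concrete, here is a caller-side sketch (illustrative only, not part of the patch; it assumes an already-created rconn 'rc' and reuses make_openflow() as it appears in the vswitchd hunks below):

    struct rconn_packet_counter *counter = rconn_packet_counter_create();
    struct ofp_header *oh;
    struct ofpbuf *msg;
    int error;

    oh = make_openflow(sizeof *oh, OFPT_FEATURES_REQUEST, &msg);
    error = rconn_send(rc, msg, counter);
    if (error) {
        /* ENOTCONN: rconn_send() left ownership of 'msg' with us. */
        ofpbuf_delete(msg);
    }
    /* 'counter->n' may already be 0 again here: the packet can be sent (or
     * discarded) before rconn_send() returns, as the comment above warns. */
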
-/* Sends 'b' on 'rc'. Increments '*n_queued' while the packet is in flight; it
+/* Sends 'b' on 'rc'. Increments 'counter' while the packet is in flight; it
* will be decremented when it has been sent (or discarded due to
- * disconnection). Returns 0 if successful, EAGAIN if '*n_queued' is already
+ * disconnection). Returns 0 if successful, EAGAIN if 'counter->n' is already
* at least as large as 'queue_limit', or ENOTCONN if 'rc' is not currently
* connected. Regardless of return value, 'b' is destroyed.
*
* Because 'b' may be sent (or discarded) before this function returns, the
- * caller may not be able to observe any change in '*n_queued'.
+ * caller may not be able to observe any change in 'counter'.
*
* There is no rconn_send_wait() function: an rconn has a send queue that it
* takes care of sending if you call rconn_run(), which will have the side
* effect of waking up poll_block(). */
int
rconn_send_with_limit(struct rconn *rc, struct ofpbuf *b,
- int *n_queued, int queue_limit)
+ struct rconn_packet_counter *counter, int queue_limit)
{
int retval;
- retval = *n_queued >= queue_limit ? EAGAIN : rconn_send(rc, b, n_queued);
+ retval = counter->n >= queue_limit ? EAGAIN : rconn_send(rc, b, counter);
if (retval) {
ofpbuf_delete(b);
}
return retval;
}
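
queue_tx() in the lswitch hunk above is the intended user of this function; a condensed sketch of the same pattern (hypothetical caller, not part of the patch) shows how the return value is meant to be handled. Unlike rconn_send(), ownership of the buffer always passes to rconn_send_with_limit():

    /* The buffer is consumed on every path, so the caller never frees 'msg'
     * itself. */
    int error = rconn_send_with_limit(rc, msg, counter, 10);
    if (error == EAGAIN) {
        /* Backlog: counter->n had already reached 10, so 'msg' was dropped. */
    } else if (error == ENOTCONN) {
        /* Not connected: 'msg' was dropped as well. */
    }
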
\f
+/* Creates and returns a new packet counter whose count is initially zero and
+ * that has a single owner reference. */
+struct rconn_packet_counter *
+rconn_packet_counter_create(void)
+{
+ struct rconn_packet_counter *c = xmalloc(sizeof *c);
+ c->n = 0;
+ c->ref_cnt = 1;
+ return c;
+}
+
+/* Releases the caller's reference to 'c'.  'c' is freed right away only if
+ * no packets are in flight; otherwise the final rconn_packet_counter_dec()
+ * frees it. */
+void
+rconn_packet_counter_destroy(struct rconn_packet_counter *c)
+{
+ if (c) {
+ assert(c->ref_cnt > 0);
+ if (!--c->ref_cnt && !c->n) {
+ free(c);
+ }
+ }
+}
+
+/* Increments 'c''s count of packets in flight. */
+void
+rconn_packet_counter_inc(struct rconn_packet_counter *c)
+{
+ c->n++;
+}
+
+/* Decrements 'c''s count of packets in flight, freeing 'c' if the count
+ * reaches zero after its owner has already destroyed it. */
+void
+rconn_packet_counter_dec(struct rconn_packet_counter *c)
+{
+ assert(c->n > 0);
+ if (!--c->n && !c->ref_cnt) {
+ free(c);
+ }
+}
+\f
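
The split free logic in rconn_packet_counter_destroy() and rconn_packet_counter_dec() encodes the counter's lifetime rule: it stays allocated while either an owner or an in-flight packet still references it. A minimal sketch of the case this is designed for (hypothetical teardown order, not taken from the patch):

    struct rconn_packet_counter *c = rconn_packet_counter_create();

    rconn_send(rc, msg, c);            /* c->n becomes 1 while 'msg' is queued. */
    rconn_packet_counter_destroy(c);   /* ref_cnt drops to 0, but c->n is still
                                        * 1, so 'c' is not freed yet. */
    /* When the rconn later sends or discards 'msg', the
     * rconn_packet_counter_dec() call in the tx path frees 'c'. */

This is why the *_destroy() functions elsewhere in this patch can release their counters without first draining the rconn's transmit queue.
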
/* Tries to send a packet from 'rc''s send buffer. Returns 0 if successful,
* otherwise a positive errno value. */
static int
{
int retval = 0;
struct ofpbuf *next = rc->txq.head->next;
- int *n_queued = rc->txq.head->private;
+ struct rconn_packet_counter *counter = rc->txq.head->private;
retval = vconn_send(rc->vconn, rc->txq.head);
if (retval) {
if (retval != EAGAIN) {
return retval;
}
rc->packets_sent++;
- if (n_queued) {
- --*n_queued;
+ if (counter) {
+ rconn_packet_counter_dec(counter);
}
queue_advance_head(&rc->txq, next);
return 0;
}
while (rc->txq.n > 0) {
struct ofpbuf *b = queue_pop_head(&rc->txq);
- int *n_queued = b->private;
- if (n_queued) {
- --*n_queued;
+ struct rconn_packet_counter *counter = b->private;
+ if (counter) {
+ rconn_packet_counter_dec(counter);
}
ofpbuf_delete(b);
}
-/* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
+/* Copyright (c) 2008, 2009 The Board of Trustees of The Leland Stanford
* Junior University
*
* We are making the OpenFlow specification and associated documentation
*/
struct vconn;
+struct rconn_packet_counter;
struct rconn *rconn_new(const char *name,
int inactivity_probe_interval, int max_backoff);
void rconn_run_wait(struct rconn *);
struct ofpbuf *rconn_recv(struct rconn *);
void rconn_recv_wait(struct rconn *);
-int rconn_send(struct rconn *, struct ofpbuf *, int *n_queued);
+int rconn_send(struct rconn *, struct ofpbuf *, struct rconn_packet_counter *);
int rconn_send_with_limit(struct rconn *, struct ofpbuf *,
- int *n_queued, int queue_limit);
+ struct rconn_packet_counter *, int queue_limit);
unsigned int rconn_packets_sent(const struct rconn *);
unsigned int rconn_packets_received(const struct rconn *);
unsigned int rconn_get_state_elapsed(const struct rconn *);
unsigned int rconn_get_connection_seqno(const struct rconn *);
+/* Counts the number of packets queued into an rconn by a given source. */
+struct rconn_packet_counter {
+ int n; /* Number of packets queued. */
+ int ref_cnt; /* Number of owners. */
+};
+
+struct rconn_packet_counter *rconn_packet_counter_create(void);
+void rconn_packet_counter_destroy(struct rconn_packet_counter *);
+void rconn_packet_counter_inc(struct rconn_packet_counter *);
+void rconn_packet_counter_dec(struct rconn_packet_counter *);
+
#endif /* rconn.h */
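
Because each counter is owned by a sender rather than by the rconn, independent modules can track their own backlogs on the same connection, and the struct is exposed in the header so callers can read 'n' directly, as bridge_is_backlogged() does below. A hypothetical sketch (invented names, not part of the patch):

    /* Two senders, each with its own view of how much it has queued. */
    struct rconn_packet_counter *ctl_backlog = rconn_packet_counter_create();
    struct rconn_packet_counter *stats_backlog = rconn_packet_counter_create();

    rconn_send_with_limit(rc, ctl_msg, ctl_backlog, 100);
    rconn_send(rc, stats_msg, stats_backlog);

    if (!stats_backlog->n) {
        /* All of this sender's packets have left the queue. */
    }
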
struct bridge {
struct list node; /* Node in global list of bridges. */
char *name; /* User-specified arbitrary name. */
- int txqlen; /* # of messages queued to send on 'rconn'. */
+ struct rconn_packet_counter *txqlen; /* # queued to send on 'rconn'. */
struct mac_learning *ml; /* MAC learning table, or null not to learn. */
int flow_idle_time; /* Idle time for flows we set up. */
bool sent_config_request; /* Successfully sent config request? */
assert(!bridge_lookup(name));
br = xcalloc(1, sizeof *br);
br->name = xstrdup(name);
- br->txqlen = 0;
+ br->txqlen = rconn_packet_counter_create();
br->ml = mac_learning_create();
br->flow_idle_time = 5;
br->sent_config_request = false;
}
process_destroy(br->secchan);
rconn_destroy(br->rconn);
+ rconn_packet_counter_destroy(br->txqlen);
free(br->controller);
svec_destroy(&br->secchan_opts);
ft_destroy(br->ft);
osc = make_openflow(sizeof *osc, OFPT_SET_CONFIG, &msg);
osc->flags = htons(OFPC_SEND_FLOW_EXP | OFPC_FRAG_NORMAL);
osc->miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
- if (!rconn_send_with_limit(br->rconn, msg, &br->txqlen, INT_MAX)) {
+ if (!rconn_send_with_limit(br->rconn, msg, br->txqlen, INT_MAX)) {
br->sent_config_request = true;
}
}
struct ofpbuf *msg;
oh = make_openflow(sizeof *oh, OFPT_FEATURES_REQUEST, &msg);
- if (!rconn_send_with_limit(br->rconn, msg, &br->txqlen, INT_MAX)) {
+ if (!rconn_send_with_limit(br->rconn, msg, br->txqlen, INT_MAX)) {
br->sent_features_request = true;
}
}
static bool
bridge_is_backlogged(const struct bridge *br)
{
- return br->txqlen >= 100;
+ return br->txqlen->n >= 100;
}
/* For robustness, in case the administrator moves around datapath ports behind
{
int retval;
update_openflow_length(msg);
- retval = rconn_send(br->rconn, msg, &br->txqlen);
+ retval = rconn_send(br->rconn, msg, br->txqlen);
if (retval) {
ofpbuf_delete(msg);
/* No point in logging: rconn_send() only fails due to disconnection,
struct list requests;
struct rconn *rconn;
unsigned int rconn_seqno;
- int txqlen;
+ struct rconn_packet_counter *txqlen;
time_t timeout;
};
list_init(&mgr->requests);
mgr->rconn = rconn;
mgr->rconn_seqno = rconn_get_connection_seqno(rconn);
- mgr->txqlen = 0;
+ mgr->txqlen = rconn_packet_counter_create();
return mgr;
}
stats_mgr_destroy(struct stats_mgr *mgr)
{
if (mgr) {
+ rconn_packet_counter_destroy(mgr->txqlen);
cancel_all_requests(mgr, ECONNABORTED);
free(mgr);
}
|| !rconn_is_connected(mgr->rconn)) {
mgr->rconn_seqno = rconn_get_connection_seqno(mgr->rconn);
cancel_all_requests(mgr, ENOTCONN);
- } else if (!mgr->txqlen && !mgr->rq && !list_is_empty(&mgr->requests)) {
+ } else if (!mgr->txqlen->n && !mgr->rq && !list_is_empty(&mgr->requests)) {
struct stats_request *rq;
int retval;
rq = mgr->rq = CONTAINER_OF(list_pop_front(&mgr->requests),
struct stats_request, node);
- retval = rconn_send(mgr->rconn, rq->osr, &mgr->txqlen);
+ retval = rconn_send(mgr->rconn, rq->osr, mgr->txqlen);
if (!retval) {
/* rconn_send() consumed the message. */
rq->osr = NULL;