struct hmap *tbl;
for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
- HMAP_FOR_EACH_SAFE (bucket, next_bucket,
- struct cls_bucket, hmap_node, tbl) {
+ HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, tbl) {
free(bucket);
}
hmap_destroy(tbl);
assert(wildcards == (wildcards & OVSFW_ALL));
table_idx = table_idx_from_wildcards(wildcards);
hash = hash_fields(target, table_idx);
- HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
+ HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, hash,
&cls->tables[table_idx]) {
if (equal_fields(&bucket->fixed, target, table_idx)) {
struct cls_rule *pos;
- LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
+ LIST_FOR_EACH (pos, node.list, &bucket->rules) {
if (pos->priority < priority) {
return NULL;
} else if (pos->priority == priority &&
for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
struct cls_bucket *bucket;
- HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, tbl) {
+ HMAP_FOR_EACH (bucket, hmap_node, tbl) {
struct cls_rule *rule;
- LIST_FOR_EACH (rule, struct cls_rule, node.list,
- &bucket->rules) {
+ LIST_FOR_EACH (rule, node.list, &bucket->rules) {
if (rule->priority == priority
- && rules_match_2wild(rule, &target_rule, 0)) {
+ && rules_match_2wild(rule, &target_rule, 0)) {
return true;
}
}
table++) {
struct cls_bucket *bucket, *next_bucket;
- HMAP_FOR_EACH_SAFE (bucket, next_bucket,
- struct cls_bucket, hmap_node, table) {
+ HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, table) {
            /* XXX there is a bit of room for optimization here based on
             * rejecting entire buckets on their fixed fields, but it will
             * only be worthwhile for big buckets (which we hope we won't
             * get anyway, but...) */
            /* We can't just use LIST_FOR_EACH_SAFE here because, if the
             * callback deletes the last rule in the bucket, then the
             * bucket itself will be destroyed.  The bucket contains the
             * list head so that's a use-after-free error. */
prev_rule = NULL;
- LIST_FOR_EACH (rule, struct cls_rule, node.list,
- &bucket->rules) {
+ LIST_FOR_EACH (rule, node.list, &bucket->rules) {
if (rules_match_1wild(rule, target, 0)) {
if (prev_rule) {
callback(prev_rule, aux);
if (target->wc.wildcards) {
struct cls_rule *rule, *next_rule;
- HMAP_FOR_EACH_SAFE (rule, next_rule, struct cls_rule, node.hmap,
+ HMAP_FOR_EACH_SAFE (rule, next_rule, node.hmap,
&cls->exact_table) {
if (rules_match_1wild(rule, target, 0)) {
callback(rule, aux);
for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
struct cls_bucket *bucket, *next_bucket;
- HMAP_FOR_EACH_SAFE (bucket, next_bucket,
- struct cls_bucket, hmap_node, tbl) {
+ HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, tbl) {
struct cls_rule *prev_rule, *rule;
            /* We can't just use LIST_FOR_EACH_SAFE here because, if the
             * callback deletes the last rule in the bucket, then the
             * bucket itself will be destroyed.  The bucket contains the
             * list head so that's a use-after-free error. */
prev_rule = NULL;
- LIST_FOR_EACH (rule, struct cls_rule, node.list,
- &bucket->rules) {
+ LIST_FOR_EACH (rule, node.list, &bucket->rules) {
if (prev_rule) {
callback(prev_rule, aux);
}
if (include & CLS_INC_EXACT) {
struct cls_rule *rule, *next_rule;
- HMAP_FOR_EACH_SAFE (rule, next_rule,
- struct cls_rule, node.hmap, &cls->exact_table) {
+ HMAP_FOR_EACH_SAFE (rule, next_rule, node.hmap, &cls->exact_table) {
callback(rule, aux);
}
}
bucket_insert(struct cls_bucket *bucket, struct cls_rule *rule)
{
struct cls_rule *pos;
- LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
+ LIST_FOR_EACH (pos, node.list, &bucket->rules) {
if (pos->priority == rule->priority) {
if (pos->wc.wildcards == rule->wc.wildcards
&& rules_match_1wild(pos, rule, rule->table_idx))
find_bucket(struct hmap *table, size_t hash, const struct cls_rule *rule)
{
struct cls_bucket *bucket;
- HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
- table) {
+ HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, hash, table) {
if (equal_fields(&bucket->fixed, &rule->flow, rule->table_idx)) {
return bucket;
}
return NULL;
}
- LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
+ LIST_FOR_EACH (pos, node.list, &bucket->rules) {
if (rules_match_1wild(target, pos, field_idx)) {
return pos;
}
return search_bucket(bucket, field_idx, target);
}
- HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node,
+ HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node,
hash_fields(&target->flow, field_idx), table) {
struct cls_rule *rule = search_bucket(bucket, field_idx, target);
if (rule) {
{
struct cls_rule *rule;
- HMAP_FOR_EACH_WITH_HASH (rule, struct cls_rule, node.hmap,
- hash, &cls->exact_table) {
+ HMAP_FOR_EACH_WITH_HASH (rule, node.hmap, hash, &cls->exact_table) {
if (flow_equal(&rule->flow, target)) {
return rule;
}
{
struct dp_netdev_port *port;
- LIST_FOR_EACH (port, struct dp_netdev_port, node, &dp->port_list) {
+ LIST_FOR_EACH (port, node, &dp->port_list) {
if (!strcmp(netdev_get_name(port->netdev), devname)) {
*portp = port;
return 0;
{
struct dp_netdev_flow *flow, *next;
- HMAP_FOR_EACH_SAFE (flow, next, struct dp_netdev_flow, node,
- &dp->flow_table) {
+ HMAP_FOR_EACH_SAFE (flow, next, node, &dp->flow_table) {
dp_netdev_free_flow(dp, flow);
}
}
int i;
i = 0;
- LIST_FOR_EACH (port, struct dp_netdev_port, node, &dp->port_list) {
+ LIST_FOR_EACH (port, node, &dp->port_list) {
struct odp_port *odp_port = &ports[i];
if (i >= n) {
break;
struct dp_netdev_flow *flow;
assert(!key->reserved[0] && !key->reserved[1] && !key->reserved[2]);
- HMAP_FOR_EACH_WITH_HASH (flow, struct dp_netdev_flow, node,
- flow_hash(key, 0), &dp->flow_table) {
+ HMAP_FOR_EACH_WITH_HASH (flow, node, flow_hash(key, 0), &dp->flow_table) {
if (flow_equal(&flow->key, key)) {
return flow;
}
int i;
i = 0;
- HMAP_FOR_EACH (flow, struct dp_netdev_flow, node, &dp->flow_table) {
+ HMAP_FOR_EACH (flow, node, &dp->flow_table) {
if (i >= n) {
break;
}
struct dp_netdev *dp;
ofpbuf_init(&packet, DP_NETDEV_HEADROOM + max_mtu);
- LIST_FOR_EACH (dp, struct dp_netdev, node, &dp_netdev_list) {
+ LIST_FOR_EACH (dp, node, &dp_netdev_list) {
struct dp_netdev_port *port;
- LIST_FOR_EACH (port, struct dp_netdev_port, node, &dp->port_list) {
+ LIST_FOR_EACH (port, node, &dp->port_list) {
int error;
/* Reset packet contents. */
{
struct dp_netdev *dp;
- LIST_FOR_EACH (dp, struct dp_netdev, node, &dp_netdev_list) {
+ LIST_FOR_EACH (dp, node, &dp_netdev_list) {
struct dp_netdev_port *port;
- LIST_FOR_EACH (port, struct dp_netdev_port, node, &dp->port_list) {
+ LIST_FOR_EACH (port, node, &dp->port_list) {
netdev_recv_wait(port->netdev);
}
}
*
* HMAP_FOR_EACH_WITH_HASH iterates NODE over all of the nodes in HMAP that
* have hash value equal to HASH. HMAP_FOR_EACH_IN_BUCKET iterates NODE over
- * all of the nodes in HMAP that would fall in the same bucket as HASH. STRUCT
- * and MEMBER must be the name of the struct that contains the 'struct
- * hmap_node' and the name of the 'struct hmap_node' member, respectively.
+ * all of the nodes in HMAP that would fall in the same bucket as HASH. MEMBER
+ * must be the name of the 'struct hmap_node' member within NODE.
*
* These macros may be used interchangeably to search for a particular value in
* an hmap, see, e.g. shash_find() for an example. Usually, using
*
* HASH is only evaluated once.
*/
-#define HMAP_FOR_EACH_WITH_HASH(NODE, STRUCT, MEMBER, HASH, HMAP) \
- for ((NODE) = CONTAINER_OF(hmap_first_with_hash(HMAP, HASH), \
- STRUCT, MEMBER); \
+#define HMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, HMAP) \
+ for ((NODE) = OBJECT_CONTAINING(hmap_first_with_hash(HMAP, HASH), \
+ NODE, MEMBER); \
&(NODE)->MEMBER != NULL; \
- (NODE) = CONTAINER_OF(hmap_next_with_hash(&(NODE)->MEMBER), \
- STRUCT, MEMBER))
-#define HMAP_FOR_EACH_IN_BUCKET(NODE, STRUCT, MEMBER, HASH, HMAP) \
- for ((NODE) = CONTAINER_OF(hmap_first_in_bucket(HMAP, HASH), \
- STRUCT, MEMBER); \
+ (NODE) = OBJECT_CONTAINING(hmap_next_with_hash(&(NODE)->MEMBER), \
+ NODE, MEMBER))
+#define HMAP_FOR_EACH_IN_BUCKET(NODE, MEMBER, HASH, HMAP) \
+ for ((NODE) = OBJECT_CONTAINING(hmap_first_in_bucket(HMAP, HASH), \
+ NODE, MEMBER); \
&(NODE)->MEMBER != NULL; \
- (NODE) = CONTAINER_OF(hmap_next_in_bucket(&(NODE)->MEMBER), \
- STRUCT, MEMBER))
+ (NODE) = OBJECT_CONTAINING(hmap_next_in_bucket(&(NODE)->MEMBER), \
+ NODE, MEMBER))
static inline struct hmap_node *hmap_first_with_hash(const struct hmap *,
size_t hash);
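
Dropping the STRUCT argument works because the rewritten macros lean on OBJECT_CONTAINING, which recovers the containing object's address, and its pointer type, from NODE itself. That helper is not part of this excerpt (in OVS it lives in util.h and may differ in detail); a minimal sketch, assuming GCC's typeof extension, might read:

    #include <stddef.h>     /* offsetof() */

    /* Sketch of an assumed OBJECT_CONTAINING helper, not from this patch:
     * given POINTER, the address of the MEMBER field inside the object that
     * OBJECT points at, evaluates to that object's address, cast to
     * OBJECT's own pointer type. */
    #define OBJECT_CONTAINING(POINTER, OBJECT, MEMBER)                  \
        ((typeof(OBJECT)) (void *)                                      \
         ((char *) (POINTER) - offsetof(typeof(*(OBJECT)), MEMBER)))

Because the type is inferred from NODE, every caller above sheds its 'struct cls_bucket'-style argument with no change in behavior.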
* The _SAFE version is needed when NODE may be freed. It is not needed when
* NODE may be removed from the hash map but its members remain accessible and
* intact. */
-#define HMAP_FOR_EACH(NODE, STRUCT, MEMBER, HMAP) \
- for ((NODE) = CONTAINER_OF(hmap_first(HMAP), STRUCT, MEMBER); \
- &(NODE)->MEMBER != NULL; \
- (NODE) = CONTAINER_OF(hmap_next(HMAP, &(NODE)->MEMBER), \
- STRUCT, MEMBER))
-
-#define HMAP_FOR_EACH_SAFE(NODE, NEXT, STRUCT, MEMBER, HMAP) \
- for ((NODE) = CONTAINER_OF(hmap_first(HMAP), STRUCT, MEMBER); \
- (&(NODE)->MEMBER != NULL \
- ? (NEXT) = CONTAINER_OF(hmap_next(HMAP, &(NODE)->MEMBER), \
- STRUCT, MEMBER), 1 \
- : 0); \
+#define HMAP_FOR_EACH(NODE, MEMBER, HMAP) \
+ for ((NODE) = OBJECT_CONTAINING(hmap_first(HMAP), NODE, MEMBER); \
+ &(NODE)->MEMBER != NULL; \
+ (NODE) = OBJECT_CONTAINING(hmap_next(HMAP, &(NODE)->MEMBER), \
+ NODE, MEMBER))
+
+#define HMAP_FOR_EACH_SAFE(NODE, NEXT, MEMBER, HMAP) \
+ for ((NODE) = OBJECT_CONTAINING(hmap_first(HMAP), NODE, MEMBER); \
+ (&(NODE)->MEMBER != NULL \
+ ? (NEXT) = OBJECT_CONTAINING(hmap_next(HMAP, &(NODE)->MEMBER), \
+ NODE, MEMBER), 1 \
+ : 0); \
(NODE) = (NEXT))
static inline struct hmap_node *hmap_first(const struct hmap *);
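
As a usage illustration of the new calling convention (hypothetical element type and function names, not taken from this patch), tearing down an hmap now needs only the member name:

    #include <stdlib.h>
    #include "hmap.h"

    struct frob {                    /* Hypothetical element type. */
        struct hmap_node hmap_node;  /* Node within the owning hmap. */
        int value;
    };

    /* Frees every element of 'frobs', then the map itself.  The _SAFE form
     * is required because each node is freed while iterating. */
    static void
    destroy_frobs(struct hmap *frobs)
    {
        struct frob *frob, *next;

        HMAP_FOR_EACH_SAFE (frob, next, hmap_node, frobs) {
            hmap_remove(frobs, &frob->hmap_node);
            free(frob);
        }
        hmap_destroy(frobs);
    }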
size_t list_size(const struct list *);
bool list_is_empty(const struct list *);
-#define LIST_FOR_EACH(ITER, STRUCT, MEMBER, LIST) \
- for (ITER = CONTAINER_OF((LIST)->next, STRUCT, MEMBER); \
- &(ITER)->MEMBER != (LIST); \
- ITER = CONTAINER_OF((ITER)->MEMBER.next, STRUCT, MEMBER))
-#define LIST_FOR_EACH_REVERSE(ITER, STRUCT, MEMBER, LIST) \
- for (ITER = CONTAINER_OF((LIST)->prev, STRUCT, MEMBER); \
- &(ITER)->MEMBER != (LIST); \
- ITER = CONTAINER_OF((ITER)->MEMBER.prev, STRUCT, MEMBER))
-#define LIST_FOR_EACH_SAFE(ITER, NEXT, STRUCT, MEMBER, LIST) \
- for (ITER = CONTAINER_OF((LIST)->next, STRUCT, MEMBER); \
- (NEXT = CONTAINER_OF((ITER)->MEMBER.next, STRUCT, MEMBER), \
- &(ITER)->MEMBER != (LIST)); \
+#define LIST_FOR_EACH(ITER, MEMBER, LIST) \
+ for (ITER = OBJECT_CONTAINING((LIST)->next, ITER, MEMBER); \
+ &(ITER)->MEMBER != (LIST); \
+ ITER = OBJECT_CONTAINING((ITER)->MEMBER.next, ITER, MEMBER))
+#define LIST_FOR_EACH_REVERSE(ITER, MEMBER, LIST) \
+ for (ITER = OBJECT_CONTAINING((LIST)->prev, ITER, MEMBER); \
+ &(ITER)->MEMBER != (LIST); \
+ ITER = OBJECT_CONTAINING((ITER)->MEMBER.prev, ITER, MEMBER))
+#define LIST_FOR_EACH_SAFE(ITER, NEXT, MEMBER, LIST) \
+ for (ITER = OBJECT_CONTAINING((LIST)->next, ITER, MEMBER); \
+ (NEXT = OBJECT_CONTAINING((ITER)->MEMBER.next, ITER, MEMBER), \
+ &(ITER)->MEMBER != (LIST)); \
ITER = NEXT)
#endif /* list.h */
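
The list macros follow the same pattern; a brief sketch with illustrative names (assuming only what list.h above declares):

    #include "list.h"

    struct item {            /* Hypothetical element type. */
        struct list node;    /* Node within the owning list. */
        int value;
    };

    /* Sums 'value' across 'items'.  The element type is inferred from
     * 'item' itself rather than passed as a separate macro argument. */
    static int
    sum_items(struct list *items)
    {
        struct item *item;
        int total = 0;

        LIST_FOR_EACH (item, node, items) {
            total += item->value;
        }
        return total;
    }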
{
struct lockfile *lockfile;
- HMAP_FOR_EACH (lockfile, struct lockfile, hmap_node, &lock_table) {
+ HMAP_FOR_EACH (lockfile, hmap_node, &lock_table) {
if (lockfile->fd >= 0) {
VLOG_WARN("%s: child does not inherit lock", lockfile->name);
lockfile_unhash(lockfile);
{
struct lockfile *lockfile;
- HMAP_FOR_EACH_WITH_HASH (lockfile, struct lockfile, hmap_node,
+ HMAP_FOR_EACH_WITH_HASH (lockfile, hmap_node,
lockfile_hash(device, inode), &lock_table) {
if (lockfile->device == device && lockfile->inode == inode) {
return lockfile;
uint16_t vlan)
{
struct mac_entry *e;
- LIST_FOR_EACH (e, struct mac_entry, hash_node, bucket) {
+ LIST_FOR_EACH (e, hash_node, bucket) {
if (eth_addr_equals(e->mac, mac) && e->vlan == vlan) {
return e;
}
netdev_dev_linux_cast(netdev_get_dev(netdev));
struct tc_queue *queue;
- HMAP_FOR_EACH_IN_BUCKET (queue, struct tc_queue, hmap_node,
- hash, &netdev_dev->tc->queues) {
+ HMAP_FOR_EACH_IN_BUCKET (queue, hmap_node, hash, &netdev_dev->tc->queues) {
if (queue->queue_id == queue_id) {
return queue;
}
last_error = 0;
shash_init(&details);
- HMAP_FOR_EACH (queue, struct tc_queue, hmap_node,
- &netdev_dev->tc->queues) {
+ HMAP_FOR_EACH (queue, hmap_node, &netdev_dev->tc->queues) {
shash_clear(&details);
error = netdev_dev->tc->ops->class_get(netdev, queue, &details);
poll_notify(struct list *list)
{
struct netdev_linux_notifier *notifier;
- LIST_FOR_EACH (notifier, struct netdev_linux_notifier, node, list) {
+ LIST_FOR_EACH (notifier, node, list) {
        struct netdev_notifier *n = &notifier->notifier;
n->cb(n);
}
struct htb *htb = CONTAINER_OF(tc, struct htb, tc);
struct htb_class *hc, *next;
- HMAP_FOR_EACH_SAFE (hc, next, struct htb_class, tc_queue.hmap_node,
- &htb->tc.queues) {
+ HMAP_FOR_EACH_SAFE (hc, next, tc_queue.hmap_node, &htb->tc.queues) {
hmap_remove(&htb->tc.queues, &hc->tc_queue.hmap_node);
free(hc);
}
if (list) {
struct netdev_vport_notifier *notifier;
- LIST_FOR_EACH (notifier, struct netdev_vport_notifier,
- list_node, list) {
+ LIST_FOR_EACH (notifier, list_node, list) {
        struct netdev_notifier *n = &notifier->notifier;
n->cb(n);
}
close_all_netdevs(void *aux OVS_UNUSED)
{
struct netdev *netdev, *next;
- LIST_FOR_EACH_SAFE(netdev, next, struct netdev, node, &netdev_list) {
+ LIST_FOR_EACH_SAFE(netdev, next, node, &netdev_list) {
netdev_close(netdev);
}
}
}
changed = true;
- HMAP_FOR_EACH_SAFE (row, next_row, struct ovsdb_idl_row, hmap_node,
- &table->rows) {
+ HMAP_FOR_EACH_SAFE (row, next_row, hmap_node, &table->rows) {
struct ovsdb_idl_arc *arc, *next_arc;
if (!ovsdb_idl_row_is_orphan(row)) {
ovsdb_idl_row_unparse(row);
}
- LIST_FOR_EACH_SAFE (arc, next_arc, struct ovsdb_idl_arc, src_node,
- &row->src_arcs) {
+ LIST_FOR_EACH_SAFE (arc, next_arc, src_node, &row->src_arcs) {
free(arc);
}
/* No need to do anything with dst_arcs: some node has those arcs
{
struct ovsdb_idl_row *row;
- HMAP_FOR_EACH_WITH_HASH (row, struct ovsdb_idl_row, hmap_node,
- uuid_hash(uuid), &table->rows) {
+ HMAP_FOR_EACH_WITH_HASH (row, hmap_node, uuid_hash(uuid), &table->rows) {
if (uuid_equals(&row->uuid, uuid)) {
return row;
}
/* Delete all forward arcs. If 'destroy_dsts', destroy any orphaned rows
* that this causes to be unreferenced. */
- LIST_FOR_EACH_SAFE (arc, next, struct ovsdb_idl_arc, src_node,
- &row->src_arcs) {
+ LIST_FOR_EACH_SAFE (arc, next, src_node, &row->src_arcs) {
list_remove(&arc->dst_node);
if (destroy_dsts
&& ovsdb_idl_row_is_orphan(arc->dst)
* (If duplicate arcs were possible then we would need to make sure that
* 'next' didn't also point into 'arc''s destination, but we forbid
* duplicate arcs.) */
- LIST_FOR_EACH_SAFE (arc, next, struct ovsdb_idl_arc, dst_node,
- &row->dst_arcs) {
+ LIST_FOR_EACH_SAFE (arc, next, dst_node, &row->dst_arcs) {
struct ovsdb_idl_row *ref = arc->src;
ovsdb_idl_row_unparse(ref);
free(txn->inc_table);
free(txn->inc_column);
json_destroy(txn->inc_where);
- HMAP_FOR_EACH_SAFE (insert, next, struct ovsdb_idl_txn_insert, hmap_node,
- &txn->inserted_rows) {
+ HMAP_FOR_EACH_SAFE (insert, next, hmap_node, &txn->inserted_rows) {
free(insert);
}
hmap_destroy(&txn->inserted_rows);
{
const struct ovsdb_idl_row *row;
- HMAP_FOR_EACH_WITH_HASH (row, struct ovsdb_idl_row, txn_node,
- uuid_hash(uuid), &txn->txn_rows) {
+ HMAP_FOR_EACH_WITH_HASH (row, txn_node, uuid_hash(uuid), &txn->txn_rows) {
if (uuid_equals(&row->uuid, uuid)) {
return row;
}
* transaction and fail to update the graph. */
txn->idl->txn = NULL;
- HMAP_FOR_EACH_SAFE (row, next, struct ovsdb_idl_row, txn_node,
- &txn->txn_rows) {
+ HMAP_FOR_EACH_SAFE (row, next, txn_node, &txn->txn_rows) {
if (row->old) {
if (row->written) {
ovsdb_idl_row_unparse(row);
json_string_create(txn->idl->class->database));
/* Add prerequisites and declarations of new rows. */
- HMAP_FOR_EACH (row, struct ovsdb_idl_row, txn_node, &txn->txn_rows) {
+ HMAP_FOR_EACH (row, txn_node, &txn->txn_rows) {
/* XXX check that deleted rows exist even if no prereqs? */
if (row->prereqs) {
const struct ovsdb_idl_table_class *class = row->table->class;
/* Add updates. */
any_updates = false;
- HMAP_FOR_EACH (row, struct ovsdb_idl_row, txn_node, &txn->txn_rows) {
+ HMAP_FOR_EACH (row, txn_node, &txn->txn_rows) {
const struct ovsdb_idl_table_class *class = row->table->class;
if (row->old == row->new) {
const struct ovsdb_idl_txn_insert *insert;
assert(txn->status == TXN_SUCCESS || txn->status == TXN_UNCHANGED);
- HMAP_FOR_EACH_IN_BUCKET (insert, struct ovsdb_idl_txn_insert, hmap_node,
+ HMAP_FOR_EACH_IN_BUCKET (insert, hmap_node,
uuid_hash(uuid), &txn->inserted_rows) {
if (uuid_equals(uuid, &insert->dummy)) {
return &insert->real;
{
struct ovsdb_idl_txn *txn;
- HMAP_FOR_EACH (txn, struct ovsdb_idl_txn, hmap_node,
- &idl->outstanding_txns) {
+ HMAP_FOR_EACH (txn, hmap_node, &idl->outstanding_txns) {
ovsdb_idl_txn_complete(txn, TXN_TRY_AGAIN);
}
}
{
struct ovsdb_idl_txn *txn;
- HMAP_FOR_EACH_WITH_HASH (txn, struct ovsdb_idl_txn, hmap_node,
+ HMAP_FOR_EACH_WITH_HASH (txn, hmap_node,
json_hash(id, 0), &idl->outstanding_txns) {
if (json_equal(id, txn->request_id)) {
return txn;
hard_errors++;
}
- HMAP_FOR_EACH (insert, struct ovsdb_idl_txn_insert, hmap_node,
- &txn->inserted_rows) {
+ HMAP_FOR_EACH (insert, hmap_node, &txn->inserted_rows) {
if (!ovsdb_idl_txn_process_insert_reply(insert, ops)) {
hard_errors++;
}
}
n_pollfds = 0;
- LIST_FOR_EACH (pw, struct poll_waiter, node, &waiters) {
+ LIST_FOR_EACH (pw, node, &waiters) {
pw->pollfd = &pollfds[n_pollfds];
pollfds[n_pollfds].fd = pw->fd;
pollfds[n_pollfds].events = pw->events;
log_wakeup(&timeout_backtrace, "%d-ms timeout", timeout);
}
- LIST_FOR_EACH_SAFE (pw, next, struct poll_waiter, node, &waiters) {
+ LIST_FOR_EACH_SAFE (pw, next, node, &waiters) {
if (pw->pollfd->revents && VLOG_IS_DBG_ENABLED()) {
log_wakeup(pw->backtrace, "%s%s%s%s%s on fd %d",
pw->pollfd->revents & POLLIN ? "[POLLIN]" : "",
struct process *p;
COVERAGE_INC(process_sigchld);
- LIST_FOR_EACH (p, struct process, node, &all_processes) {
+ LIST_FOR_EACH (p, node, &all_processes) {
if (!p->exited) {
int retval, status;
do {
change.master_ifindex = (attrs[IFLA_MASTER]
? nl_attr_get_u32(attrs[IFLA_MASTER]) : 0);
- LIST_FOR_EACH (notifier, struct rtnetlink_notifier, node,
- &all_notifiers) {
+ LIST_FOR_EACH (notifier, node, &all_notifiers) {
notifier->cb(&change, notifier->aux);
}
}
{
struct rtnetlink_notifier *notifier;
- LIST_FOR_EACH (notifier, struct rtnetlink_notifier, node,
- &all_notifiers) {
+ LIST_FOR_EACH (notifier, node, &all_notifiers) {
notifier->cb(NULL, notifier->aux);
}
}
{
struct shash_node *node;
- HMAP_FOR_EACH_WITH_HASH (node, struct shash_node, node, hash, &sh->map) {
+ HMAP_FOR_EACH_WITH_HASH (node, node, hash, &sh->map) {
if (!strcmp(node->name, name)) {
return node;
}
#define SHASH_INITIALIZER(SHASH) { HMAP_INITIALIZER(&(SHASH)->map) }
-#define SHASH_FOR_EACH(SHASH_NODE, SHASH) \
- HMAP_FOR_EACH (SHASH_NODE, struct shash_node, node, &(SHASH)->map)
+#define SHASH_FOR_EACH(SHASH_NODE, SHASH) \
+ HMAP_FOR_EACH (SHASH_NODE, node, &(SHASH)->map)
-#define SHASH_FOR_EACH_SAFE(SHASH_NODE, NEXT, SHASH) \
- HMAP_FOR_EACH_SAFE (SHASH_NODE, NEXT, struct shash_node, node, \
- &(SHASH)->map)
+#define SHASH_FOR_EACH_SAFE(SHASH_NODE, NEXT, SHASH) \
+ HMAP_FOR_EACH_SAFE (SHASH_NODE, NEXT, node, &(SHASH)->map)
void shash_init(struct shash *);
void shash_destroy(struct shash *);
new_connection(server, fd);
}
- LIST_FOR_EACH_SAFE (conn, next,
- struct unixctl_conn, node, &server->conns) {
+ LIST_FOR_EACH_SAFE (conn, next, node, &server->conns) {
int error = run_connection(conn);
if (error && error != EAGAIN) {
kill_connection(conn);
struct unixctl_conn *conn;
poll_fd_wait(server->fd, POLLIN);
- LIST_FOR_EACH (conn, struct unixctl_conn, node, &server->conns) {
+ LIST_FOR_EACH (conn, node, &server->conns) {
if (conn->state == S_RECV) {
poll_fd_wait(conn->fd, POLLIN);
} else if (conn->state == S_SEND) {
if (server) {
struct unixctl_conn *conn, *next;
- LIST_FOR_EACH_SAFE (conn, next,
- struct unixctl_conn, node, &server->conns) {
+ LIST_FOR_EACH_SAFE (conn, next, node, &server->conns) {
kill_connection(conn);
}
{
struct ofproto_sflow_port *osp;
- HMAP_FOR_EACH_IN_BUCKET (osp, struct ofproto_sflow_port, hmap_node,
+ HMAP_FOR_EACH_IN_BUCKET (osp, hmap_node,
hash_int(odp_port, 0), &os->ports) {
if (osp->odp_port == odp_port) {
return osp;
struct ofproto_sflow_port *osp, *next;
ofproto_sflow_clear(os);
- HMAP_FOR_EACH_SAFE (osp, next, struct ofproto_sflow_port, hmap_node,
- &os->ports) {
+ HMAP_FOR_EACH_SAFE (osp, next, hmap_node, &os->ports) {
ofproto_sflow_del_port__(os, osp);
}
hmap_destroy(&os->ports);
MAX(1, UINT32_MAX / options->sampling_rate));
/* Add samplers and pollers for the currently known ports. */
- HMAP_FOR_EACH (osp, struct ofproto_sflow_port, hmap_node, &os->ports) {
+ HMAP_FOR_EACH (osp, hmap_node, &os->ports) {
ofproto_sflow_add_poller(os, osp, osp->odp_port);
ofproto_sflow_add_sampler(os, osp);
}
{
struct ofconn *ofconn;
- HMAP_FOR_EACH_WITH_HASH (ofconn, struct ofconn, hmap_node,
+ HMAP_FOR_EACH_WITH_HASH (ofconn, hmap_node,
hash_string(target, 0), &ofproto->controllers) {
if (!strcmp(ofconn_get_target(ofconn), target)) {
return ofconn;
/* Add all the remotes. */
discovery = false;
- HMAP_FOR_EACH (ofconn, struct ofconn, hmap_node, &ofproto->controllers) {
+ HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) {
struct sockaddr_in *sin = &addrs[n_addrs];
if (ofconn->band == OFPROTO_OUT_OF_BAND) {
n = 0;
rconns = xmalloc(hmap_count(&p->controllers) * sizeof *rconns);
- HMAP_FOR_EACH (ofconn, struct ofconn, hmap_node, &p->controllers) {
+ HMAP_FOR_EACH (ofconn, hmap_node, &p->controllers) {
rconns[n++] = ofconn->rconn;
}
/* Delete controllers that are no longer configured.
* Update configuration of all now-existing controllers. */
ss_exists = false;
- HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, hmap_node,
- &p->controllers) {
+ HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &p->controllers) {
struct ofproto_controller *c;
c = shash_find_data(&new_controllers, ofconn_get_target(ofconn));
/* Delete services that are no longer configured.
* Update configuration of all now-existing services. */
- HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, struct ofservice, node,
- &p->services) {
+ HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &p->services) {
struct ofproto_controller *c;
c = shash_find_data(&new_controllers,
{
struct ofconn *ofconn;
- LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
+ LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
rconn_reconnect(ofconn->rconn);
}
}
os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
refresh_port_groups(ofproto);
- HMAP_FOR_EACH (ofport, struct ofport, hmap_node, &ofproto->ports) {
+ HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) {
ofproto_sflow_add_port(os, ofport->odp_port,
netdev_get_name(ofport->netdev));
}
ofproto_flush_flows(p);
classifier_destroy(&p->cls);
- LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
- &p->all_conns) {
+ LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &p->all_conns) {
ofconn_destroy(ofconn);
}
hmap_destroy(&p->controllers);
dpif_close(p->dpif);
netdev_monitor_destroy(p->netdev_monitor);
- HMAP_FOR_EACH_SAFE (ofport, next_ofport, struct ofport, hmap_node,
- &p->ports) {
+ HMAP_FOR_EACH_SAFE (ofport, next_ofport, hmap_node, &p->ports) {
hmap_remove(&p->ports, &ofport->hmap_node);
ofport_free(ofport);
}
netflow_destroy(p->netflow);
ofproto_sflow_destroy(p->sflow);
- HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, struct ofservice, node,
- &p->services) {
+ HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &p->services) {
ofservice_destroy(p, ofservice);
}
hmap_destroy(&p->services);
/* Pick a controller for monitoring. */
best = NULL;
- LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
+ LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
if (ofconn->type == OFCONN_PRIMARY
&& (!best || snoop_preference(ofconn) > snoop_preference(best))) {
best = ofconn;
in_band_run(p->in_band);
}
- LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
- &p->all_conns) {
+ LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &p->all_conns) {
ofconn_run(ofconn, p);
}
fail_open_run(p->fail_open);
}
- HMAP_FOR_EACH (ofservice, struct ofservice, node, &p->services) {
+ HMAP_FOR_EACH (ofservice, node, &p->services) {
struct vconn *vconn;
int retval;
dpif_recv_wait(p->dpif);
dpif_port_poll_wait(p->dpif);
netdev_monitor_poll_wait(p->netdev_monitor);
- LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
+ LIST_FOR_EACH (ofconn, node, &p->all_conns) {
ofconn_wait(ofconn);
}
if (p->in_band) {
} else if (p->next_expiration != LLONG_MAX) {
poll_timer_wait_until(p->next_expiration);
}
- HMAP_FOR_EACH (ofservice, struct ofservice, node, &p->services) {
+ HMAP_FOR_EACH (ofservice, node, &p->services) {
pvconn_wait(ofservice->pvconn);
}
for (i = 0; i < p->n_snoops; i++) {
size_t i;
svec_init(&devnames);
- HMAP_FOR_EACH (ofport, struct ofport, hmap_node, &p->ports) {
+ HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
        svec_add(&devnames, (char *) ofport->opp.name);
}
dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
ports = xmalloc(hmap_count(&p->ports) * sizeof *ports);
n_ports = 0;
- HMAP_FOR_EACH (port, struct ofport, hmap_node, &p->ports) {
+ HMAP_FOR_EACH (port, hmap_node, &p->ports) {
if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
ports[n_ports++] = port->odp_port;
}
{
/* XXX Should limit the number of queued port status change messages. */
struct ofconn *ofconn;
- LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
+ LIST_FOR_EACH (ofconn, node, &p->all_conns) {
struct ofp_port_status *ops;
struct ofpbuf *b;
{
struct ofport *port;
- HMAP_FOR_EACH_IN_BUCKET (port, struct ofport, hmap_node,
+ HMAP_FOR_EACH_IN_BUCKET (port, hmap_node,
hash_int(odp_port, 0), &ofproto->ports) {
if (port->odp_port == odp_port) {
return port;
{
struct ofservice *ofservice;
- HMAP_FOR_EACH_WITH_HASH (ofservice, struct ofservice, node,
- hash_string(target, 0), &ofproto->services) {
+ HMAP_FOR_EACH_WITH_HASH (ofservice, node, hash_string(target, 0),
+ &ofproto->services) {
if (!strcmp(pvconn_get_name(ofservice->pvconn), target)) {
return ofservice;
}
{
if (!rule->super) {
struct rule *subrule, *next;
- LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
+ LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
revalidate_rule(ofproto, subrule);
}
} else {
(1u << OFPAT_SET_TP_DST) |
(1u << OFPAT_ENQUEUE));
- HMAP_FOR_EACH (port, struct ofport, hmap_node, &p->ports) {
+ HMAP_FOR_EACH (port, hmap_node, &p->ports) {
hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
}
append_port_stat(port, ofconn, &msg);
}
} else {
- HMAP_FOR_EACH (port, struct ofport, hmap_node, &p->ports) {
+ HMAP_FOR_EACH (port, hmap_node, &p->ports) {
append_port_stat(port, ofconn, &msg);
}
}
odp_flows = xzalloc(n_odp_flows * sizeof *odp_flows);
if (rule->cr.wc.wildcards) {
size_t i = 0;
- LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
+ LIST_FOR_EACH (subrule, list, &rule->list) {
odp_flows[i++].key = subrule->cr.flow;
packet_count += subrule->packet_count;
byte_count += subrule->byte_count;
port_no = ntohs(qsr->port_no);
queue_id = ntohl(qsr->queue_id);
if (port_no == OFPP_ALL) {
- HMAP_FOR_EACH (port, struct ofport, hmap_node, &ofproto->ports) {
+ HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
handle_queue_stats_for_port(port, queue_id, &cbdata);
}
} else if (port_no < ofproto->max_ports) {
if (role == NX_ROLE_MASTER) {
struct ofconn *other;
- HMAP_FOR_EACH (other, struct ofconn, hmap_node,
- &ofproto->controllers) {
+ HMAP_FOR_EACH (other, hmap_node, &ofproto->controllers) {
if (other->role == NX_ROLE_MASTER) {
other->role = NX_ROLE_SLAVE;
}
* requests that would not add new flows, so it is imperfect.) */
prev = NULL;
- LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
+ LIST_FOR_EACH (ofconn, node, &p->all_conns) {
if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn)
&& ofconn_receives_async_msgs(ofconn)) {
if (prev) {
* due to an idle timeout. */
if (rule->cr.wc.wildcards) {
struct rule *subrule, *next;
- LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
+ LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
rule_remove(p, subrule);
}
} else {
max_len = do_convert_to_packet_in(packet);
prev = NULL;
- LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
+ LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
if (ofconn_receives_async_msgs(ofconn)) {
if (prev) {
schedule_packet_in(prev, packet, max_len, true);
sr.request.string = (void *) (request + 1);
sr.request.length = ntohs(request->header.length) - sizeof *request;
ds_init(&sr.output);
- LIST_FOR_EACH (c, struct status_category, node, &ss->categories) {
+ LIST_FOR_EACH (c, node, &ss->categories) {
if (!memcmp(c->name, sr.request.string,
MIN(strlen(c->name), sr.request.length))) {
sr.category = c;
/* Orphan any remaining categories, so that unregistering them later
* won't write to bad memory. */
struct status_category *c, *next;
- LIST_FOR_EACH_SAFE (c, next,
- struct status_category, node, &ss->categories) {
+ LIST_FOR_EACH_SAFE (c, next, node, &ss->categories) {
list_init(&c->node);
}
switch_status_unregister(ss->config_cat);
const struct ovsdb_table *table = node->data;
const struct ovsdb_row *row;
- HMAP_FOR_EACH (row, struct ovsdb_row, hmap_node, &table->rows) {
+ HMAP_FOR_EACH (row, hmap_node, &table->rows) {
ovsdb_file_txn_add_row(&ftxn, NULL, row, NULL);
}
}
{
struct ovsdb_jsonrpc_session *s, *next;
- LIST_FOR_EACH_SAFE (s, next, struct ovsdb_jsonrpc_session, node,
- &remote->sessions) {
+ LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) {
int error = ovsdb_jsonrpc_session_run(s);
if (error) {
ovsdb_jsonrpc_session_close(s);
{
struct ovsdb_jsonrpc_session *s;
- LIST_FOR_EACH (s, struct ovsdb_jsonrpc_session, node, &remote->sessions) {
+ LIST_FOR_EACH (s, node, &remote->sessions) {
ovsdb_jsonrpc_session_wait(s);
}
}
{
struct ovsdb_jsonrpc_session *s, *next;
- LIST_FOR_EACH_SAFE (s, next, struct ovsdb_jsonrpc_session, node,
- &remote->sessions) {
+ LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) {
ovsdb_jsonrpc_session_close(s);
}
}
{
struct ovsdb_jsonrpc_session *s, *next;
- LIST_FOR_EACH_SAFE (s, next, struct ovsdb_jsonrpc_session, node,
- &remote->sessions) {
+ LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) {
jsonrpc_session_force_reconnect(s->js);
if (!jsonrpc_session_is_alive(s->js)) {
ovsdb_jsonrpc_session_close(s);
{
struct ovsdb_jsonrpc_trigger *t;
- HMAP_FOR_EACH_WITH_HASH (t, struct ovsdb_jsonrpc_trigger, hmap_node, hash,
- &s->triggers) {
+ HMAP_FOR_EACH_WITH_HASH (t, hmap_node, hash, &s->triggers) {
if (json_equal(t->id, id)) {
return t;
}
ovsdb_jsonrpc_trigger_complete_all(struct ovsdb_jsonrpc_session *s)
{
struct ovsdb_jsonrpc_trigger *t, *next;
- HMAP_FOR_EACH_SAFE (t, next, struct ovsdb_jsonrpc_trigger, hmap_node,
- &s->triggers) {
+ HMAP_FOR_EACH_SAFE (t, next, hmap_node, &s->triggers) {
ovsdb_jsonrpc_trigger_complete(t);
}
}
{
struct ovsdb_jsonrpc_monitor *m;
- HMAP_FOR_EACH_WITH_HASH (m, struct ovsdb_jsonrpc_monitor, node,
- json_hash(monitor_id, 0), &s->monitors) {
+ HMAP_FOR_EACH_WITH_HASH (m, node, json_hash(monitor_id, 0), &s->monitors) {
if (json_equal(m->monitor_id, monitor_id)) {
return m;
}
{
struct ovsdb_jsonrpc_monitor *m, *next;
- HMAP_FOR_EACH_SAFE (m, next,
- struct ovsdb_jsonrpc_monitor, node, &s->monitors) {
+ HMAP_FOR_EACH_SAFE (m, next, node, &s->monitors) {
ovsdb_remove_replica(s->remote->server->db, &m->replica);
}
}
if (mt->select & OJMS_INITIAL) {
struct ovsdb_row *row;
- HMAP_FOR_EACH (row, struct ovsdb_row, hmap_node,
- &mt->table->rows) {
+ HMAP_FOR_EACH (row, hmap_node, &mt->table->rows) {
ovsdb_jsonrpc_monitor_change_cb(NULL, row, NULL, &aux);
}
}
parse_db_string_column(db, name, &table, &column);
- HMAP_FOR_EACH (row, struct ovsdb_row, hmap_node, &table->rows) {
+ HMAP_FOR_EACH (row, hmap_node, &table->rows) {
const struct ovsdb_datum *datum;
size_t i;
parse_db_string_column(db, name, &table, &column);
- HMAP_FOR_EACH (row, struct ovsdb_row, hmap_node, &table->rows) {
+ HMAP_FOR_EACH (row, hmap_node, &table->rows) {
const struct ovsdb_datum *datum;
size_t i;
-/* Copyright (c) 2009 Nicira Networks
+/* Copyright (c) 2009, 2010 Nicira Networks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
/* Linear scan. */
const struct ovsdb_row *row, *next;
- HMAP_FOR_EACH_SAFE (row, next, struct ovsdb_row, hmap_node,
- &table->rows) {
+ HMAP_FOR_EACH_SAFE (row, next, hmap_node, &table->rows) {
if (ovsdb_condition_evaluate(row, cnd) && !output_row(row, aux)) {
break;
}
ovsdb_row_hash_init(&hash, columns);
ovsdb_query(table, condition, query_distinct_cb, &hash);
- HMAP_FOR_EACH (node, struct ovsdb_row_hash_node, hmap_node,
- &hash.rows) {
+ HMAP_FOR_EACH (node, hmap_node, &hash.rows) {
ovsdb_row_set_add_row(results, node->row);
}
ovsdb_row_hash_destroy(&hash, false);
struct ovsdb_weak_ref *weak, *next;
const struct shash_node *node;
- LIST_FOR_EACH_SAFE (weak, next, struct ovsdb_weak_ref, dst_node,
- &row->dst_refs) {
+ LIST_FOR_EACH_SAFE (weak, next, dst_node, &row->dst_refs) {
list_remove(&weak->src_node);
list_remove(&weak->dst_node);
free(weak);
}
- LIST_FOR_EACH_SAFE (weak, next, struct ovsdb_weak_ref, src_node,
- &row->src_refs) {
+ LIST_FOR_EACH_SAFE (weak, next, src_node, &row->src_refs) {
list_remove(&weak->src_node);
list_remove(&weak->dst_node);
free(weak);
{
struct ovsdb_row_hash_node *node, *next;
- HMAP_FOR_EACH_SAFE (node, next, struct ovsdb_row_hash_node, hmap_node,
- &rh->rows) {
+ HMAP_FOR_EACH_SAFE (node, next, hmap_node, &rh->rows) {
hmap_remove(&rh->rows, &node->hmap_node);
if (destroy_rows) {
ovsdb_row_destroy((struct ovsdb_row *) node->row);
struct ovsdb_row_hash_node *node;
assert(ovsdb_column_set_equals(&a->columns, &b->columns));
- HMAP_FOR_EACH (node, struct ovsdb_row_hash_node, hmap_node, &b->rows) {
+ HMAP_FOR_EACH (node, hmap_node, &b->rows) {
if (!ovsdb_row_hash_contains__(a, node->row, node->hmap_node.hash)) {
return false;
}
const struct ovsdb_row *row, size_t hash)
{
struct ovsdb_row_hash_node *node;
- HMAP_FOR_EACH_WITH_HASH (node, struct ovsdb_row_hash_node, hmap_node,
- hash, &rh->rows) {
+ HMAP_FOR_EACH_WITH_HASH (node, hmap_node, hash, &rh->rows) {
if (ovsdb_row_equal_columns(row, node->row, &rh->columns)) {
return true;
}
if (table) {
struct ovsdb_row *row, *next;
- HMAP_FOR_EACH_SAFE (row, next, struct ovsdb_row, hmap_node,
- &table->rows) {
+ HMAP_FOR_EACH_SAFE (row, next, hmap_node, &table->rows) {
ovsdb_row_destroy(row);
}
hmap_destroy(&table->rows);
{
struct ovsdb_row *row;
- HMAP_FOR_EACH_WITH_HASH (row, struct ovsdb_row, hmap_node, uuid_hash(uuid),
- &table->rows) {
+ HMAP_FOR_EACH_WITH_HASH (row, hmap_node, uuid_hash(uuid), &table->rows) {
if (uuid_equals(ovsdb_row_get_uuid(row), uuid)) {
return row;
}
return NULL;
}
- HMAP_FOR_EACH_WITH_HASH (txn_row, struct ovsdb_txn_row, hmap_node,
+ HMAP_FOR_EACH_WITH_HASH (txn_row, hmap_node,
uuid_hash(uuid), &table->txn_table->txn_rows) {
const struct ovsdb_row *row;
* that their weak references will get reassessed. */
struct ovsdb_weak_ref *weak, *next;
- LIST_FOR_EACH_SAFE (weak, next, struct ovsdb_weak_ref, dst_node,
- &txn_row->old->dst_refs) {
+ LIST_FOR_EACH_SAFE (weak, next, dst_node, &txn_row->old->dst_refs) {
if (!weak->src->txn_row) {
ovsdb_txn_row_modify(txn, weak->src);
}
{
struct ovsdb_txn_table *t;
- LIST_FOR_EACH (t, struct ovsdb_txn_table, node, &txn->txn_tables) {
+ LIST_FOR_EACH (t, node, &txn->txn_tables) {
size_t n_rows = hmap_count(&t->table->rows);
unsigned int max_rows = t->table->schema->max_rows;
}
/* Send the commit to each replica. */
- LIST_FOR_EACH (replica, struct ovsdb_replica, node, &txn->db->replicas) {
+ LIST_FOR_EACH (replica, node, &txn->db->replicas) {
error = (replica->class->commit)(replica, txn, durable);
if (error) {
/* We don't support two-phase commit so only the first replica is
struct ovsdb_txn_table *t;
struct ovsdb_txn_row *r;
- LIST_FOR_EACH (t, struct ovsdb_txn_table, node, &txn->txn_tables) {
- HMAP_FOR_EACH (r, struct ovsdb_txn_row, hmap_node, &t->txn_rows) {
+ LIST_FOR_EACH (t, node, &txn->txn_tables) {
+ HMAP_FOR_EACH (r, hmap_node, &t->txn_rows) {
if (!cb(r->old, r->new, r->changed, aux)) {
break;
}
struct ovsdb_txn_table *t, *next_txn_table;
any_work = false;
- LIST_FOR_EACH_SAFE (t, next_txn_table, struct ovsdb_txn_table, node,
- &txn->txn_tables) {
+ LIST_FOR_EACH_SAFE (t, next_txn_table, node, &txn->txn_tables) {
if (t->serial != serial) {
t->serial = serial;
t->n_processed = 0;
while (t->n_processed < hmap_count(&t->txn_rows)) {
struct ovsdb_txn_row *r, *next_txn_row;
- HMAP_FOR_EACH_SAFE (r, next_txn_row,
- struct ovsdb_txn_row, hmap_node,
- &t->txn_rows) {
+ HMAP_FOR_EACH_SAFE (r, next_txn_row, hmap_node, &t->txn_rows) {
if (r->serial != serial) {
struct ovsdb_error *error;
run_triggers = db->run_triggers;
db->run_triggers = false;
- LIST_FOR_EACH_SAFE (t, next, struct ovsdb_trigger, node, &db->triggers) {
+ LIST_FOR_EACH_SAFE (t, next, node, &db->triggers) {
if (run_triggers || now - t->created >= t->timeout_msec) {
ovsdb_trigger_try(db, t, now);
}
long long int deadline = LLONG_MAX;
struct ovsdb_trigger *t;
- LIST_FOR_EACH (t, struct ovsdb_trigger, node, &db->triggers) {
+ LIST_FOR_EACH (t, node, &db->triggers) {
if (t->created < LLONG_MAX - t->timeout_msec) {
long long int t_deadline = t->created + t->timeout_msec;
if (deadline > t_deadline) {
if (!hmap_is_empty(&cls->tables[i])) {
found_tables++;
}
- HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, &cls->tables[i]) {
+ HMAP_FOR_EACH (bucket, hmap_node, &cls->tables[i]) {
found_buckets++;
assert(!list_is_empty(&bucket->rules));
found_rules += list_size(&bucket->rules);
hmap_values = xmalloc(sizeof *sort_values * n);
i = 0;
- HMAP_FOR_EACH (e, struct element, node, hmap) {
+ HMAP_FOR_EACH (e, node, hmap) {
assert(i < n);
hmap_values[i++] = e->value;
}
for (i = 0; i < n; i++) {
size_t count = 0;
- HMAP_FOR_EACH_WITH_HASH (e, struct element, node,
- hash(values[i]), hmap) {
+ HMAP_FOR_EACH_WITH_HASH (e, node, hash(values[i]), hmap) {
count += e->value == values[i];
}
assert(count == 1);
struct element *e;
printf("%s:", name);
- HMAP_FOR_EACH (e, struct element, node, hmap) {
+ HMAP_FOR_EACH (e, node, hmap) {
printf(" %d(%zu)", e->value, e->node.hash & hmap->mask);
}
printf("\n");
i = 0;
n_remaining = n;
- HMAP_FOR_EACH_SAFE (e, next, struct element, node, &hmap) {
+ HMAP_FOR_EACH_SAFE (e, next, node, &hmap) {
assert(i < n);
if (pattern & (1ul << e->value)) {
size_t j;
/*
- * Copyright (c) 2008, 2009 Nicira Networks.
+ * Copyright (c) 2008, 2009, 2010 Nicira Networks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
size_t i;
i = 0;
- LIST_FOR_EACH (e, struct element, node, list) {
+ LIST_FOR_EACH (e, node, list) {
assert(i < n);
assert(e->value == values[i]);
i++;
assert(i == n);
i = 0;
- LIST_FOR_EACH_REVERSE (e, struct element, node, list) {
+ LIST_FOR_EACH_REVERSE (e, node, list) {
assert(i < n);
assert(e->value == values[n - i - 1]);
i++;
struct element *e;
printf("%s:", name);
- LIST_FOR_EACH (e, struct element, node, list) {
+ LIST_FOR_EACH (e, node, list) {
printf(" %d", e->value);
}
printf("\n");
i = 0;
values_idx = 0;
n_remaining = n;
- LIST_FOR_EACH_SAFE (e, next, struct element, node, &list) {
+ LIST_FOR_EACH_SAFE (e, next, node, &list) {
assert(i < n);
if (pattern & (1ul << i)) {
list_remove(&e->node);
n_rows = hmap_count(&do_transact_table->rows);
rows = xmalloc(n_rows * sizeof *rows);
i = 0;
- HMAP_FOR_EACH (row, struct ovsdb_row, hmap_node,
- &do_transact_table->rows) {
+ HMAP_FOR_EACH (row, hmap_node, &do_transact_table->rows) {
rows[i++] = row;
}
assert(i == n_rows);
/* Collect old and new bridges. */
shash_init(&old_br);
shash_init(&new_br);
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
shash_add(&old_br, br->name, br);
}
for (i = 0; i < ovs_cfg->n_bridges; i++) {
}
/* Get rid of deleted bridges and add new bridges. */
- LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH_SAFE (br, next, node, &all_bridges) {
struct ovsrec_bridge *br_cfg = shash_find_data(&new_br, br->name);
if (br_cfg) {
br->cfg = br_cfg;
shash_destroy(&new_br);
/* Reconfigure all bridges. */
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
bridge_reconfigure_one(br);
}
* The kernel will reject any attempt to add a given port to a datapath if
* that port already belongs to a different datapath, so we must do all
* port deletions before any port additions. */
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
struct odp_port *dpif_ports;
size_t n_dpif_ports;
struct shash want_ifaces;
shash_destroy(&want_ifaces);
free(dpif_ports);
}
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
struct odp_port *dpif_ports;
size_t n_dpif_ports;
struct shash cur_ifaces, want_ifaces;
shash_destroy(&want_ifaces);
}
sflow_bridge_number = 0;
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
uint8_t ea[8];
uint64_t dpid;
struct iface *local_iface;
* the datapath ID before the controller. */
bridge_reconfigure_remotes(br, managers, n_managers);
}
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
for (i = 0; i < br->n_ports; i++) {
struct port *port = br->ports[i];
int j;
}
}
}
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
iterate_and_prune_ifaces(br, set_iface_properties, NULL);
}
/* Let each bridge do the work that it needs to do. */
datapath_destroyed = false;
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
int error = bridge_run_one(br);
if (error) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
struct ovsdb_idl_txn *txn;
txn = ovsdb_idl_txn_create(idl);
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
size_t i;
for (i = 0; i < br->n_ports; i++) {
{
struct bridge *br;
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
ofproto_wait(br->ofproto);
if (ofproto_has_primary_controller(br->ofproto)) {
continue;
}
ds_put_cstr(&ds, " port VLAN MAC Age\n");
- LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
+ LIST_FOR_EACH (e, lru_node, &br->ml->lrus) {
if (e->port < 0 || e->port >= br->n_ports) {
continue;
}
{
struct bridge *br;
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
if (!strcmp(br->name, name)) {
return br;
}
}
ofproto_reconnect_controllers(br->ofproto);
} else {
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
ofproto_reconnect_controllers(br->ofproto);
}
}
ofpbuf_init(&packet, 128);
error = n_packets = n_errors = 0;
- LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
+ LIST_FOR_EACH (e, lru_node, &br->ml->lrus) {
union ofp_action actions[2], *a;
uint16_t dp_ifidx;
tag_type tags = 0;
ds_put_cstr(&ds, "bridge\tbond\tslaves\n");
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
size_t i;
for (i = 0; i < br->n_ports; i++) {
{
const struct bridge *br;
- LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
+ LIST_FOR_EACH (br, node, &all_bridges) {
size_t i;
for (i = 0; i < br->n_ports; i++) {
hash, be->tx_bytes / 1024);
/* MACs. */
- LIST_FOR_EACH (me, struct mac_entry, lru_node,
- &port->bridge->ml->lrus) {
+ LIST_FOR_EACH (me, lru_node, &port->bridge->ml->lrus) {
uint16_t dp_ifidx;
tag_type tags = 0;
if (bond_hash(me->mac) == hash
{
struct iface *iface;
- HMAP_FOR_EACH_IN_BUCKET (iface, struct iface, dp_ifidx_node,
+ HMAP_FOR_EACH_IN_BUCKET (iface, dp_ifidx_node,
hash_int(dp_ifidx, 0), &br->ifaces) {
if (iface->dp_ifidx == dp_ifidx) {
return iface;
/* 'tagged_dev' is not attached to any compat_vlan. Find the
* compat_vlan corresponding to (trunk_dev,vid) to attach it to, or
* create a new compat_vlan if none exists for (trunk_dev,vid). */
- HMAP_FOR_EACH_WITH_HASH (vlan, struct compat_vlan, trunk_node,
- hash_vlan(trunk_dev, vid),
+ HMAP_FOR_EACH_WITH_HASH (vlan, trunk_node, hash_vlan(trunk_dev, vid),
&vlans_by_trunk) {
if (!strcmp(trunk_dev, vlan->trunk_dev) && vid == vlan->vid) {
break;
ds_init(&ds);
ds_put_cstr(&ds, "VLAN Dev name | VLAN ID\n"
"Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD\n");
- HMAP_FOR_EACH (vlan, struct compat_vlan, trunk_node, &vlans_by_trunk) {
+ HMAP_FOR_EACH (vlan, trunk_node, &vlans_by_trunk) {
ds_put_format(&ds, "%-15s| %d | %s\n",
vlan->vlan_dev, vlan->vid, vlan->trunk_dev);
}