classifier: Make classifier_for_each() easier to use.
author: Ben Pfaff <blp@nicira.com>
Tue, 28 Apr 2009 17:28:03 +0000 (10:28 -0700)
committer: Ben Pfaff <blp@nicira.com>
Fri, 1 May 2009 17:55:28 +0000 (10:55 -0700)
classifier_for_each() and classifier_for_each_match() previously had the
restriction that the callback could not delete any rule that would be
visited in the same call, even if it was in a different table (except for
the rule actually passed to the callback).  But a number of callers do
want to delete rules in other tables, and it is easy to eliminate that
restriction, so this commit does so.

lib/classifier.c
secchan/ofproto.c

index d40d1de11c3936cc071226ad16980db81223daf9..2551662255ba24c9a41d659bffff361b10fe6dee 100644 (file)
@@ -340,49 +340,62 @@ classifier_find_rule_exactly(const struct classifier *cls,
     return NULL;
 }
 
-/* Ignores target->priority. */
+/* Ignores target->priority.
+ *
+ * 'callback' is allowed to delete the rule that is passed as its argument, but
+ * it must not delete (or move) any other rules in 'cls' that are in the same
+ * table as the argument rule.  Two rules are in the same table if their
+ * cls_rule structs have the same table_idx; as a special case, a rule with
+ * wildcards and an exact-match rule will never be in the same table. */
 void
 classifier_for_each_match(const struct classifier *cls,
                           const struct cls_rule *target,
-                          int include, cls_cb_func *cb, void *aux)
+                          int include, cls_cb_func *callback, void *aux)
 {
-    struct cls_rule *prev_rule;
-
-    prev_rule = NULL;
     if (include & CLS_INC_WILD) {
         const struct hmap *table;
+
         for (table = &cls->tables[0]; table < &cls->tables[CLS_N_FIELDS];
              table++) {
-            struct cls_bucket *bucket;
-            HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, table) {
+            struct cls_bucket *bucket, *next_bucket;
+
+            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
+                                struct cls_bucket, hmap_node, table) {
                 /* XXX there is a bit of room for optimization here based on
                  * rejecting entire buckets on their fixed fields, but it will
                  * only be worthwhile for big buckets (which we hope we won't
                  * get anyway, but...) */
-                struct cls_rule *pos;
-                LIST_FOR_EACH (pos, struct cls_rule, node.list,
+                struct cls_rule *prev_rule, *rule;
+
+                /* We can't just use LIST_FOR_EACH_SAFE here because, if the
+                 * callback deletes the last rule in the bucket, then the
+                 * bucket itself will be destroyed.  The bucket contains the
+                 * list head so that's a use-after-free error. */
+                prev_rule = NULL;
+                LIST_FOR_EACH (rule, struct cls_rule, node.list,
                                &bucket->rules) {
-                    if (rules_match_1wild(pos, target, 0)) {
+                    if (rules_match_1wild(rule, target, 0)) {
                         if (prev_rule) {
-                            cb(prev_rule, aux);
+                            callback(prev_rule, aux);
                         }
-                        prev_rule = pos;
+                        prev_rule = rule;
                     }
                 }
+                if (prev_rule) {
+                    callback(prev_rule, aux);
+                }
             }
         }
     }
 
     if (include & CLS_INC_EXACT) {
         if (target->wc.wildcards) {
-            struct cls_rule *rule;
-            HMAP_FOR_EACH (rule, struct cls_rule, node.hmap,
-                           &cls->exact_table) {
+            struct cls_rule *rule, *next_rule;
+
+            HMAP_FOR_EACH_SAFE (rule, next_rule, struct cls_rule, node.hmap,
+                                &cls->exact_table) {
                 if (rules_match_1wild(rule, target, 0)) {
-                    if (prev_rule) {
-                        cb(prev_rule, aux);
-                    }
-                    prev_rule = rule;
+                    callback(rule, aux);
                 }
             }
         } else {
@@ -392,37 +405,37 @@ classifier_for_each_match(const struct classifier *cls,
             struct cls_rule *rule = search_exact_table(cls, hash,
                                                        &target->flow);
             if (rule) {
-                if (prev_rule) {
-                    cb(prev_rule, aux);
-                }
-                prev_rule = rule;
+                callback(rule, aux);
             }
         }
     }
-    if (prev_rule) {
-        cb(prev_rule, aux);
-    }
 }
 
 /* 'callback' is allowed to delete the rule that is passed as its argument, but
- * it must not delete (or move) any other rules in 'cls' that will be visited
- * by this call to classifier_for_each().  (It must not even delete rules that
- * would fall into other tables, e.g. exact-match rules if the argument rule
- * has wildcards.) */
+ * it must not delete (or move) any other rules in 'cls' that are in the same
+ * table as the argument rule.  Two rules are in the same table if their
+ * cls_rule structs have the same table_idx; as a special case, a rule with
+ * wildcards and an exact-match rule will never be in the same table. */
 void
 classifier_for_each(const struct classifier *cls, int include,
                     void (*callback)(struct cls_rule *, void *aux),
                     void *aux)
 {
-    struct cls_rule *prev_rule, *rule, *next_rule;
-    const struct hmap *tbl;
-
-    prev_rule = NULL;
     if (include & CLS_INC_WILD) {
-        struct cls_bucket *bucket, *next_bucket;
+        const struct hmap *tbl;
+
         for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
+            struct cls_bucket *bucket, *next_bucket;
+
             HMAP_FOR_EACH_SAFE (bucket, next_bucket,
                                 struct cls_bucket, hmap_node, tbl) {
+                struct cls_rule *prev_rule, *rule;
+
+                /* We can't just use LIST_FOR_EACH_SAFE here because, if the
+                 * callback deletes the last rule in the bucket, then the
+                 * bucket itself will be destroyed.  The bucket contains the
+                 * list head so that's a use-after-free error. */
+                prev_rule = NULL;
                 LIST_FOR_EACH (rule, struct cls_rule, node.list,
                                &bucket->rules) {
                     if (prev_rule) {
@@ -430,21 +443,21 @@ classifier_for_each(const struct classifier *cls, int include,
                     }
                     prev_rule = rule;
                 }
+                if (prev_rule) {
+                    callback(prev_rule, aux);
+                }
             }
         }
     }
+
     if (include & CLS_INC_EXACT) {
+        struct cls_rule *rule, *next_rule;
+
         HMAP_FOR_EACH_SAFE (rule, next_rule,
                             struct cls_rule, node.hmap, &cls->exact_table) {
-            if (prev_rule) {
-                callback(prev_rule, aux);
-            }
-            prev_rule = rule;
+            callback(rule, aux);
         }
     }
-    if (prev_rule) {
-        callback(prev_rule, aux);
-    }
 }
 \f
 static struct cls_bucket *create_bucket(struct hmap *, size_t hash,
index 5e401c9440c7c2229c1ad0be9a447c89e708343d..9191ab6e61e10f9f0d115fc09bd53b8fe8d3d8fc 100644 (file)
@@ -837,13 +837,7 @@ ofproto_run1(struct ofproto *p)
         p->next_expiration = time_msec() + 1000;
         update_used(p);
 
-        /* This is broken into two calls to classifier_for_each() due to that
-         * function's requirement that the callback not delete any rules that
-         * will be visited, other than the callback's argument.  In particular,
-         * calling expire_rule() on a rule with wildcards can delete an
-         * arbitrary number of exact-match rules. */
-        classifier_for_each(&p->cls, CLS_INC_WILD, expire_rule, p);
-        classifier_for_each(&p->cls, CLS_INC_EXACT, expire_rule, p);
+        classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);
     }
 
     if (p->netflow) {