From b5d97350cdb4559fbce80057574e66daa1ac68df Mon Sep 17 00:00:00 2001 From: Ben Pfaff Date: Wed, 3 Nov 2010 11:00:58 -0700 Subject: [PATCH] classifier: Rewrite. The old classifier was not adaptive: it required knowing the structure of the flows that were likely to be in use to get good performance. It is likely that it degenerated to linear search in any real-world case. This new classifier is adaptive and should perform better in the real world. --- lib/classifier.c | 1000 ++++++++++++++++----------------------- lib/classifier.h | 130 ++--- lib/flow.c | 77 ++- lib/flow.h | 28 +- tests/classifier.at | 8 +- tests/test-classifier.c | 584 +++++++++++------------ 6 files changed, 836 insertions(+), 991 deletions(-) diff --git a/lib/classifier.c b/lib/classifier.c index ae2019fd..8da2d795 100644 --- a/lib/classifier.c +++ b/lib/classifier.c @@ -22,36 +22,91 @@ #include "dynamic-string.h" #include "flow.h" #include "hash.h" +#include "packets.h" -const struct cls_field cls_fields[CLS_N_FIELDS + 1] = { -#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \ - { offsetof(struct flow, MEMBER), \ - sizeof ((struct flow *)0)->MEMBER, \ - WILDCARDS, \ - #NAME }, - CLS_FIELDS -#undef CLS_FIELD - { sizeof(struct flow), 0, 0, "exact" }, -}; - -static uint32_t hash_fields(const struct flow *, int table_idx); -static bool equal_fields(const struct flow *, const struct flow *, - int table_idx); - -static int table_idx_from_wildcards(uint32_t wildcards); -static struct cls_rule *table_insert(struct hmap *, struct cls_rule *); -static struct cls_rule *insert_exact_rule(struct classifier *, - struct cls_rule *); -static struct cls_bucket *find_bucket(struct hmap *, size_t hash, - const struct cls_rule *); -static struct cls_rule *search_table(const struct hmap *table, int field_idx, - const struct cls_rule *); -static struct cls_rule *search_exact_table(const struct classifier *, - size_t hash, const struct flow *); -static bool rules_match_1wild(const struct cls_rule *fixed, - const struct cls_rule *wild, int field_idx); -static bool rules_match_2wild(const struct cls_rule *wild1, - const struct cls_rule *wild2, int field_idx); +static struct cls_table *find_table(const struct classifier *, + const struct flow_wildcards *); +static struct cls_table *insert_table(struct classifier *, + const struct flow_wildcards *); + +static struct cls_table *classifier_first_table(const struct classifier *); +static struct cls_table *classifier_next_table(const struct classifier *, + const struct cls_table *); +static void destroy_table(struct classifier *, struct cls_table *); + +static bool should_include(const struct cls_table *, int include); + +static struct cls_rule *find_match(const struct cls_table *, + const struct flow *); +static struct cls_rule *find_equal(struct cls_table *, const struct flow *, + uint32_t hash); +static struct cls_rule *insert_rule(struct cls_table *, struct cls_rule *); + +static bool flow_equal_except(const struct flow *, const struct flow *, + const struct flow_wildcards *); +static void zero_wildcards(struct flow *, const struct flow_wildcards *); + +/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list. 
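+ *
+ * The rules on the list all have the same matching criteria and are kept in
+ * order of decreasing priority.  Use FOR_EACH_RULE_IN_LIST_SAFE, below, if
+ * RULE may be freed by the loop body.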
*/ +#define FOR_EACH_RULE_IN_LIST(RULE, HEAD) \ + for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE)) +#define FOR_EACH_RULE_IN_LIST_SAFE(RULE, NEXT, HEAD) \ + for ((RULE) = (HEAD); \ + (RULE) != NULL && ((NEXT) = next_rule_in_list(RULE), true); \ + (RULE) = (NEXT)) + +static struct cls_rule *next_rule_in_list(struct cls_rule *); + +static struct cls_table * +cls_table_from_hmap_node(const struct hmap_node *node) +{ + return node ? CONTAINER_OF(node, struct cls_table, hmap_node) : NULL; +} + +static struct cls_rule * +cls_rule_from_hmap_node(const struct hmap_node *node) +{ + return node ? CONTAINER_OF(node, struct cls_rule, hmap_node) : NULL; +} + +/* Returns the cls_table within 'cls' that has no wildcards, or NULL if there + * is none. */ +struct cls_table * +classifier_exact_table(const struct classifier *cls) +{ + struct flow_wildcards exact_wc; + flow_wildcards_init_exact(&exact_wc); + return find_table(cls, &exact_wc); +} + +/* Returns the first rule in 'table', or a null pointer if 'table' is NULL. */ +struct cls_rule * +cls_table_first_rule(const struct cls_table *table) +{ + return table ? cls_rule_from_hmap_node(hmap_first(&table->rules)) : NULL; +} + +/* Returns the next rule in 'table' following 'rule', or a null pointer if + * 'rule' is the last rule in 'table'. */ +struct cls_rule * +cls_table_next_rule(const struct cls_table *table, const struct cls_rule *rule) +{ + struct cls_rule *next + = CONTAINER_OF(rule->list.next, struct cls_rule, hmap_node); + + return (next->priority < rule->priority + ? next + : cls_rule_from_hmap_node(hmap_next(&table->rules, + &next->hmap_node))); +} + +static void +cls_rule_init__(struct cls_rule *rule, + const struct flow *flow, uint32_t wildcards) +{ + rule->flow = *flow; + flow_wildcards_init(&rule->wc, wildcards); + cls_rule_zero_wildcards(rule); +} /* Converts the flow in 'flow' into a cls_rule in 'rule', with the given * 'wildcards' and 'priority'.*/ @@ -59,10 +114,8 @@ void cls_rule_from_flow(const struct flow *flow, uint32_t wildcards, unsigned int priority, struct cls_rule *rule) { - rule->flow = *flow; - flow_wildcards_init(&rule->wc, wildcards); + cls_rule_init__(rule, flow, wildcards); rule->priority = priority; - rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards); } /* Converts the ofp_match in 'match' into a cls_rule in 'rule', with the given @@ -74,10 +127,25 @@ cls_rule_from_match(const struct ofp_match *match, unsigned int priority, struct cls_rule *rule) { uint32_t wildcards; - flow_from_match(match, tun_id_from_cookie, cookie, &rule->flow, &wildcards); - flow_wildcards_init(&rule->wc, wildcards); + struct flow flow; + + flow_from_match(match, tun_id_from_cookie, cookie, &flow, &wildcards); + cls_rule_init__(rule, &flow, wildcards); rule->priority = rule->wc.wildcards ? priority : UINT16_MAX; - rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards); +} + +/* For each bit or field wildcarded in 'rule', sets the corresponding bit or + * field in 'flow' to all-0-bits. It is important to maintain this invariant + * in a clr_rule that might be inserted into a classifier. + * + * It is never necessary to call this function directly for a cls_rule that is + * initialized or modified only by cls_rule_*() functions. It is useful to + * restore the invariant in a cls_rule whose 'wc' member is modified by hand. + */ +void +cls_rule_zero_wildcards(struct cls_rule *rule) +{ + zero_wildcards(&rule->flow, &rule->wc); } /* Converts 'rule' to a string and returns the string. 
The caller must free @@ -109,13 +177,8 @@ cls_rule_print(const struct cls_rule *rule) void classifier_init(struct classifier *cls) { - int i; - cls->n_rules = 0; - for (i = 0; i < ARRAY_SIZE(cls->tables); i++) { - hmap_init(&cls->tables[i]); - } - hmap_init(&cls->exact_table); + hmap_init(&cls->tables); } /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the @@ -124,21 +187,18 @@ void classifier_destroy(struct classifier *cls) { if (cls) { - struct cls_bucket *bucket, *next_bucket; - struct hmap *tbl; + struct cls_table *table, *next_table; - for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) { - HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, tbl) { - free(bucket); - } - hmap_destroy(tbl); + HMAP_FOR_EACH_SAFE (table, next_table, hmap_node, &cls->tables) { + hmap_destroy(&table->rules); + hmap_remove(&cls->tables, &table->hmap_node); + free(table); } - hmap_destroy(&cls->exact_table); + hmap_destroy(&cls->tables); } } -/* Returns true if 'cls' does not contain any classification rules, false - * otherwise. */ +/* Returns true if 'cls' contains no classification rules, false otherwise. */ bool classifier_is_empty(const struct classifier *cls) { @@ -156,10 +216,12 @@ classifier_count(const struct classifier *cls) int classifier_count_exact(const struct classifier *cls) { - return hmap_count(&cls->exact_table); + struct cls_table *exact_table = classifier_exact_table(cls); + return exact_table ? exact_table->n_table_rules : 0; } -/* Inserts 'rule' into 'cls'. Transfers ownership of 'rule' to 'cls'. +/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller + * must not modify or free it. * * If 'cls' already contains an identical rule (including wildcards, values of * fixed fields, and priority), replaces the old rule by 'rule' and returns the @@ -173,127 +235,101 @@ classifier_count_exact(const struct classifier *cls) struct cls_rule * classifier_insert(struct classifier *cls, struct cls_rule *rule) { - struct cls_rule *old; - assert((rule->wc.wildcards == 0) == (rule->table_idx == CLS_F_IDX_EXACT)); - old = (rule->wc.wildcards - ? table_insert(&cls->tables[rule->table_idx], rule) - : insert_exact_rule(cls, rule)); - if (!old) { + struct cls_rule *old_rule; + struct cls_table *table; + + table = find_table(cls, &rule->wc); + if (!table) { + table = insert_table(cls, &rule->wc); + } + + old_rule = insert_rule(table, rule); + if (!old_rule) { + table->n_table_rules++; cls->n_rules++; } - return old; + return old_rule; } -/* Removes 'rule' from 'cls'. It is caller's responsibility to free 'rule', if - * this is desirable. */ +/* Removes 'rule' from 'cls'. It is the caller's responsibility to free + * 'rule', if this is desirable. */ void classifier_remove(struct classifier *cls, struct cls_rule *rule) { - if (rule->wc.wildcards) { - /* Remove 'rule' from bucket. If that empties the bucket, remove the - * bucket from its table. */ - struct hmap *table = &cls->tables[rule->table_idx]; - struct list *rules = list_remove(&rule->node.list); - if (list_is_empty(rules)) { - /* This code is a little tricky. list_remove() returns the list - * element just after the one removed. Since the list is now - * empty, this will be the address of the 'rules' member of the - * bucket that was just emptied, so pointer arithmetic (via - * CONTAINER_OF) can find that bucket. 
*/ - struct cls_bucket *bucket; - bucket = CONTAINER_OF(rules, struct cls_bucket, rules); - hmap_remove(table, &bucket->hmap_node); - free(bucket); - } - } else { - /* Remove 'rule' from cls->exact_table. */ - hmap_remove(&cls->exact_table, &rule->node.hmap); - } - cls->n_rules--; -} + struct cls_rule *head; + struct cls_table *table; -static struct cls_rule * -classifier_lookup_exact(const struct classifier *cls, const struct flow *flow) -{ - return (!hmap_is_empty(&cls->exact_table) - ? search_exact_table(cls, flow_hash(flow, 0), flow) - : NULL); -} + table = find_table(cls, &rule->wc); + head = find_equal(table, &rule->flow, rule->hmap_node.hash); + if (head != rule) { + list_remove(&rule->list); + } else if (list_is_empty(&rule->list)) { + hmap_remove(&table->rules, &rule->hmap_node); + } else { + struct cls_rule *next = CONTAINER_OF(rule->list.next, + struct cls_rule, list); -static struct cls_rule * -classifier_lookup_wild(const struct classifier *cls, const struct flow *flow) -{ - struct cls_rule *best = NULL; - if (cls->n_rules > hmap_count(&cls->exact_table)) { - struct cls_rule target; - int i; + list_remove(&rule->list); + hmap_replace(&table->rules, &rule->hmap_node, &next->hmap_node); + } - cls_rule_from_flow(flow, 0, 0, &target); - for (i = 0; i < CLS_N_FIELDS; i++) { - struct cls_rule *rule = search_table(&cls->tables[i], i, &target); - if (rule && (!best || rule->priority > best->priority)) { - best = rule; - } - } + if (--table->n_table_rules == 0 && !table->n_refs) { + destroy_table(cls, table); } - return best; + + cls->n_rules--; } /* Finds and returns the highest-priority rule in 'cls' that matches 'flow'. * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules * of equal priority match 'flow', returns one arbitrarily. * - * (When multiple rules of equal priority happen to fall into the same bucket, - * rules added more recently take priority over rules added less recently, but - * this is subject to change and should not be depended upon.) */ + * 'include' is a combination of CLS_INC_* values that specify tables to + * include in the search. */ struct cls_rule * classifier_lookup(const struct classifier *cls, const struct flow *flow, int include) { - if (include & CLS_INC_EXACT) { - struct cls_rule *rule = classifier_lookup_exact(cls, flow); - if (rule) { - return rule; - } - } + struct cls_table *table; + struct cls_rule *best; - if (include & CLS_INC_WILD) { - return classifier_lookup_wild(cls, flow); + best = NULL; + HMAP_FOR_EACH (table, hmap_node, &cls->tables) { + if (should_include(table, include)) { + struct cls_rule *rule = find_match(table, flow); + if (rule && (!best || rule->priority > best->priority)) { + best = rule; + } + } } - - return NULL; + return best; } +/* Finds and returns a rule in 'cls' with exactly the same priority and + * matching criteria as 'target'. Returns a null pointer if 'cls' doesn't + * contain an exact match. + * + * Priority is ignored for exact-match rules (because OpenFlow 1.0 always + * treats exact-match rules as highest priority). */ struct cls_rule * classifier_find_rule_exactly(const struct classifier *cls, const struct cls_rule *target) { - struct cls_bucket *bucket; - int table_idx; - uint32_t hash; + struct cls_rule *head, *rule; + struct cls_table *table; - if (!target->wc.wildcards) { - /* Ignores 'target->priority'. 
*/ - return search_exact_table(cls, flow_hash(&target->flow, 0), - &target->flow); + table = find_table(cls, &target->wc); + if (!table) { + return NULL; } - assert(target->wc.wildcards == (target->wc.wildcards & OVSFW_ALL)); - table_idx = table_idx_from_wildcards(target->wc.wildcards); - hash = hash_fields(&target->flow, table_idx); - HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, hash, - &cls->tables[table_idx]) { - if (equal_fields(&bucket->fixed, &target->flow, table_idx)) { - struct cls_rule *pos; - LIST_FOR_EACH (pos, node.list, &bucket->rules) { - if (pos->priority < target->priority) { - return NULL; - } else if (pos->priority == target->priority && - pos->wc.wildcards == target->wc.wildcards && - flow_equal(&target->flow, &pos->flow)) { - return pos; - } - } + head = find_equal(table, &target->flow, flow_hash(&target->flow, 0)); + if (!target->wc.wildcards) { + return head; + } + FOR_EACH_RULE_IN_LIST (rule, head) { + if (target->priority >= rule->priority) { + return target->priority == rule->priority ? rule : NULL; } } return NULL; @@ -306,22 +342,19 @@ bool classifier_rule_overlaps(const struct classifier *cls, const struct cls_rule *target) { - const struct hmap *tbl; + struct cls_table *table; - if (!target->wc.wildcards) { - return (search_exact_table(cls, flow_hash(&target->flow, 0), - &target->flow) != NULL); - } - - for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) { - struct cls_bucket *bucket; + HMAP_FOR_EACH (table, hmap_node, &cls->tables) { + struct flow_wildcards wc; + struct cls_rule *head; - HMAP_FOR_EACH (bucket, hmap_node, tbl) { + flow_wildcards_combine(&wc, &target->wc, &table->wc); + HMAP_FOR_EACH (head, hmap_node, &table->rules) { struct cls_rule *rule; - LIST_FOR_EACH (rule, node.list, &bucket->rules) { + FOR_EACH_RULE_IN_LIST (rule, head) { if (rule->priority == target->priority - && rules_match_2wild(rule, target, 0)) { + && flow_equal_except(&target->flow, &rule->flow, &wc)) { return true; } } @@ -331,511 +364,312 @@ classifier_rule_overlaps(const struct classifier *cls, return false; } -/* Ignores target->priority. +/* Searches 'cls' for rules that exactly match 'target' or are more specific + * than 'target'. That is, a given 'rule' matches 'target' if, for every + * field: + * + * - 'target' and 'rule' specify the same (non-wildcarded) value for the + * field, or + * + * - 'target' wildcards the field, + * + * but not if: + * + * - 'target' and 'rule' specify different values for the field, or + * + * - 'target' specifies a value for the field but 'rule' wildcards it. + * + * Equivalently, the truth table for whether a field matches is: + * + * rule + * + * wildcard exact + * +---------+---------+ + * t wild | yes | yes | + * a card | | | + * r +---------+---------+ + * g exact | no |if values| + * e | |are equal| + * t +---------+---------+ + * + * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD + * commands and by OpenFlow 1.0 aggregate and flow stats. + * + * Ignores target->priority. * * 'callback' is allowed to delete the rule that is passed as its argument, but - * it must not delete (or move) any other rules in 'cls' that are in the same - * table as the argument rule. Two rules are in the same table if their - * cls_rule structs have the same table_idx; as a special case, a rule with - * wildcards and an exact-match rule will never be in the same table. */ + * it must not delete (or move) any other rules in 'cls' that have the same + * wildcards as the argument rule. 
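+ *
+ * (Rules that have the same wildcards live in the same "struct cls_table",
+ * which this function may destroy once its last remaining rule has been
+ * deleted.)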
*/ void -classifier_for_each_match(const struct classifier *cls, +classifier_for_each_match(const struct classifier *cls_, const struct cls_rule *target, int include, cls_cb_func *callback, void *aux) { - if (include & CLS_INC_WILD) { - const struct hmap *table; - - for (table = &cls->tables[0]; table < &cls->tables[CLS_N_FIELDS]; - table++) { - struct cls_bucket *bucket, *next_bucket; - - HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, table) { - /* XXX there is a bit of room for optimization here based on - * rejecting entire buckets on their fixed fields, but it will - * only be worthwhile for big buckets (which we hope we won't - * get anyway, but...) */ - struct cls_rule *prev_rule, *rule; - - /* We can't just use LIST_FOR_EACH_SAFE here because, if the - * callback deletes the last rule in the bucket, then the - * bucket itself will be destroyed. The bucket contains the - * list head so that's a use-after-free error. */ - prev_rule = NULL; - LIST_FOR_EACH (rule, node.list, &bucket->rules) { - if (rules_match_1wild(rule, target, 0)) { - if (prev_rule) { - callback(prev_rule, aux); - } - prev_rule = rule; + struct classifier *cls = (struct classifier *) cls_; + struct cls_table *table, *next_table; + + for (table = classifier_first_table(cls); table; table = next_table) { + if (should_include(table, include) + && !flow_wildcards_has_extra(&table->wc, &target->wc)) { + /* We have eliminated the "no" case in the truth table above. Two + * of the three remaining cases are trivial. We only need to check + * the fourth case, where both 'rule' and 'target' require an exact + * match. */ + struct cls_rule *head, *next_head; + + table->n_refs++; + HMAP_FOR_EACH_SAFE (head, next_head, hmap_node, &table->rules) { + if (flow_equal_except(&head->flow, &target->flow, + &target->wc)) { + struct cls_rule *rule, *next_rule; + + FOR_EACH_RULE_IN_LIST_SAFE (rule, next_rule, head) { + callback(rule, aux); } } - if (prev_rule) { - callback(prev_rule, aux); - } } - } - } - - if (include & CLS_INC_EXACT) { - if (target->wc.wildcards) { - struct cls_rule *rule, *next_rule; - - HMAP_FOR_EACH_SAFE (rule, next_rule, node.hmap, - &cls->exact_table) { - if (rules_match_1wild(rule, target, 0)) { - callback(rule, aux); - } + next_table = classifier_next_table(cls, table); + if (!--table->n_refs && !table->n_table_rules) { + destroy_table(cls, table); } } else { - /* Optimization: there can be at most one match in the exact - * table. */ - size_t hash = flow_hash(&target->flow, 0); - struct cls_rule *rule = search_exact_table(cls, hash, - &target->flow); - if (rule) { - callback(rule, aux); - } + next_table = classifier_next_table(cls, table); } } } /* 'callback' is allowed to delete the rule that is passed as its argument, but - * it must not delete (or move) any other rules in 'cls' that are in the same - * table as the argument rule. Two rules are in the same table if their - * cls_rule structs have the same table_idx; as a special case, a rule with - * wildcards and an exact-match rule will never be in the same table. + * it must not delete (or move) any other rules in 'cls' that have the same + * wildcards as the argument rule. * - * If 'include' is CLS_INC_EXACT then CLASSIFIER_FOR_EACH_EXACT_RULE(_SAFE) is + * If 'include' is CLS_INC_EXACT then CLASSIFIER_FOR_EACH_EXACT_RULE is * probably easier to use. 
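+ *
+ * A typical call, visiting every rule in 'cls' regardless of wildcards,
+ * might look like this (with 'cb' standing in for any cls_cb_func):
+ *
+ *     classifier_for_each(cls, CLS_INC_WILD | CLS_INC_EXACT, cb, aux);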
*/ void -classifier_for_each(const struct classifier *cls, int include, - void (*callback)(struct cls_rule *, void *aux), - void *aux) -{ - if (include & CLS_INC_WILD) { - const struct hmap *tbl; - - for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) { - struct cls_bucket *bucket, *next_bucket; - - HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, tbl) { - struct cls_rule *prev_rule, *rule; - - /* We can't just use LIST_FOR_EACH_SAFE here because, if the - * callback deletes the last rule in the bucket, then the - * bucket itself will be destroyed. The bucket contains the - * list head so that's a use-after-free error. */ - prev_rule = NULL; - LIST_FOR_EACH (rule, node.list, &bucket->rules) { - if (prev_rule) { - callback(prev_rule, aux); - } - prev_rule = rule; - } - if (prev_rule) { - callback(prev_rule, aux); +classifier_for_each(const struct classifier *cls_, int include, + cls_cb_func *callback, void *aux) +{ + struct classifier *cls = (struct classifier *) cls_; + struct cls_table *table, *next_table; + + for (table = classifier_first_table(cls); table; table = next_table) { + if (should_include(table, include)) { + struct cls_rule *head, *next_head; + + table->n_refs++; + HMAP_FOR_EACH_SAFE (head, next_head, hmap_node, &table->rules) { + struct cls_rule *rule, *next_rule; + + FOR_EACH_RULE_IN_LIST_SAFE (rule, next_rule, head) { + callback(rule, aux); } } + next_table = classifier_next_table(cls, table); + if (!--table->n_refs && !table->n_table_rules) { + destroy_table(cls, table); + } + } else { + next_table = classifier_next_table(cls, table); } } +} + +static struct cls_table * +find_table(const struct classifier *cls, const struct flow_wildcards *wc) +{ + struct cls_table *table; - if (include & CLS_INC_EXACT) { - struct cls_rule *rule, *next_rule; - - HMAP_FOR_EACH_SAFE (rule, next_rule, node.hmap, &cls->exact_table) { - callback(rule, aux); + HMAP_FOR_EACH_IN_BUCKET (table, hmap_node, flow_wildcards_hash(wc), + &cls->tables) { + if (flow_wildcards_equal(wc, &table->wc)) { + return table; } } + return NULL; } - -static struct cls_bucket *create_bucket(struct hmap *, size_t hash, - const struct flow *fixed); -static struct cls_rule *bucket_insert(struct cls_bucket *, struct cls_rule *); - -static inline bool equal_bytes(const void *, const void *, size_t n); - -/* Returns a hash computed across the fields in 'flow' whose field indexes - * (CLS_F_IDX_*) are less than 'table_idx'. (If 'table_idx' is - * CLS_F_IDX_EXACT, hashes all the fields in 'flow'). */ -static uint32_t -hash_fields(const struct flow *flow, int table_idx) -{ - /* I just know I'm going to hell for writing code this way. - * - * GCC generates pretty good code here, with only a single taken - * conditional jump per execution. Now the question is, would we be better - * off marking this function ALWAYS_INLINE and writing a wrapper that - * switches on the value of 'table_idx' to get rid of all the conditional - * jumps entirely (except for one in the wrapper)? Honestly I really, - * really hope that it doesn't matter in practice. - * - * We could do better by calculating hashes incrementally, instead of - * starting over from the top each time. But that would be even uglier. */ - uint32_t a, b, c; - uint32_t tmp[3]; - size_t n; - - a = b = c = 0xdeadbeef + table_idx; - n = 0; - -#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \ - if (table_idx == CLS_F_IDX_##NAME) { \ - /* Done. 
*/ \ - memset((uint8_t *) tmp + n, 0, sizeof tmp - n); \ - goto finish; \ - } else { \ - const size_t size = sizeof flow->MEMBER; \ - const uint8_t *p1 = (const uint8_t *) &flow->MEMBER; \ - const size_t p1_size = MIN(sizeof tmp - n, size); \ - const uint8_t *p2 = p1 + p1_size; \ - const size_t p2_size = size - p1_size; \ - \ - /* Append to 'tmp' as much data as will fit. */ \ - memcpy((uint8_t *) tmp + n, p1, p1_size); \ - n += p1_size; \ - \ - /* If 'tmp' is full, mix. */ \ - if (n == sizeof tmp) { \ - a += tmp[0]; \ - b += tmp[1]; \ - c += tmp[2]; \ - HASH_MIX(a, b, c); \ - n = 0; \ - } \ - \ - /* Append to 'tmp' any data that didn't fit. */ \ - memcpy(tmp, p2, p2_size); \ - n += p2_size; \ - } - CLS_FIELDS -#undef CLS_FIELD -finish: - a += tmp[0]; - b += tmp[1]; - c += tmp[2]; - HASH_FINAL(a, b, c); - return c; -} +static struct cls_table * +insert_table(struct classifier *cls, const struct flow_wildcards *wc) +{ + struct cls_table *table; -/* Compares the fields in 'a' and 'b' whose field indexes (CLS_F_IDX_*) are - * less than 'table_idx'. (If 'table_idx' is CLS_F_IDX_EXACT, compares all the - * fields in 'a' and 'b'). - * - * Returns true if all the compared fields are equal, false otherwise. */ -static bool -equal_fields(const struct flow *a, const struct flow *b, int table_idx) -{ - /* XXX The generated code could be better here. */ -#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \ - if (table_idx == CLS_F_IDX_##NAME) { \ - return true; \ - } else if (!equal_bytes(&a->MEMBER, &b->MEMBER, sizeof a->MEMBER)) { \ - return false; \ - } - CLS_FIELDS -#undef CLS_FIELD + table = xzalloc(sizeof *table); + hmap_init(&table->rules); + table->wc = *wc; + hmap_insert(&cls->tables, &table->hmap_node, flow_wildcards_hash(wc)); - return true; + return table; } -static int -table_idx_from_wildcards(uint32_t wildcards) +static struct cls_table * +classifier_first_table(const struct classifier *cls) { - if (!wildcards) { - return CLS_F_IDX_EXACT; - } -#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \ - if (wildcards & WILDCARDS) { \ - return CLS_F_IDX_##NAME; \ - } - CLS_FIELDS -#undef CLS_FIELD - NOT_REACHED(); + return cls_table_from_hmap_node(hmap_first(&cls->tables)); } -/* Inserts 'rule' into 'table'. Returns the rule, if any, that was displaced - * in favor of 'rule'. */ -static struct cls_rule * -table_insert(struct hmap *table, struct cls_rule *rule) +static struct cls_table * +classifier_next_table(const struct classifier *cls, + const struct cls_table *table) { - struct cls_bucket *bucket; - size_t hash; + return cls_table_from_hmap_node(hmap_next(&cls->tables, + &table->hmap_node)); +} - hash = hash_fields(&rule->flow, rule->table_idx); - bucket = find_bucket(table, hash, rule); - if (!bucket) { - bucket = create_bucket(table, hash, &rule->flow); - } +static void +destroy_table(struct classifier *cls, struct cls_table *table) +{ + hmap_remove(&cls->tables, &table->hmap_node); + hmap_destroy(&table->rules); + free(table); +} - return bucket_insert(bucket, rule); +/* Returns true if 'table' should be included by an operation with the + * specified 'include' (a combination of CLS_INC_*). */ +static bool +should_include(const struct cls_table *table, int include) +{ + return include & (table->wc.wildcards ? CLS_INC_WILD : CLS_INC_EXACT); } -/* Inserts 'rule' into 'bucket', given that 'field' is the first wildcarded - * field in 'rule'. - * - * Returns the rule, if any, that was displaced in favor of 'rule'. 
*/ static struct cls_rule * -bucket_insert(struct cls_bucket *bucket, struct cls_rule *rule) -{ - struct cls_rule *pos; - LIST_FOR_EACH (pos, node.list, &bucket->rules) { - if (pos->priority == rule->priority) { - if (pos->wc.wildcards == rule->wc.wildcards - && rules_match_1wild(pos, rule, rule->table_idx)) - { - list_replace(&rule->node.list, &pos->node.list); - return pos; - } - } else if (pos->priority < rule->priority) { - break; +find_match(const struct cls_table *table, const struct flow *flow) +{ + struct cls_rule *rule; + struct flow f; + + f = *flow; + zero_wildcards(&f, &table->wc); + HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, flow_hash(&f, 0), + &table->rules) { + if (flow_equal(&f, &rule->flow)) { + return rule; } } - list_insert(&pos->node.list, &rule->node.list); return NULL; } static struct cls_rule * -insert_exact_rule(struct classifier *cls, struct cls_rule *rule) +find_equal(struct cls_table *table, const struct flow *flow, uint32_t hash) { - struct cls_rule *old_rule; - size_t hash; - - hash = flow_hash(&rule->flow, 0); - old_rule = search_exact_table(cls, hash, &rule->flow); - if (old_rule) { - hmap_remove(&cls->exact_table, &old_rule->node.hmap); - } - hmap_insert(&cls->exact_table, &rule->node.hmap, hash); - return old_rule; -} + struct cls_rule *head; -/* Returns the bucket in 'table' that has the given 'hash' and the same fields - * as 'rule->flow' (up to 'rule->table_idx'), or a null pointer if no bucket - * matches. */ -static struct cls_bucket * -find_bucket(struct hmap *table, size_t hash, const struct cls_rule *rule) -{ - struct cls_bucket *bucket; - HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, hash, table) { - if (equal_fields(&bucket->fixed, &rule->flow, rule->table_idx)) { - return bucket; + HMAP_FOR_EACH_WITH_HASH (head, hmap_node, hash, &table->rules) { + if (flow_equal(&head->flow, flow)) { + return head; } } return NULL; } -/* Creates a bucket and inserts it in 'table' with the given 'hash' and 'fixed' - * values. Returns the new bucket. */ -static struct cls_bucket * -create_bucket(struct hmap *table, size_t hash, const struct flow *fixed) -{ - struct cls_bucket *bucket = xmalloc(sizeof *bucket); - list_init(&bucket->rules); - bucket->fixed = *fixed; - hmap_insert(table, &bucket->hmap_node, hash); - return bucket; -} - -/* Returns true if the 'n' bytes in 'a' and 'b' are equal, false otherwise. */ -static inline bool ALWAYS_INLINE -equal_bytes(const void *a, const void *b, size_t n) +static struct cls_rule * +insert_rule(struct cls_table *table, struct cls_rule *new) { -#ifdef __i386__ - /* For some reason GCC generates stupid code for memcmp() of small - * constant integer lengths. Help it out. - * - * This function is always inlined, and it is always called with 'n' as a - * compile-time constant, so the switch statement gets optimized out and - * this whole function just expands to an instruction or two. */ - switch (n) { - case 1: - return *(uint8_t *) a == *(uint8_t *) b; + struct cls_rule *head; - case 2: - return *(uint16_t *) a == *(uint16_t *) b; + new->hmap_node.hash = flow_hash(&new->flow, 0); - case 4: - return *(uint32_t *) a == *(uint32_t *) b; + head = find_equal(table, &new->flow, new->hmap_node.hash); + if (!head) { + hmap_insert(&table->rules, &new->hmap_node, new->hmap_node.hash); + list_init(&new->list); + return NULL; + } else { + /* Scan the list for the insertion point that will keep the list in + * order of decreasing priority. 
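+         *
+         * If a rule with the same priority is found, 'new' replaces it and
+         * the displaced rule is returned.  Otherwise 'new' is linked in just
+         * before the first lower-priority rule, or appended at the tail, and
+         * NULL is returned.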
*/ + struct cls_rule *rule; + FOR_EACH_RULE_IN_LIST (rule, head) { + if (new->priority >= rule->priority) { + if (rule == head) { + /* 'new' is the new highest-priority flow in the list. */ + hmap_replace(&table->rules, + &rule->hmap_node, &new->hmap_node); + } - case 6: - return (*(uint32_t *) a == *(uint32_t *) b - && ((uint16_t *) a)[2] == ((uint16_t *) b)[2]); + if (new->priority == rule->priority) { + list_replace(&new->list, &rule->list); + return rule; + } else { + list_insert(&rule->list, &new->list); + return NULL; + } + } + } - default: - abort(); + /* Insert 'new' at the end of the list. */ + list_push_back(&head->list, &new->list); + return NULL; } -#else - /* I hope GCC is smarter on your platform. */ - return !memcmp(a, b, n); -#endif } -/* Returns the 32-bit unsigned integer at 'p'. */ -static inline uint32_t -read_uint32(const void *p) +static struct cls_rule * +next_rule_in_list(struct cls_rule *rule) { - /* GCC optimizes this into a single machine instruction on x86. */ - uint32_t x; - memcpy(&x, p, sizeof x); - return x; -} - -/* Compares the specified field in 'a' and 'b'. Returns true if the fields are - * equal, or if the ofp_match wildcard bits in 'wildcards' are set such that - * non-equal values may be ignored. 'nw_src_mask' and 'nw_dst_mask' must be - * those that would be set for 'wildcards' by cls_rule_set_masks(). - * - * The compared field is the one with wildcard bit or bits 'field_wc', offset - * 'rule_ofs' within cls_rule's "fields" member, and length 'len', in bytes. */ -static inline bool ALWAYS_INLINE -field_matches(const struct flow *a_, const struct flow *b_, - uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask, - uint32_t field_wc, int ofs, int len) -{ - /* This function is always inlined, and it is always called with 'field_wc' - * as a compile-time constant, so the "if" conditionals here generate no - * code. */ - const void *a = (const uint8_t *) a_ + ofs; - const void *b = (const uint8_t *) b_ + ofs; - if (!(field_wc & (field_wc - 1))) { - /* Handle all the single-bit wildcard cases. */ - return wildcards & field_wc || equal_bytes(a, b, len); - } else if (field_wc == OFPFW_NW_SRC_MASK || - field_wc == OFPFW_NW_DST_MASK) { - uint32_t a_ip = read_uint32(a); - uint32_t b_ip = read_uint32(b); - uint32_t mask = (field_wc == OFPFW_NW_SRC_MASK - ? nw_src_mask : nw_dst_mask); - return ((a_ip ^ b_ip) & mask) == 0; - } else { - abort(); - } -} - -/* Returns true if 'a' and 'b' match, ignoring fields for which the wildcards - * in 'wildcards' are set. 'nw_src_mask' and 'nw_dst_mask' must be those that - * would be set for 'wildcards' by cls_rule_set_masks(). 'field_idx' is the - * index of the first field to be compared; fields before 'field_idx' are - * assumed to match. (Always returns true if 'field_idx' is CLS_N_FIELDS.) */ -static bool -rules_match(const struct cls_rule *a, const struct cls_rule *b, - uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask, - int field_idx) -{ - /* This is related to Duff's device (see - * http://en.wikipedia.org/wiki/Duff's_device). */ - switch (field_idx) { -#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \ - case CLS_F_IDX_##NAME: \ - if (!field_matches(&a->flow, &b->flow, \ - wildcards, nw_src_mask, nw_dst_mask, \ - WILDCARDS, offsetof(struct flow, MEMBER), \ - sizeof a->flow.MEMBER)) { \ - return false; \ - } \ - /* Fall though */ - CLS_FIELDS -#undef CLS_FIELD - } - return true; + struct cls_rule *next = OBJECT_CONTAINING(rule->list.next, next, list); + return next->priority < rule->priority ? 
next : NULL; } -/* Returns true if 'fixed' and 'wild' match. All fields in 'fixed' must have - * fixed values; 'wild' may contain wildcards. - * - * 'field_idx' is the index of the first field to be compared; fields before - * 'field_idx' are assumed to match. Always returns true if 'field_idx' is - * CLS_N_FIELDS. */ static bool -rules_match_1wild(const struct cls_rule *fixed, const struct cls_rule *wild, - int field_idx) +flow_equal_except(const struct flow *a, const struct flow *b, + const struct flow_wildcards *wildcards) { - return rules_match(fixed, wild, wild->wc.wildcards, wild->wc.nw_src_mask, - wild->wc.nw_dst_mask, field_idx); -} + const uint32_t wc = wildcards->wildcards; -/* Returns true if 'wild1' and 'wild2' match, that is, if their fields - * are equal modulo wildcards in 'wild1' or 'wild2'. - * - * 'field_idx' is the index of the first field to be compared; fields before - * 'field_idx' are assumed to match. Always returns true if 'field_idx' is - * CLS_N_FIELDS. */ -static bool -rules_match_2wild(const struct cls_rule *wild1, const struct cls_rule *wild2, - int field_idx) -{ - return rules_match(wild1, wild2, - wild1->wc.wildcards | wild2->wc.wildcards, - wild1->wc.nw_src_mask & wild2->wc.nw_src_mask, - wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask, - field_idx); + BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 37); + + return ((wc & NXFW_TUN_ID || a->tun_id == b->tun_id) + && !((a->nw_src ^ b->nw_src) & wildcards->nw_src_mask) + && !((a->nw_dst ^ b->nw_dst) & wildcards->nw_dst_mask) + && (wc & OFPFW_IN_PORT || a->in_port == b->in_port) + && (wc & OFPFW_DL_VLAN || a->dl_vlan == b->dl_vlan) + && (wc & OFPFW_DL_TYPE || a->dl_type == b->dl_type) + && (wc & OFPFW_TP_SRC || a->tp_src == b->tp_src) + && (wc & OFPFW_TP_DST || a->tp_dst == b->tp_dst) + && (wc & OFPFW_DL_SRC || eth_addr_equals(a->dl_src, b->dl_src)) + && (wc & OFPFW_DL_DST || eth_addr_equals(a->dl_dst, b->dl_dst)) + && (wc & OFPFW_NW_PROTO || a->nw_proto == b->nw_proto) + && (wc & OFPFW_DL_VLAN_PCP || a->dl_vlan_pcp == b->dl_vlan_pcp) + && (wc & OFPFW_NW_TOS || a->nw_tos == b->nw_tos)); } -/* Searches 'bucket' for a rule that matches 'target'. Returns the - * highest-priority match, if one is found, or a null pointer if there is no - * match. - * - * 'field_idx' must be the index of the first wildcarded field in 'bucket'. */ -static struct cls_rule * -search_bucket(struct cls_bucket *bucket, int field_idx, - const struct cls_rule *target) +static void +zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards) { - struct cls_rule *pos; + const uint32_t wc = wildcards->wildcards; - if (!equal_fields(&bucket->fixed, &target->flow, field_idx)) { - return NULL; - } + BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 37); - LIST_FOR_EACH (pos, node.list, &bucket->rules) { - if (rules_match_1wild(target, pos, field_idx)) { - return pos; - } + if (wc & NXFW_TUN_ID) { + flow->tun_id = 0; } - return NULL; -} - -/* Searches 'table' for a rule that matches 'target'. Returns the - * highest-priority match, if one is found, or a null pointer if there is no - * match. - * - * 'field_idx' must be the index of the first wildcarded field in 'table'. */ -static struct cls_rule * -search_table(const struct hmap *table, int field_idx, - const struct cls_rule *target) -{ - struct cls_bucket *bucket; - - switch (hmap_count(table)) { - /* In these special cases there's no need to hash. 
*/ - case 0: - return NULL; - case 1: - bucket = CONTAINER_OF(hmap_first(table), struct cls_bucket, hmap_node); - return search_bucket(bucket, field_idx, target); + flow->nw_src &= wildcards->nw_src_mask; + flow->nw_dst &= wildcards->nw_dst_mask; + if (wc & OFPFW_IN_PORT) { + flow->in_port = 0; } - - HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, - hash_fields(&target->flow, field_idx), table) { - struct cls_rule *rule = search_bucket(bucket, field_idx, target); - if (rule) { - return rule; - } + if (wc & OFPFW_DL_VLAN) { + flow->dl_vlan = 0; } - return NULL; -} - -static struct cls_rule * -search_exact_table(const struct classifier *cls, size_t hash, - const struct flow *target) -{ - struct cls_rule *rule; - - HMAP_FOR_EACH_WITH_HASH (rule, node.hmap, hash, &cls->exact_table) { - if (flow_equal(&rule->flow, target)) { - return rule; - } + if (wc & OFPFW_DL_TYPE) { + flow->dl_type = 0; + } + if (wc & OFPFW_TP_SRC) { + flow->tp_src = 0; + } + if (wc & OFPFW_TP_DST) { + flow->tp_dst = 0; + } + if (wc & OFPFW_DL_SRC) { + memset(flow->dl_src, 0, sizeof flow->dl_src); + } + if (wc & OFPFW_DL_DST) { + memset(flow->dl_dst, 0, sizeof flow->dl_dst); + } + if (wc & OFPFW_NW_PROTO) { + flow->nw_proto = 0; + } + if (wc & OFPFW_DL_VLAN_PCP) { + flow->dl_vlan_pcp = 0; + } + if (wc & OFPFW_NW_TOS) { + flow->nw_tos = 0; } - return NULL; } diff --git a/lib/classifier.h b/lib/classifier.h index d2e2b8bd..7ed3cf74 100644 --- a/lib/classifier.h +++ b/lib/classifier.h @@ -19,26 +19,11 @@ /* Flow classifier. * - * This flow classifier assumes that we can arrange the fields in a flow in an - * order such that the set of wildcarded fields in a rule tend to fall toward - * the end of the ordering. That is, if field F is wildcarded, then all the - * fields after F tend to be wildcarded as well. If this assumption is - * violated, then the classifier will still classify flows correctly, but its - * performance will suffer. - * - * The classifier uses a collection of CLS_N_FIELDS hash tables for wildcarded - * flows. Each of these tables contains the flows that wildcard a given field - * and do not wildcard any of the fields that precede F in the ordering. The - * key for each hash table is the value of the fields preceding F that are not - * wildcarded. All the flows that fall within a table and have the same key - * are kept as a linked list ordered from highest to lowest priority. - * - * The classifier also maintains a separate hash table of exact-match flows. - * - * To search the classifier we first search the table of exact-match flows, - * since exact-match flows always have highest priority. If there is a match, - * we're done. Otherwise, we search each of the CLS_N_FIELDS hash tables in - * turn, looking for the highest-priority match, and return it (if any). + * A classifier is a "struct classifier", + * a hash map from a set of wildcards to a "struct cls_table", + * a hash map from fixed field values to "struct cls_rule", + * which can contain a list of otherwise identical rules + * with lower priorities. */ #include "flow.h" @@ -47,80 +32,38 @@ #include "openflow/nicira-ext.h" #include "openflow/openflow.h" -/* Number of bytes of fields in a rule. */ -#define CLS_N_BYTES 37 - -/* Fields in a rule. - * - * This definition sets the ordering of fields, which is important for - * performance (see above). To adjust the ordering, change the order of the - * lines. 
*/ -#define CLS_FIELDS \ - /* struct flow all-caps */ \ - /* wildcard bit(s) member name name */ \ - /* ----------------- ----------- -------- */ \ - CLS_FIELD(OFPFW_IN_PORT, in_port, IN_PORT) \ - CLS_FIELD(NXFW_TUN_ID, tun_id, TUN_ID) \ - CLS_FIELD(OFPFW_DL_VLAN, dl_vlan, DL_VLAN) \ - CLS_FIELD(OFPFW_DL_VLAN_PCP, dl_vlan_pcp, DL_VLAN_PCP) \ - CLS_FIELD(OFPFW_DL_SRC, dl_src, DL_SRC) \ - CLS_FIELD(OFPFW_DL_DST, dl_dst, DL_DST) \ - CLS_FIELD(OFPFW_DL_TYPE, dl_type, DL_TYPE) \ - CLS_FIELD(OFPFW_NW_SRC_MASK, nw_src, NW_SRC) \ - CLS_FIELD(OFPFW_NW_DST_MASK, nw_dst, NW_DST) \ - CLS_FIELD(OFPFW_NW_PROTO, nw_proto, NW_PROTO) \ - CLS_FIELD(OFPFW_NW_TOS, nw_tos, NW_TOS) \ - CLS_FIELD(OFPFW_TP_SRC, tp_src, TP_SRC) \ - CLS_FIELD(OFPFW_TP_DST, tp_dst, TP_DST) - -/* Field indexes. - * - * (These are also indexed into struct classifier's 'tables' array.) */ -enum { -#define CLS_FIELD(WILDCARDS, MEMBER, NAME) CLS_F_IDX_##NAME, - CLS_FIELDS -#undef CLS_FIELD - CLS_F_IDX_EXACT, /* Exact-match table. */ - CLS_N_FIELDS = CLS_F_IDX_EXACT -}; - -/* Field information. */ -struct cls_field { - int ofs; /* Offset in struct flow. */ - int len; /* Length in bytes. */ - uint32_t wildcards; /* OFPFW_* bit or bits for this field. */ - const char *name; /* Name (for debugging). */ -}; -extern const struct cls_field cls_fields[CLS_N_FIELDS + 1]; - /* A flow classifier. */ struct classifier { - int n_rules; /* Sum of hmap_count() over tables[]. */ - struct hmap tables[CLS_N_FIELDS]; /* Contain cls_bucket elements. */ - struct hmap exact_table; /* Contain cls_rule elements. */ + int n_rules; /* Total number of rules. */ + struct hmap tables; /* Contains "struct cls_table"s. */ }; -/* A group of rules with the same fixed values for initial fields. */ -struct cls_bucket { - struct hmap_node hmap_node; /* Within struct classifier 'tables'. */ - struct list rules; /* In order from highest to lowest priority. */ - struct flow fixed; /* Values for fixed fields. */ +/* A set of rules that all have the same fields wildcarded. */ +struct cls_table { + struct hmap_node hmap_node; /* Within struct classifier 'wctables'. */ + struct hmap rules; /* Contains "struct cls_rule"s. */ + struct flow_wildcards wc; /* Wildcards for fields. */ + int n_table_rules; /* Number of rules, including duplicates. */ + int n_refs; /* Reference count used during iteration. */ }; /* A flow classification rule. * - * Use cls_rule_from_flow() or cls_rule_from_match() to initialize a cls_rule - * or you will almost certainly not initialize 'table_idx' correctly, with - * disastrous results! */ + * Use one of the cls_rule_*() functions to initialize a cls_rule. + * + * The cls_rule_*() functions below maintain the following important + * invariant that the classifier depends on: + * + * - If a bit or a field is wildcarded in 'wc', then the corresponding bit or + * field in 'flow' is set to all-0-bits. (The cls_rule_zero_wildcards() + * function can be used to restore this invariant after adding wildcards.) + */ struct cls_rule { - union { - struct list list; /* Within struct cls_bucket 'rules'. */ - struct hmap_node hmap; /* Within struct classifier 'exact_table'. */ - } node; + struct hmap_node hmap_node; /* Within struct cls_table 'rules'. */ + struct list list; /* List of identical, lower-priority rules. */ struct flow flow; /* All field values. */ struct flow_wildcards wc; /* Wildcards for fields. */ unsigned int priority; /* Larger numbers are higher priorities. */ - unsigned int table_idx; /* Index into struct classifier 'tables'. 
*/ }; enum { @@ -134,6 +77,9 @@ void cls_rule_from_flow(const struct flow *, uint32_t wildcards, void cls_rule_from_match(const struct ofp_match *, unsigned int priority, bool tun_id_from_cookie, uint64_t cookie, struct cls_rule *); + +void cls_rule_zero_wildcards(struct cls_rule *); + char *cls_rule_to_string(const struct cls_rule *); void cls_rule_print(const struct cls_rule *); @@ -160,10 +106,22 @@ void classifier_for_each_match(const struct classifier *, struct cls_rule *classifier_find_rule_exactly(const struct classifier *, const struct cls_rule *); -#define CLASSIFIER_FOR_EACH_EXACT_RULE(RULE, MEMBER, CLS) \ - HMAP_FOR_EACH (RULE, MEMBER.node.hmap, &(CLS)->exact_table) +/* Iteration shorthands. */ + +struct cls_table *classifier_exact_table(const struct classifier *); +struct cls_rule *cls_table_first_rule(const struct cls_table *); +struct cls_rule *cls_table_next_rule(const struct cls_table *, + const struct cls_rule *); + +#define CLS_TABLE_FOR_EACH_RULE(RULE, MEMBER, TABLE) \ + for ((RULE) = OBJECT_CONTAINING(cls_table_first_rule(TABLE), \ + RULE, MEMBER); \ + &(RULE)->MEMBER != NULL; \ + (RULE) = OBJECT_CONTAINING(cls_table_next_rule(TABLE, \ + &(RULE)->MEMBER), \ + RULE, MEMBER)) -#define CLASSIFIER_FOR_EACH_EXACT_RULE_SAFE(RULE, NEXT, MEMBER, CLS) \ - HMAP_FOR_EACH_SAFE (RULE, NEXT, MEMBER.node.hmap, &(CLS)->exact_table) +#define CLASSIFIER_FOR_EACH_EXACT_RULE(RULE, MEMBER, CLS) \ + CLS_TABLE_FOR_EACH_RULE (RULE, MEMBER, classifier_exact_table(CLS)) #endif /* classifier.h */ diff --git a/lib/flow.c b/lib/flow.c index ca745187..aba77cca 100644 --- a/lib/flow.c +++ b/lib/flow.c @@ -367,12 +367,31 @@ flow_nw_bits_to_mask(uint32_t wildcards, int shift) return wildcards < 32 ? htonl(~((1u << wildcards) - 1)) : 0; } +/* Return 'wildcards' in "normal form": + * + * - Forces unknown bits to 0. + * + * - Forces nw_src and nw_dst masks greater than 32 to exactly 32. + */ +static inline uint32_t +flow_wildcards_normalize(uint32_t wildcards) +{ + wildcards &= wildcards & OVSFW_ALL; + if (wildcards & (0x20 << OFPFW_NW_SRC_SHIFT)) { + wildcards &= ~(0x1f << OFPFW_NW_SRC_SHIFT); + } + if (wildcards & (0x20 << OFPFW_NW_DST_SHIFT)) { + wildcards &= ~(0x1f << OFPFW_NW_DST_SHIFT); + } + return wildcards; +} + /* Initializes 'wc' from 'wildcards', which may be any combination of the * OFPFW_* and OVSFW_* wildcard bits. */ void flow_wildcards_init(struct flow_wildcards *wc, uint32_t wildcards) { - wc->wildcards = wildcards & OVSFW_ALL; + wc->wildcards = flow_wildcards_normalize(wildcards); wc->nw_src_mask = flow_nw_bits_to_mask(wc->wildcards, OFPFW_NW_SRC_SHIFT); wc->nw_dst_mask = flow_nw_bits_to_mask(wc->wildcards, OFPFW_NW_DST_SHIFT); } @@ -385,6 +404,62 @@ flow_wildcards_init_exact(struct flow_wildcards *wc) flow_wildcards_init(wc, 0); } +static inline uint32_t +combine_nw_bits(uint32_t wb1, uint32_t wb2, int shift) +{ + uint32_t sb1 = (wb1 >> shift) & 0x3f; + uint32_t sb2 = (wb2 >> shift) & 0x3f; + return MAX(sb1, sb2) << shift; +} + +/* Initializes 'dst' as the combination of wildcards in 'src1' and 'src2'. + * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded in + * 'src1' or 'src2' or both. 
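+ *
+ * For example, if 'src1' wildcards the low 8 bits of nw_src (mask
+ * 255.255.255.0) and 'src2' wildcards the low 16 bits (mask 255.255.0.0),
+ * then 'dst' wildcards the low 16 bits (mask 255.255.0.0).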
*/ +void +flow_wildcards_combine(struct flow_wildcards *dst, + const struct flow_wildcards *src1, + const struct flow_wildcards *src2) +{ + uint32_t wb1 = src1->wildcards; + uint32_t wb2 = src2->wildcards; + + dst->wildcards = (wb1 | wb2) & ~(OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK); + dst->wildcards |= combine_nw_bits(wb1, wb2, OFPFW_NW_SRC_SHIFT); + dst->wildcards |= combine_nw_bits(wb1, wb2, OFPFW_NW_DST_SHIFT); + dst->nw_src_mask = src1->nw_src_mask & src2->nw_src_mask; + dst->nw_dst_mask = src1->nw_dst_mask & src2->nw_dst_mask; +} + +/* Returns a hash of the wildcards in 'wc'. */ +uint32_t +flow_wildcards_hash(const struct flow_wildcards *wc) +{ + /* There is no need to include nw_src_mask or nw_dst_mask because they do + * not add any information (they can be computed from wc->wildcards). */ + return hash_int(wc->wildcards, 0); +} + +/* Returns true if 'a' and 'b' represent the same wildcards, false if they are + * different. */ +bool +flow_wildcards_equal(const struct flow_wildcards *a, + const struct flow_wildcards *b) +{ + return a->wildcards == b->wildcards; +} + +/* Returns true if at least one bit or field is wildcarded in 'a' but not in + * 'b', false otherwise. */ +bool +flow_wildcards_has_extra(const struct flow_wildcards *a, + const struct flow_wildcards *b) +{ +#define OFPFW_NW_MASK (OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK) + return ((a->wildcards & ~(b->wildcards | OFPFW_NW_MASK)) + || (a->nw_src_mask & b->nw_src_mask) != b->nw_src_mask + || (a->nw_dst_mask & b->nw_dst_mask) != b->nw_dst_mask); +} + static int count_ones(ovs_be32 mask) { diff --git a/lib/flow.h b/lib/flow.h index 7667cb04..f1a32d55 100644 --- a/lib/flow.h +++ b/lib/flow.h @@ -88,7 +88,23 @@ flow_hash(const struct flow *flow, uint32_t basis) return hash_bytes(flow, FLOW_SIG_SIZE, basis); } -/* Information on wildcards for a flow, as a supplement to struct flow. */ +/* Information on wildcards for a flow, as a supplement to "struct flow". + * + * The flow_wildcards_*() functions below both depend on and maintain the + * following important invariants: + * + * 1. 'wildcards' is nonzero if and only if at least one bit or field is + * wildcarded. + * + * 2. Bits in 'wildcards' not included in OVSFW_ALL are set to 0. (This is a + * corollary to invariant #1.) + * + * 3. The fields in 'wildcards' masked by OFPFW_NW_SRC_MASK and + * OFPFW_NW_DST_MASK have values between 0 and 32, inclusive. + * + * 4. The fields masked by OFPFW_NW_SRC_MASK and OFPFW_NW_DST_MASK correspond + * correctly to the masks in 'nw_src_mask' and 'nw_dst_mask', respectively. + */ struct flow_wildcards { uint32_t wildcards; /* enum ofp_flow_wildcards. */ ovs_be32 nw_src_mask; /* 1-bit in each significant nw_src bit. 
*/ @@ -102,4 +118,14 @@ void flow_wildcards_init_exact(struct flow_wildcards *); bool flow_wildcards_set_nw_src_mask(struct flow_wildcards *, ovs_be32); bool flow_wildcards_set_nw_dst_mask(struct flow_wildcards *, ovs_be32); +void flow_wildcards_combine(struct flow_wildcards *dst, + const struct flow_wildcards *src1, + const struct flow_wildcards *src2); +bool flow_wildcards_has_extra(const struct flow_wildcards *, + const struct flow_wildcards *); + +uint32_t flow_wildcards_hash(const struct flow_wildcards *); +bool flow_wildcards_equal(const struct flow_wildcards *, + const struct flow_wildcards *); + #endif /* flow.h */ diff --git a/tests/classifier.at b/tests/classifier.at index f9e6953d..26ca0798 100644 --- a/tests/classifier.at +++ b/tests/classifier.at @@ -5,12 +5,10 @@ m4_foreach( [destroy-null], [single-rule], [rule-replacement], - [two-rules-in-one-bucket], - [two-rules-in-one-table], - [two-rules-in-different-tables], - [many-rules-in-one-bucket], + [many-rules-in-one-list], [many-rules-in-one-table], - [many-rules-in-different-tables]], + [many-rules-in-two-tables], + [many-rules-in-five-tables]], [AT_SETUP([flow classifier - m4_bpatsubst(testname, [-], [ ])]) AT_CHECK([test-classifier testname], [0], [], []) AT_CLEANUP])]) diff --git a/tests/test-classifier.c b/tests/test-classifier.c index b5972bdb..70af7ed0 100644 --- a/tests/test-classifier.c +++ b/tests/test-classifier.c @@ -37,6 +37,53 @@ #undef NDEBUG #include +/* Fields in a rule. */ +#define CLS_FIELDS \ + /* struct flow all-caps */ \ + /* wildcard bit(s) member name name */ \ + /* ----------------- ----------- -------- */ \ + CLS_FIELD(NXFW_TUN_ID, tun_id, TUN_ID) \ + CLS_FIELD(OFPFW_NW_SRC_MASK, nw_src, NW_SRC) \ + CLS_FIELD(OFPFW_NW_DST_MASK, nw_dst, NW_DST) \ + CLS_FIELD(OFPFW_IN_PORT, in_port, IN_PORT) \ + CLS_FIELD(OFPFW_DL_VLAN, dl_vlan, DL_VLAN) \ + CLS_FIELD(OFPFW_DL_TYPE, dl_type, DL_TYPE) \ + CLS_FIELD(OFPFW_TP_SRC, tp_src, TP_SRC) \ + CLS_FIELD(OFPFW_TP_DST, tp_dst, TP_DST) \ + CLS_FIELD(OFPFW_DL_SRC, dl_src, DL_SRC) \ + CLS_FIELD(OFPFW_DL_DST, dl_dst, DL_DST) \ + CLS_FIELD(OFPFW_NW_PROTO, nw_proto, NW_PROTO) \ + CLS_FIELD(OFPFW_DL_VLAN_PCP, dl_vlan_pcp, DL_VLAN_PCP) \ + CLS_FIELD(OFPFW_NW_TOS, nw_tos, NW_TOS) + +/* Field indexes. + * + * (These are also indexed into struct classifier's 'tables' array.) */ +enum { +#define CLS_FIELD(WILDCARDS, MEMBER, NAME) CLS_F_IDX_##NAME, + CLS_FIELDS +#undef CLS_FIELD + CLS_N_FIELDS +}; + +/* Field information. */ +struct cls_field { + int ofs; /* Offset in struct flow. */ + int len; /* Length in bytes. */ + uint32_t wildcards; /* OFPFW_* bit or bits for this field. */ + const char *name; /* Name (for debugging). */ +}; + +static const struct cls_field cls_fields[CLS_N_FIELDS] = { +#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \ + { offsetof(struct flow, MEMBER), \ + sizeof ((struct flow *)0)->MEMBER, \ + WILDCARDS, \ + #NAME }, + CLS_FIELDS +#undef CLS_FIELD +}; + struct test_rule { int aux; /* Auxiliary data. */ struct cls_rule cls_rule; /* Classifier rule data. */ @@ -214,6 +261,7 @@ tcls_delete_matches(struct tcls *cls, struct test_rule *pos = cls->rules[i]; uint32_t wildcards = pos->cls_rule.wc.wildcards; if (include & (wildcards ? 
CLS_INC_WILD : CLS_INC_EXACT) + && !flow_wildcards_has_extra(&pos->cls_rule.wc, &target->wc) && match(target, &pos->cls_rule.flow)) { tcls_remove(cls, pos); } else { @@ -391,35 +439,38 @@ destroy_classifier(struct classifier *cls) static void check_tables(const struct classifier *cls, - int n_tables, int n_buckets, int n_rules) + int n_tables, int n_rules, int n_dups) { + const struct cls_table *table; int found_tables = 0; - int found_buckets = 0; int found_rules = 0; - int i; + int found_dups = 0; - BUILD_ASSERT(CLS_N_FIELDS == ARRAY_SIZE(cls->tables)); - for (i = 0; i < CLS_N_FIELDS; i++) { - const struct cls_bucket *bucket; - if (!hmap_is_empty(&cls->tables[i])) { - found_tables++; - } - HMAP_FOR_EACH (bucket, hmap_node, &cls->tables[i]) { - found_buckets++; - assert(!list_is_empty(&bucket->rules)); - found_rules += list_size(&bucket->rules); - } - } + HMAP_FOR_EACH (table, hmap_node, &cls->tables) { + const struct cls_rule *head; + + assert(!hmap_is_empty(&table->rules)); - if (!hmap_is_empty(&cls->exact_table)) { found_tables++; - found_buckets++; - found_rules += hmap_count(&cls->exact_table); + HMAP_FOR_EACH (head, hmap_node, &table->rules) { + unsigned int prev_priority = UINT_MAX; + const struct cls_rule *rule; + + found_rules++; + LIST_FOR_EACH (rule, list, &head->list) { + assert(rule->priority < prev_priority); + prev_priority = rule->priority; + found_rules++; + found_dups++; + assert(classifier_find_rule_exactly(cls, rule) == rule); + } + } } - assert(n_tables == -1 || found_tables == n_tables); + assert(found_tables == hmap_count(&cls->tables)); + assert(n_tables == -1 || n_tables == hmap_count(&cls->tables)); assert(n_rules == -1 || found_rules == n_rules); - assert(n_buckets == -1 || found_buckets == n_buckets); + assert(n_dups == -1 || found_dups == n_dups); } static struct test_rule * @@ -501,7 +552,7 @@ test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) tcls_rule = tcls_insert(&tcls, rule); assert(!classifier_insert(&cls, &rule->cls_rule)); - check_tables(&cls, 1, 1, 1); + check_tables(&cls, 1, 1, 0); compare_classifiers(&cls, &tcls); classifier_remove(&cls, &rule->cls_rule); @@ -537,7 +588,7 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) tcls_init(&tcls); tcls_insert(&tcls, rule1); assert(!classifier_insert(&cls, &rule1->cls_rule)); - check_tables(&cls, 1, 1, 1); + check_tables(&cls, 1, 1, 0); compare_classifiers(&cls, &tcls); tcls_destroy(&tcls); @@ -546,7 +597,7 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) assert(test_rule_from_cls_rule( classifier_insert(&cls, &rule2->cls_rule)) == rule1); free(rule1); - check_tables(&cls, 1, 1, 1); + check_tables(&cls, 1, 1, 0); compare_classifiers(&cls, &tcls); tcls_destroy(&tcls); destroy_classifier(&cls); @@ -554,354 +605,248 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) } static int -table_mask(int table) +factorial(int n_items) { - return ((1u << CLS_N_FIELDS) - 1) & ~((1u << table) - 1); + int n, i; + + n = 1; + for (i = 2; i <= n_items; i++) { + n *= i; + } + return n; } -static int -random_wcf_in_table(int table, int seed) +static void +swap(int *a, int *b) { - int wc_fields = (1u << table) | hash_int(seed, 0); - return wc_fields & table_mask(table); + int tmp = *a; + *a = *b; + *b = tmp; } -/* Tests classification with two rules at a time that fall into the same - * bucket. 
*/ static void -test_two_rules_in_one_bucket(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +reverse(int *a, int n) { - int table, rel_pri, wcf_pat, value_pat; - - for (table = 0; table <= CLS_N_FIELDS; table++) { - for (rel_pri = -1; rel_pri <= +1; rel_pri++) { - for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) { - int n_value_pats = table == CLS_N_FIELDS - 1 ? 1 : 2; - for (value_pat = 0; value_pat < n_value_pats; value_pat++) { - struct test_rule *rule1, *tcls_rule1; - struct test_rule *rule2, *tcls_rule2; - struct test_rule *displaced_rule; - struct classifier cls; - struct tcls tcls; - unsigned int pri1, pri2; - int wcf1, wcf2; - - if (table != CLS_F_IDX_EXACT) { - /* We can use identical priorities in this test because - * the classifier always chooses the rule added later - * for equal-priority rules that fall into the same - * bucket. */ - pri1 = table * 257 + 50; - pri2 = pri1 + rel_pri; - - wcf1 = (wcf_pat & 1 - ? random_wcf_in_table(table, pri1) - : 1u << table); - wcf2 = (wcf_pat & 2 - ? random_wcf_in_table(table, pri2) - : 1u << table); - if (value_pat) { - wcf1 &= ~(1u << (CLS_N_FIELDS - 1)); - wcf2 &= ~(1u << (CLS_N_FIELDS - 1)); - } - } else { - /* This classifier always puts exact-match rules at - * maximum priority. */ - pri1 = pri2 = UINT_MAX; - - /* No wildcard fields. */ - wcf1 = wcf2 = 0; - } - - rule1 = make_rule(wcf1, pri1, 0); - rule2 = make_rule(wcf2, pri2, - value_pat << (CLS_N_FIELDS - 1)); - - classifier_init(&cls); - tcls_init(&tcls); - - tcls_rule1 = tcls_insert(&tcls, rule1); - tcls_rule2 = tcls_insert(&tcls, rule2); - assert(!classifier_insert(&cls, &rule1->cls_rule)); - displaced_rule = test_rule_from_cls_rule( - classifier_insert(&cls, &rule2->cls_rule)); - if (wcf1 != wcf2 || pri1 != pri2 || value_pat) { - assert(!displaced_rule); + int i; - check_tables(&cls, 1, 1, 2); - compare_classifiers(&cls, &tcls); + for (i = 0; i < n / 2; i++) { + int j = n - (i + 1); + swap(&a[i], &a[j]); + } +} - classifier_remove(&cls, &rule1->cls_rule); - tcls_remove(&tcls, tcls_rule1); - check_tables(&cls, 1, 1, 1); - compare_classifiers(&cls, &tcls); - } else { - assert(displaced_rule == rule1); - check_tables(&cls, 1, 1, 1); - compare_classifiers(&cls, &tcls); - } - free(rule1); +static bool +next_permutation(int *a, int n) +{ + int k; - classifier_remove(&cls, &rule2->cls_rule); - tcls_remove(&tcls, tcls_rule2); - compare_classifiers(&cls, &tcls); - free(rule2); + for (k = n - 2; k >= 0; k--) { + if (a[k] < a[k + 1]) { + int l; - destroy_classifier(&cls); - tcls_destroy(&tcls); + for (l = n - 1; ; l--) { + if (a[l] > a[k]) { + swap(&a[k], &a[l]); + reverse(a + (k + 1), n - (k + 1)); + return true; } } } } + return false; } -/* Tests classification with two rules at a time that fall into the same - * table but different buckets. */ +/* Tests classification with rules that have the same matching criteria. */ static void -test_two_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) -{ - int table, rel_pri, wcf_pat; - - /* Skip tables 0 and CLS_F_IDX_EXACT because they have one bucket. 
*/ - for (table = 1; table < CLS_N_FIELDS; table++) { - for (rel_pri = -1; rel_pri <= +1; rel_pri++) { - for (wcf_pat = 0; wcf_pat < 5; wcf_pat++) { - struct test_rule *rule1, *tcls_rule1; - struct test_rule *rule2, *tcls_rule2; - struct classifier cls; - struct tcls tcls; - unsigned int pri1, pri2; - int wcf1, wcf2; - int value_mask, value_pat1, value_pat2; - int i; - - /* We can use identical priorities in this test because the - * classifier always chooses the rule added later for - * equal-priority rules that fall into the same table. */ - pri1 = table * 257 + 50; - pri2 = pri1 + rel_pri; - - if (wcf_pat & 4) { - wcf1 = wcf2 = random_wcf_in_table(table, pri1); - } else { - wcf1 = (wcf_pat & 1 - ? random_wcf_in_table(table, pri1) - : 1u << table); - wcf2 = (wcf_pat & 2 - ? random_wcf_in_table(table, pri2) - : 1u << table); - } - - /* Generate value patterns that will put the two rules into - * different buckets. */ - value_mask = ((1u << table) - 1); - value_pat1 = hash_int(pri1, 1) & value_mask; - i = 0; - do { - value_pat2 = (hash_int(pri2, i++) & value_mask); - } while (value_pat1 == value_pat2); - rule1 = make_rule(wcf1, pri1, value_pat1); - rule2 = make_rule(wcf2, pri2, value_pat2); - - classifier_init(&cls); - tcls_init(&tcls); - - tcls_rule1 = tcls_insert(&tcls, rule1); - tcls_rule2 = tcls_insert(&tcls, rule2); - assert(!classifier_insert(&cls, &rule1->cls_rule)); - assert(!classifier_insert(&cls, &rule2->cls_rule)); - check_tables(&cls, 1, 2, 2); - compare_classifiers(&cls, &tcls); +test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +{ + enum { N_RULES = 3 }; + int n_pris; - classifier_remove(&cls, &rule1->cls_rule); - tcls_remove(&tcls, tcls_rule1); - check_tables(&cls, 1, 1, 1); - compare_classifiers(&cls, &tcls); - free(rule1); + for (n_pris = N_RULES; n_pris >= 1; n_pris--) { + int ops[N_RULES * 2]; + int pris[N_RULES]; + int n_permutations; + int i; - classifier_remove(&cls, &rule2->cls_rule); - tcls_remove(&tcls, tcls_rule2); - compare_classifiers(&cls, &tcls); - free(rule2); + pris[0] = 0; + for (i = 1; i < N_RULES; i++) { + pris[i] = pris[i - 1] + (n_pris > i); + } - classifier_destroy(&cls); - tcls_destroy(&tcls); - } + for (i = 0; i < N_RULES * 2; i++) { + ops[i] = i / 2; } - } -} -/* Tests classification with two rules at a time that fall into different - * tables. */ -static void -test_two_rules_in_different_tables(int argc OVS_UNUSED, - char *argv[] OVS_UNUSED) -{ - int table1, table2, rel_pri, wcf_pat; - - for (table1 = 0; table1 < CLS_N_FIELDS; table1++) { - for (table2 = table1 + 1; table2 <= CLS_N_FIELDS; table2++) { - for (rel_pri = 0; rel_pri < 2; rel_pri++) { - for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) { - struct test_rule *rule1, *tcls_rule1; - struct test_rule *rule2, *tcls_rule2; - struct classifier cls; - struct tcls tcls; - unsigned int pri1, pri2; - int wcf1, wcf2; - - /* We must use unique priorities in this test because the - * classifier makes the rule choice undefined for rules of - * equal priority that fall into different tables. (In - * practice, lower-numbered tables win.) */ - pri1 = table1 * 257 + 50; - pri2 = rel_pri ? pri1 - 1 : pri1 + 1; - - wcf1 = (wcf_pat & 1 - ? random_wcf_in_table(table1, pri1) - : 1u << table1); - wcf2 = (wcf_pat & 2 - ? 
random_wcf_in_table(table2, pri2) - : 1u << table2); - - if (table2 == CLS_F_IDX_EXACT) { - pri2 = UINT16_MAX; - wcf2 = 0; - } + n_permutations = 0; + do { + struct test_rule *rules[N_RULES]; + struct test_rule *tcls_rules[N_RULES]; + int pri_rules[N_RULES]; + struct classifier cls; + struct tcls tcls; - rule1 = make_rule(wcf1, pri1, 0); - rule2 = make_rule(wcf2, pri2, 0); + n_permutations++; - classifier_init(&cls); - tcls_init(&tcls); + for (i = 0; i < N_RULES; i++) { + rules[i] = make_rule(456, pris[i], 0); + tcls_rules[i] = NULL; + pri_rules[i] = -1; + } - tcls_rule1 = tcls_insert(&tcls, rule1); - tcls_rule2 = tcls_insert(&tcls, rule2); - assert(!classifier_insert(&cls, &rule1->cls_rule)); - assert(!classifier_insert(&cls, &rule2->cls_rule)); - check_tables(&cls, 2, 2, 2); - compare_classifiers(&cls, &tcls); + classifier_init(&cls); + tcls_init(&tcls); - classifier_remove(&cls, &rule1->cls_rule); - tcls_remove(&tcls, tcls_rule1); - check_tables(&cls, 1, 1, 1); - compare_classifiers(&cls, &tcls); - free(rule1); + for (i = 0; i < ARRAY_SIZE(ops); i++) { + int j = ops[i]; + int m, n; - classifier_remove(&cls, &rule2->cls_rule); - tcls_remove(&tcls, tcls_rule2); - compare_classifiers(&cls, &tcls); - free(rule2); + if (!tcls_rules[j]) { + struct test_rule *displaced_rule; + + tcls_rules[j] = tcls_insert(&tcls, rules[j]); + displaced_rule = test_rule_from_cls_rule( + classifier_insert(&cls, &rules[j]->cls_rule)); + if (pri_rules[pris[j]] >= 0) { + int k = pri_rules[pris[j]]; + assert(displaced_rule != NULL); + assert(displaced_rule != rules[j]); + assert(pris[j] == displaced_rule->cls_rule.priority); + tcls_rules[k] = NULL; + } else { + assert(displaced_rule == NULL); + } + pri_rules[pris[j]] = j; + } else { + classifier_remove(&cls, &rules[j]->cls_rule); + tcls_remove(&tcls, tcls_rules[j]); + tcls_rules[j] = NULL; + pri_rules[pris[j]] = -1; + } - classifier_destroy(&cls); - tcls_destroy(&tcls); + n = 0; + for (m = 0; m < N_RULES; m++) { + n += tcls_rules[m] != NULL; } + check_tables(&cls, n > 0, n, n - 1); + + compare_classifiers(&cls, &tcls); } - } + + classifier_destroy(&cls); + tcls_destroy(&tcls); + + for (i = 0; i < N_RULES; i++) { + free(rules[i]); + } + } while (next_permutation(ops, ARRAY_SIZE(ops))); + assert(n_permutations == (factorial(N_RULES * 2) >> N_RULES)); } } -/* Tests classification with many rules at a time that fall into the same - * bucket but have unique priorities (and various wildcards). */ -static void -test_many_rules_in_one_bucket(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +static int +count_ones(unsigned long int x) { - enum { MAX_RULES = 50 }; - int iteration, table; - - for (iteration = 0; iteration < 3; iteration++) { - for (table = 0; table <= CLS_N_FIELDS; table++) { - unsigned int priorities[MAX_RULES]; - struct classifier cls; - struct tcls tcls; - int i; + int n = 0; - srand(hash_int(table, iteration)); - for (i = 0; i < MAX_RULES; i++) { - priorities[i] = i * 129; - } - shuffle(priorities, ARRAY_SIZE(priorities)); + while (x) { + x &= x - 1; + n++; + } - classifier_init(&cls); - tcls_init(&tcls); + return n; +} - for (i = 0; i < MAX_RULES; i++) { - struct test_rule *rule; - unsigned int priority = priorities[i]; - int wcf; - - wcf = random_wcf_in_table(table, priority); - rule = make_rule(wcf, priority, - table == CLS_F_IDX_EXACT ? 
i : 1234); - tcls_insert(&tcls, rule); - assert(!classifier_insert(&cls, &rule->cls_rule)); - check_tables(&cls, 1, 1, i + 1); - compare_classifiers(&cls, &tcls); - } +static bool +array_contains(int *array, int n, int value) +{ + int i; - destroy_classifier(&cls); - tcls_destroy(&tcls); + for (i = 0; i < n; i++) { + if (array[i] == value) { + return true; } } + + return false; } -/* Tests classification with many rules at a time that fall into the same - * table but random buckets. */ +/* Tests classification with two rules at a time that fall into the same + * table but different lists. */ static void test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) { - enum { MAX_RULES = 50 }; - int iteration, table; + int iteration; - for (iteration = 0; iteration < 3; iteration++) { - for (table = 0; table < CLS_N_FIELDS; table++) { - unsigned int priorities[MAX_RULES]; - struct classifier cls; - struct tcls tcls; - int i; + for (iteration = 0; iteration < 50; iteration++) { + enum { N_RULES = 20 }; + struct test_rule *rules[N_RULES]; + struct test_rule *tcls_rules[N_RULES]; + struct classifier cls; + struct tcls tcls; + int value_pats[N_RULES]; + int value_mask; + int wcf; + int i; - srand(hash_int(table, iteration)); - for (i = 0; i < MAX_RULES; i++) { - priorities[i] = i * 129; - } - shuffle(priorities, ARRAY_SIZE(priorities)); + do { + wcf = rand() & ((1u << CLS_N_FIELDS) - 1); + value_mask = ~wcf & ((1u << CLS_N_FIELDS) - 1); + } while ((1 << count_ones(value_mask)) < N_RULES); - classifier_init(&cls); - tcls_init(&tcls); + classifier_init(&cls); + tcls_init(&tcls); - for (i = 0; i < MAX_RULES; i++) { - struct test_rule *rule; - unsigned int priority = priorities[i]; - int wcf; + for (i = 0; i < N_RULES; i++) { + unsigned int priority = rand(); - wcf = random_wcf_in_table(table, priority); - rule = make_rule(wcf, priority, hash_int(priority, 1)); - tcls_insert(&tcls, rule); - assert(!classifier_insert(&cls, &rule->cls_rule)); - check_tables(&cls, 1, -1, i + 1); - compare_classifiers(&cls, &tcls); - } + do { + value_pats[i] = rand() & value_mask; + } while (array_contains(value_pats, i, value_pats[i])); - destroy_classifier(&cls); - tcls_destroy(&tcls); + rules[i] = make_rule(wcf, priority, value_pats[i]); + tcls_rules[i] = tcls_insert(&tcls, rules[i]); + assert(!classifier_insert(&cls, &rules[i]->cls_rule)); + + check_tables(&cls, 1, i + 1, 0); + compare_classifiers(&cls, &tcls); + } + + for (i = 0; i < N_RULES; i++) { + tcls_remove(&tcls, tcls_rules[i]); + classifier_remove(&cls, &rules[i]->cls_rule); + free(rules[i]); + + check_tables(&cls, i < N_RULES - 1, N_RULES - (i + 1), 0); + compare_classifiers(&cls, &tcls); } + + classifier_destroy(&cls); + tcls_destroy(&tcls); } } -/* Tests classification with many rules at a time that fall into random buckets - * in random tables. */ +/* Tests classification with many rules at a time that fall into random lists + * in 'n' tables. 
*/ static void -test_many_rules_in_different_tables(int argc OVS_UNUSED, - char *argv[] OVS_UNUSED) +test_many_rules_in_n_tables(int n_tables) { enum { MAX_RULES = 50 }; + int wcfs[10]; int iteration; + int i; + + assert(n_tables < 10); + for (i = 0; i < n_tables; i++) { + do { + wcfs[i] = rand() & ((1u << CLS_N_FIELDS) - 1); + } while (array_contains(wcfs, i, wcfs[i])); + } for (iteration = 0; iteration < 30; iteration++) { unsigned int priorities[MAX_RULES]; struct classifier cls; struct tcls tcls; - int i; srand(iteration); for (i = 0; i < MAX_RULES; i++) { @@ -915,13 +860,12 @@ test_many_rules_in_different_tables(int argc OVS_UNUSED, for (i = 0; i < MAX_RULES; i++) { struct test_rule *rule; unsigned int priority = priorities[i]; - int table = rand() % (CLS_N_FIELDS + 1); - int wcf = random_wcf_in_table(table, rand()); + int wcf = wcfs[rand() % n_tables]; int value_pat = rand() & ((1u << CLS_N_FIELDS) - 1); rule = make_rule(wcf, priority, value_pat); tcls_insert(&tcls, rule); assert(!classifier_insert(&cls, &rule->cls_rule)); - check_tables(&cls, -1, -1, i + 1); + check_tables(&cls, -1, i + 1, -1); compare_classifiers(&cls, &tcls); } @@ -935,6 +879,7 @@ test_many_rules_in_different_tables(int argc OVS_UNUSED, free_rule, &cls); tcls_delete_matches(&tcls, &rule->cls_rule, include); compare_classifiers(&cls, &tcls); + check_tables(&cls, -1, -1, -1); free(rule); } @@ -942,26 +887,35 @@ test_many_rules_in_different_tables(int argc OVS_UNUSED, tcls_destroy(&tcls); } } + +static void +test_many_rules_in_two_tables(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +{ + test_many_rules_in_n_tables(2); +} + +static void +test_many_rules_in_five_tables(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +{ + test_many_rules_in_n_tables(5); +} static const struct command commands[] = { {"empty", 0, 0, test_empty}, {"destroy-null", 0, 0, test_destroy_null}, {"single-rule", 0, 0, test_single_rule}, {"rule-replacement", 0, 0, test_rule_replacement}, - {"two-rules-in-one-bucket", 0, 0, test_two_rules_in_one_bucket}, - {"two-rules-in-one-table", 0, 0, test_two_rules_in_one_table}, - {"two-rules-in-different-tables", 0, 0, - test_two_rules_in_different_tables}, - {"many-rules-in-one-bucket", 0, 0, test_many_rules_in_one_bucket}, + {"many-rules-in-one-list", 0, 0, test_many_rules_in_one_list}, {"many-rules-in-one-table", 0, 0, test_many_rules_in_one_table}, - {"many-rules-in-different-tables", 0, 0, - test_many_rules_in_different_tables}, + {"many-rules-in-two-tables", 0, 0, test_many_rules_in_two_tables}, + {"many-rules-in-five-tables", 0, 0, test_many_rules_in_five_tables}, {NULL, 0, 0, NULL}, }; int main(int argc, char *argv[]) { + set_program_name(argv[0]); init_values(); run_command(argc - 1, argv + 1, commands); return 0; -- 2.30.2
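
A note on the flow_wildcards_has_extra() check added to tcls_delete_matches() in the test: the reference implementation now skips any rule that wildcards a bit or field the delete target does not, so that it agrees with the real classifier about which rules a wildcarded target covers. The sketch below is illustrative only and is not part of the patch; it shows the idea for the plain OFPFW_*-style bitmask portion of the wildcards (the patch's function must also compare the nw_src/nw_dst prefix masks), and the helper name bitmask_has_extra() is invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 'a' has "extra" wildcards if it wildcards any bit that 'b' does not. */
static bool
bitmask_has_extra(uint32_t a, uint32_t b)
{
    return (a & ~b) != 0;
}

int
main(void)
{
    uint32_t target = 0x3;      /* target wildcards bits 0 and 1 */
    uint32_t narrower = 0x1;    /* wildcards bit 0 only */
    uint32_t wider = 0x7;       /* also wildcards bit 2 */

    printf("%d %d\n",
           bitmask_has_extra(narrower, target),   /* 0: nothing extra */
           bitmask_has_extra(wider, target));     /* 1: bit 2 is extra */
    return 0;
}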
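The rewritten check_tables() verifies three counts against the classifier's structure: the number of non-empty tables, the total number of rules, and the number of "dups", i.e. rules that sit behind another rule with identical match criteria in a priority-ordered list. That convention is why test_single_rule() now expects (1, 1, 0) and why a list of three same-flow rules counts as three rules and two dups. The toy program below is not OVS code (the struct names are invented for the example); it only restates that counting convention.

#include <stdio.h>

struct toy_list { int n_rules; };                       /* rules sharing one flow */
struct toy_table { struct toy_list lists[4]; int n_lists; };

static void
count(const struct toy_table *tables, int n_tables,
      int *found_tables, int *found_rules, int *found_dups)
{
    int i, j;

    *found_tables = n_tables;
    *found_rules = 0;
    *found_dups = 0;
    for (i = 0; i < n_tables; i++) {
        for (j = 0; j < tables[i].n_lists; j++) {
            int len = tables[i].lists[j].n_rules;
            *found_rules += len;        /* head plus lower-priority rules */
            *found_dups += len - 1;     /* everything past the head */
        }
    }
}

int
main(void)
{
    struct toy_table t;
    int n_tables, n_rules, n_dups;

    /* One table holding two lists of 1 and 3 rules: 4 rules, 2 dups. */
    t.n_lists = 2;
    t.lists[0].n_rules = 1;
    t.lists[1].n_rules = 3;

    count(&t, 1, &n_tables, &n_rules, &n_dups);
    printf("%d %d %d\n", n_tables, n_rules, n_dups);    /* prints "1 4 2" */
    return 0;
}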
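test_many_rules_in_one_list() drives the classifier through every order of inserting and removing N_RULES rules by permuting an ops[] array in which each rule index appears twice, using the textbook lexicographic next-permutation step: find the rightmost ascent a[k] < a[k+1], swap a[k] with the rightmost larger element, and reverse the suffix. Because the two copies of each index are interchangeable, the number of distinct sequences is (2N)!/2^N, which is exactly what the final assertion, factorial(N_RULES * 2) >> N_RULES, encodes. The standalone example below re-implements that enumeration for a small array to make the count concrete; it is a sketch, not the patch's code.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static void
swap(int *a, int *b)
{
    int tmp = *a;
    *a = *b;
    *b = tmp;
}

static void
reverse(int *a, int n)
{
    int i;

    for (i = 0; i < n / 2; i++) {
        swap(&a[i], &a[n - (i + 1)]);
    }
}

/* Advances 'a' to its next permutation in lexicographic order.  Returns false
 * when 'a' was already the last (descending) permutation. */
static bool
next_permutation(int *a, int n)
{
    int k;

    for (k = n - 2; k >= 0; k--) {
        if (a[k] < a[k + 1]) {
            int l;

            for (l = n - 1; a[l] <= a[k]; l--) {
                continue;
            }
            swap(&a[k], &a[l]);
            reverse(a + k + 1, n - (k + 1));
            return true;
        }
    }
    return false;
}

int
main(void)
{
    enum { N = 3 };
    int ops[2 * N] = { 0, 0, 1, 1, 2, 2 };      /* must start sorted */
    int count = 0;

    do {
        count++;
    } while (next_permutation(ops, 2 * N));

    printf("%d\n", count);      /* (2*3)! / 2^3 = 720 / 8 = 90 */
    assert(count == 90);
    return 0;
}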
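test_many_rules_in_one_table() keeps all rules in a single table by giving every rule the same wildcard set 'wcf', and it needs at least N_RULES distinct value patterns in the remaining non-wildcarded bits, so it rejects a candidate 'wcf' until 1 << count_ones(value_mask) reaches N_RULES. count_ones() relies on the classic x &= x - 1 step, which clears exactly one set bit per iteration. A small self-contained illustration follows (the example mask value is made up):

#include <assert.h>
#include <stdio.h>

/* Population count: each x &= x - 1 clears the lowest set bit. */
static int
count_ones(unsigned long int x)
{
    int n = 0;

    while (x) {
        x &= x - 1;
        n++;
    }
    return n;
}

int
main(void)
{
    enum { N_RULES = 20 };
    unsigned long int value_mask = 0x1f3;       /* example mask: 7 usable bits */

    printf("%d\n", count_ones(value_mask));     /* prints "7" */

    /* 2^7 = 128 >= 20, so this mask leaves room for 20 distinct patterns. */
    assert((1 << count_ones(value_mask)) >= N_RULES);
    return 0;
}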