lib/bitmap.h \
lib/cfg.c \
lib/cfg.h \
+ lib/classifier.c \
+ lib/classifier.h \
lib/command-line.c \
lib/command-line.h \
lib/compiler.h \
--- /dev/null
+/* Copyright (c) 2009 The Board of Trustees of The Leland Stanford
+ * Junior University
+ *
+ * We are making the OpenFlow specification and associated documentation
+ * (Software) available for public use and benefit with the expectation
+ * that others will use, modify and enhance the Software and contribute
+ * those enhancements back to the community. However, since we would
+ * like to make the Software available for broadest use, with as few
+ * restrictions as possible permission is hereby granted, free of
+ * charge, to any person obtaining a copy of this Software to deal in
+ * the Software under the copyrights without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * The name and trademarks of copyright holder(s) may NOT be used in
+ * advertising or publicity pertaining to the Software or any
+ * derivatives without specific, written prior permission.
+ */
+
#include <config.h>
#include "classifier.h"
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "flow.h"
#include "hash.h"
+
+const struct cls_field cls_fields[CLS_N_FIELDS + 1] = {
+#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
+ { offsetof(struct flow, MEMBER), \
+ sizeof ((struct flow *)0)->MEMBER, \
+ WILDCARDS, \
+ #NAME }, \
+ CLS_FIELDS
+#undef CLS_FIELD
+ { sizeof(struct flow), 0, 0, "exact" },
+};
+
+static uint32_t hash_fields(const struct flow *, int table_idx);
+static bool equal_fields(const struct flow *, const struct flow *, int table_idx);
+
+static int table_idx_from_wildcards(uint32_t wildcards);
+static struct cls_rule *table_insert(struct hmap *, struct cls_rule *);
+static struct cls_rule *insert_exact_rule(struct classifier *,
+ struct cls_rule *);
+static struct cls_bucket *find_bucket(struct hmap *, size_t hash,
+ const struct cls_rule *);
+static struct cls_rule *search_table(const struct hmap *table, int field_idx,
+ const struct cls_rule *);
+static struct cls_rule *search_exact_table(const struct classifier *,
+ size_t hash, const struct flow *);
+static bool rules_match_1wild(const struct cls_rule *fixed,
+ const struct cls_rule *wild, int field_idx);
+
+/* Converts the flow in 'flow' into a cls_rule in 'rule', with the given
+ * 'wildcards' and 'priority'.
+ *
+ * Rules without wildcards always have the maximum priority 65535. */
+void
+cls_rule_from_flow(struct cls_rule *rule, const struct flow *flow,
+ uint32_t wildcards, uint16_t priority)
+{
+ assert(flow->reserved == 0);
+ rule->flow = *flow;
+ flow_wildcards_init(&rule->wc, wildcards);
+ rule->priority = rule->wc.wildcards ? priority : UINT16_MAX;
+ rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
+}
+
+/* Converts the ofp_match in 'match' into a cls_rule in 'rule', with the given
+ * 'priority'. */
+void
+cls_rule_from_match(struct cls_rule *rule, const struct ofp_match *match,
+ uint16_t priority)
+{
+ uint32_t wildcards;
+ flow_from_match(&rule->flow, &wildcards, match);
+ flow_wildcards_init(&rule->wc, wildcards);
+ rule->priority = rule->wc.wildcards ? priority : UINT16_MAX;
+ rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
+}
+
+/* Prints cls_rule 'rule', for debugging.
+ *
+ * (The output could be improved and expanded, but this was good enough to
+ * debug the classifier.) */
+void
+cls_rule_print(const struct cls_rule *rule)
+{
+ printf("wildcards=%x priority=%d ", rule->wc.wildcards, rule->priority);
+ flow_print(stdout, &rule->flow);
+ putc('\n', stdout);
+}
+
+/* Adjusts pointers around 'old', which must be in classifier 'cls', to
+ * compensate for it having been moved in memory to 'new' (e.g. due to
+ * realloc()).
+ *
+ * This function cannot be realized in all possible flow classifier
+ * implementations, so we will probably have to change the interface if we
+ * change the implementation. Shouldn't be a big deal though. */
+void
+cls_rule_moved(struct classifier *cls, struct cls_rule *old,
+ struct cls_rule *new)
+{
+ assert(old != new);
+ if (new->wc.wildcards) {
+ list_moved(&new->node.list);
+ } else {
+ hmap_moved(&cls->exact_table, &old->node.hmap, &new->node.hmap);
+ }
+}
+
+/* Replaces 'old', which must be in classifier 'cls', by 'new' (e.g. due to
+ * realloc()); that is, after calling this function 'new' will be in 'cls' in
+ * place of 'old'.
+ *
+ * 'new' and 'old' must be exactly the same: wildcard the same fields, have the
+ * same fixed values for non-wildcarded fields, and have the same priority.
+ *
+ * The caller takes ownership of 'old' and is thus responsible for freeing it,
+ * etc., as necessary.
+ *
+ * This function cannot be realized in all possible flow classifier
+ * implementations, so we will probably have to change the interface if we
+ * change the implementation. Shouldn't be a big deal though. */
+void
+cls_rule_replace(struct classifier *cls, const struct cls_rule *old,
+ struct cls_rule *new)
+{
+ assert(old != new);
+ assert(old->wc.wildcards == new->wc.wildcards);
+ assert(old->priority == new->priority);
+
+ if (new->wc.wildcards) {
+ list_replace(&new->node.list, &old->node.list);
+ } else {
+ hmap_replace(&cls->exact_table, &old->node.hmap, &new->node.hmap);
+ }
+}
+\f
+/* Initializes 'cls' as a classifier that initially contains no classification
+ * rules. */
+void
+classifier_init(struct classifier *cls)
+{
+ int i;
+
+ cls->n_rules = 0;
+ for (i = 0; i < ARRAY_SIZE(cls->tables); i++) {
+ hmap_init(&cls->tables[i]);
+ }
+ hmap_init(&cls->exact_table);
+}
+
+/* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
+ * caller's responsibility. */
+void
+classifier_destroy(struct classifier *cls)
+{
+ if (cls) {
+ struct cls_bucket *bucket, *next_bucket;
+ struct hmap *tbl;
+
+ for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
+ HMAP_FOR_EACH_SAFE (bucket, next_bucket,
+ struct cls_bucket, hmap_node, tbl) {
+ free(bucket);
+ }
+ hmap_destroy(tbl);
+ }
+ hmap_destroy(&cls->exact_table);
+ }
+}
+
+/* Returns true if 'cls' does not contain any classification rules, false
+ * otherwise. */
+bool
+classifier_is_empty(const struct classifier *cls)
+{
+ return cls->n_rules == 0;
+}
+
+/* Returns the number of rules in 'classifier'. */
+int
+classifier_count(const struct classifier *cls)
+{
+ return cls->n_rules;
+}
+
+/* Returns the number of rules in 'classifier' that have no wildcards. */
+int
+classifier_count_exact(const struct classifier *cls)
+{
+ return hmap_count(&cls->exact_table);
+}
+
+/* Inserts 'rule' into 'cls'. Transfers ownership of 'rule' to 'cls'.
+ *
+ * If 'cls' already contains an identical rule (including wildcards, values of
+ * fixed fields, and priority), replaces the old rule by 'rule' and returns the
+ * rule that was replaced. The caller takes ownership of the returned rule and
+ * is thus responsible for freeing it, etc., as necessary.
+ *
+ * Returns NULL if 'cls' does not contain a rule with an identical key, after
+ * inserting the new rule. In this case, no rules are displaced by the new
+ * rule, even rules that cannot have any effect because the new rule matches a
+ * superset of their flows and has higher priority. */
+struct cls_rule *
+classifier_insert(struct classifier *cls, struct cls_rule *rule)
+{
+ struct cls_rule *old;
+ assert((rule->wc.wildcards == 0) == (rule->table_idx == CLS_F_IDX_EXACT));
+ old = (rule->wc.wildcards
+ ? table_insert(&cls->tables[rule->table_idx], rule)
+ : insert_exact_rule(cls, rule));
+ if (!old) {
+ cls->n_rules++;
+ }
+ return old;
+}
+
/* Removes 'rule', which must currently be in 'cls', from 'cls'.  Frees the
 * containing bucket if 'rule' was its last wildcarded rule.  It is caller's
 * responsibility to free 'rule' itself, if this is desirable. */
void
classifier_remove(struct classifier *cls, struct cls_rule *rule)
{
    if (rule->wc.wildcards) {
        /* Remove 'rule' from bucket.  If that empties the bucket, remove the
         * bucket from its table. */
        struct hmap *table = &cls->tables[rule->table_idx];
        struct list *rules = list_remove(&rule->node.list);
        if (list_is_empty(rules)) {
            /* This code is a little tricky.  list_remove() returns the list
             * element just after the one removed.  Since the list is now
             * empty, this will be the address of the 'rules' member of the
             * bucket that was just emptied, so pointer arithmetic (via
             * CONTAINER_OF) can find that bucket. */
            struct cls_bucket *bucket;
            bucket = CONTAINER_OF(rules, struct cls_bucket, rules);
            hmap_remove(table, &bucket->hmap_node);
            free(bucket);
        }
    } else {
        /* Remove 'rule' from cls->exact_table. */
        hmap_remove(&cls->exact_table, &rule->node.hmap);
    }
    cls->n_rules--;
}
+
+/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
+ * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
+ * of equal priority match 'flow', returns one arbitrarily.
+ *
+ * (When multiple rules of equal priority happen to fall into the same bucket,
+ * rules added more recently take priority over rules added less recently, but
+ * this is subject to change and should not be depended upon.) */
+struct cls_rule *
+classifier_lookup(const struct classifier *cls, const struct flow *flow)
+{
+ struct cls_rule *best = NULL;
+ if (!hmap_is_empty(&cls->exact_table)) {
+ best = search_exact_table(cls, flow_hash(flow, 0), flow);
+ }
+ if (!best && cls->n_rules > hmap_count(&cls->exact_table)) {
+ struct cls_rule target;
+ int i;
+
+ cls_rule_from_flow(&target, flow, 0, 0);
+ for (i = 0; i < CLS_N_FIELDS; i++) {
+ struct cls_rule *rule = search_table(&cls->tables[i], i, &target);
+ if (rule && (!best || rule->priority > best->priority)) {
+ best = rule;
+ }
+ }
+ }
+ return best;
+}
+
/* Searches 'cls' for a rule whose key is exactly 'target'/'wildcards' and
 * whose priority is exactly 'priority'.  Returns that rule if found, or a
 * null pointer otherwise.
 *
 * For a wildcard-free 'target' the exact-match table is consulted directly;
 * 'priority' is ignored there because that table never holds more than one
 * rule per flow. */
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
                             const struct flow *target, uint32_t wildcards,
                             uint16_t priority)
{
    struct cls_bucket *bucket;
    int table_idx;
    uint32_t hash;

    if (!wildcards) {
        /* Ignores 'priority', should we check that it is UINT16_MAX? */
        return search_exact_table(cls, flow_hash(target, 0), target);
    }

    assert(wildcards == (wildcards & OFPFW_ALL));
    table_idx = table_idx_from_wildcards(wildcards);
    hash = hash_fields(target, table_idx);
    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
                             &cls->tables[table_idx]) {
        if (equal_fields(&bucket->fixed, target, table_idx)) {
            struct cls_rule *pos;
            LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
                /* Bucket lists are ordered from highest to lowest priority
                 * (see bucket_insert()), so once we pass 'priority' no later
                 * rule can be the one sought. */
                if (pos->priority < priority) {
                    return NULL;
                } else if (pos->priority == priority &&
                           pos->wc.wildcards == wildcards &&
                           flow_equal(target, &pos->flow)) {
                    return pos;
                }
            }
        }
    }
    return NULL;
}
+
/* Calls 'cb' with 'aux' for every rule in 'cls' that matches 'target',
 * i.e. every rule whose flow is covered by 'target' given 'target''s
 * wildcards.  'include' selects which rule populations to visit: any
 * combination of CLS_INC_WILD (wildcarded rules) and CLS_INC_EXACT
 * (exact-match rules).  Ignores target->priority.
 *
 * NOTE(review): unlike classifier_for_each(), callback invocations here are
 * NOT deferred past the iterator, so 'cb' presumably must not free or remove
 * the rule it is given -- confirm against callers. */
void
classifier_for_each_match(const struct classifier *cls,
                          const struct cls_rule *target,
                          int include, cls_cb_func *cb, void *aux)
{
    if (include & CLS_INC_WILD) {
        const struct hmap *table;
        for (table = &cls->tables[0]; table < &cls->tables[CLS_N_FIELDS];
             table++) {
            struct cls_bucket *bucket;
            HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, table) {
                /* XXX there is a bit of room for optimization here based on
                 * rejecting entire buckets on their fixed fields, but it will
                 * only be worthwhile for big buckets (which we hope we won't
                 * get anyway, but...) */
                struct cls_rule *pos;
                LIST_FOR_EACH (pos, struct cls_rule, node.list,
                               &bucket->rules) {
                    /* Field index 0: compare every field against 'target'. */
                    if (rules_match_1wild(pos, target, 0)) {
                        cb(pos, aux);
                    }
                }
            }
        }
    }

    if (include & CLS_INC_EXACT) {
        if (target->wc.wildcards) {
            struct cls_rule *rule;
            HMAP_FOR_EACH (rule, struct cls_rule, node.hmap,
                           &cls->exact_table) {
                if (rules_match_1wild(rule, target, 0)) {
                    cb(rule, aux);
                }
            }
        } else {
            /* Optimization: there can be at most one match in the exact
             * table. */
            size_t hash = flow_hash(&target->flow, 0);
            struct cls_rule *rule = search_exact_table(cls, hash,
                                                       &target->flow);
            if (rule) {
                cb(rule, aux);
            }
        }
    }
}
+
+void
+classifier_for_each(const struct classifier *cls,
+ void (*callback)(struct cls_rule *, void *aux),
+ void *aux)
+{
+ struct cls_bucket *bucket, *next_bucket;
+ struct cls_rule *prev_rule, *rule, *next_rule;
+ const struct hmap *tbl;
+
+ prev_rule = NULL;
+ for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
+ HMAP_FOR_EACH_SAFE (bucket, next_bucket,
+ struct cls_bucket, hmap_node, tbl) {
+ LIST_FOR_EACH (rule, struct cls_rule, node.list, &bucket->rules) {
+ if (prev_rule) {
+ callback(prev_rule, aux);
+ }
+ prev_rule = rule;
+ }
+ }
+ }
+ HMAP_FOR_EACH_SAFE (rule, next_rule,
+ struct cls_rule, node.hmap, &cls->exact_table) {
+ if (prev_rule) {
+ callback(prev_rule, aux);
+ }
+ prev_rule = rule;
+ }
+ if (prev_rule) {
+ callback(prev_rule, aux);
+ }
+}
+
/* Calls 'callback' with 'aux' for every rule in 'cls' whose wildcard bits are
 * exactly 'wildcards' (zero means the exact-match rules).
 *
 * As in classifier_for_each(), each invocation is deferred until the
 * iterators have moved past the rule, so 'callback' can remove (and free) the
 * rule it is given. */
void
classifier_for_each_with_wildcards(const struct classifier *cls,
                                   uint32_t wildcards,
                                   cls_cb_func *callback, void *aux)
{
    struct cls_rule *prev_rule = NULL;  /* Rule whose callback is pending. */

    assert(!(wildcards & ~OFPFW_ALL));
    if (wildcards) {
        /* All rules with this wildcard combination live in a single table;
         * every rule in that table with matching wc.wildcards qualifies. */
        int table_idx = table_idx_from_wildcards(wildcards);
        const struct hmap *tbl = &cls->tables[table_idx];
        struct cls_bucket *bucket, *next_bucket;
        struct cls_rule *rule;

        HMAP_FOR_EACH_SAFE (bucket, next_bucket,
                            struct cls_bucket, hmap_node, tbl) {
            LIST_FOR_EACH (rule, struct cls_rule, node.list, &bucket->rules) {
                if (rule->wc.wildcards == wildcards) {
                    if (prev_rule) {
                        callback(prev_rule, aux);
                    }
                    prev_rule = rule;
                }
            }
        }
    } else {
        struct cls_rule *rule, *next_rule;
        HMAP_FOR_EACH_SAFE (rule, next_rule,
                            struct cls_rule, node.hmap, &cls->exact_table) {
            if (prev_rule) {
                callback(prev_rule, aux);
            }
            prev_rule = rule;
        }
    }
    /* Flush the final deferred rule, if any. */
    if (prev_rule) {
        callback(prev_rule, aux);
    }
}
+\f
+static struct cls_bucket *create_bucket(struct hmap *, size_t hash,
+ const struct flow *fixed);
+static struct cls_rule *bucket_insert(struct cls_bucket *, struct cls_rule *);
+
+static inline bool equal_bytes(const void *, const void *, size_t n);
+
/* Returns a hash computed across the fields in 'flow' whose field indexes
 * (CLS_F_IDX_*) are less than 'table_idx'.  (If 'table_idx' is
 * CLS_F_IDX_EXACT, hashes all the fields in 'flow').
 *
 * Bytes from the prefix of 'flow' are gathered into 'tmp' twelve at a time
 * and folded into the hash state (a, b, c) via HASH_MIX; the mixing appears
 * to follow the usual Jenkins-style pattern -- see the HASH_MIX/HASH_FINAL
 * definitions in hash.h to confirm. */
static uint32_t
hash_fields(const struct flow *flow, int table_idx)
{
    /* I just know I'm going to hell for writing code this way.
     *
     * GCC generates pretty good code here, with only a single taken
     * conditional jump per execution.  Now the question is, would we be better
     * off marking this function ALWAYS_INLINE and writing a wrapper that
     * switches on the value of 'table_idx' to get rid of all the conditional
     * jumps entirely (except for one in the wrapper)?  Honestly I really,
     * really hope that it doesn't matter in practice.
     *
     * We could do better by calculating hashes incrementally, instead of
     * starting over from the top each time.  But that would be even uglier. */
    uint32_t a, b, c;
    uint32_t tmp[3];
    size_t n;

    a = b = c = 0xdeadbeef + table_idx;
    n = 0;

#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                      \
    if (table_idx == CLS_F_IDX_##NAME) {                        \
        /* Done. */                                             \
        memset((uint8_t *) tmp + n, 0, sizeof tmp - n);         \
        goto finish;                                            \
    } else {                                                    \
        const size_t size = sizeof flow->MEMBER;                \
        const uint8_t *p1 = (const uint8_t *) &flow->MEMBER;    \
        const size_t p1_size = MIN(sizeof tmp - n, size);       \
        const uint8_t *p2 = p1 + p1_size;                       \
        const size_t p2_size = size - p1_size;                  \
                                                                \
        /* Append to 'tmp' as much data as will fit. */         \
        memcpy((uint8_t *) tmp + n, p1, p1_size);               \
        n += p1_size;                                           \
                                                                \
        /* If 'tmp' is full, mix. */                            \
        if (n == sizeof tmp) {                                  \
            a += tmp[0];                                        \
            b += tmp[1];                                        \
            c += tmp[2];                                        \
            HASH_MIX(a, b, c);                                  \
            n = 0;                                              \
        }                                                       \
                                                                \
        /* Append to 'tmp' any data that didn't fit. */         \
        memcpy(tmp, p2, p2_size);                               \
        n += p2_size;                                           \
    }
    CLS_FIELDS
#undef CLS_FIELD

finish:
    /* Fold in whatever remains in 'tmp' (zero-padded above when we jumped
     * here early) and finalize. */
    a += tmp[0];
    b += tmp[1];
    c += tmp[2];
    HASH_FINAL(a, b, c);
    return c;
}
+
/* Compares the fields in 'a' and 'b' whose field indexes (CLS_F_IDX_*) are
 * less than 'table_idx'.  (If 'table_idx' is CLS_F_IDX_EXACT, compares all the
 * fields in 'a' and 'b').
 *
 * Returns true if all the compared fields are equal, false otherwise.
 *
 * The macro expansion below produces one if/else-if pair per CLS_FIELDS
 * entry, in field order, so the function bails out at the first unequal field
 * or as soon as 'table_idx' is reached. */
static bool
equal_fields(const struct flow *a, const struct flow *b, int table_idx)
{
    /* XXX The generated code could be better here. */
#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                              \
    if (table_idx == CLS_F_IDX_##NAME) {                                \
        return true;                                                    \
    } else if (!equal_bytes(&a->MEMBER, &b->MEMBER, sizeof a->MEMBER)) { \
        return false;                                                   \
    }
    CLS_FIELDS
#undef CLS_FIELD

    /* Reached only when 'table_idx' is CLS_F_IDX_EXACT and all fields
     * compared equal. */
    return true;
}
+
/* Returns the table index for a rule with the given 'wildcards': the index
 * (in CLS_FIELDS order) of the first wildcarded field, or CLS_F_IDX_EXACT if
 * 'wildcards' is zero.  All fields before the first wildcarded one are
 * treated as the rule's fixed prefix. */
static int
table_idx_from_wildcards(uint32_t wildcards)
{
    if (!wildcards) {
        return CLS_F_IDX_EXACT;
    }
#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
    if (wildcards & WILDCARDS) {           \
        return CLS_F_IDX_##NAME;           \
    }
    CLS_FIELDS
#undef CLS_FIELD
    /* 'wildcards' was nonzero, so some CLS_FIELD test above must match. */
    NOT_REACHED();
}
+
+/* Inserts 'rule' into 'table'. Returns the rule, if any, that was displaced
+ * in favor of 'rule'. */
+static struct cls_rule *
+table_insert(struct hmap *table, struct cls_rule *rule)
+{
+ struct cls_bucket *bucket;
+ size_t hash;
+
+ hash = hash_fields(&rule->flow, rule->table_idx);
+ bucket = find_bucket(table, hash, rule);
+ if (!bucket) {
+ bucket = create_bucket(table, hash, &rule->flow);
+ }
+
+ return bucket_insert(bucket, rule);
+}
+
/* Inserts 'rule' into 'bucket', keeping the bucket's rule list ordered from
 * highest to lowest priority.
 *
 * Returns the rule, if any, that was displaced in favor of 'rule'. */
static struct cls_rule *
bucket_insert(struct cls_bucket *bucket, struct cls_rule *rule)
{
    struct cls_rule *pos;
    LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
        if (pos->priority <= rule->priority) {
            if (pos->priority == rule->priority
                && pos->wc.wildcards == rule->wc.wildcards
                && rules_match_1wild(pos, rule, rule->table_idx))
            {
                /* 'pos' is an exact duplicate of 'rule': replace it. */
                list_replace(&rule->node.list, &pos->node.list);
                return pos;
            }
            break;
        }
    }
    /* Insert 'rule' just before 'pos'.  This also works when the loop above
     * ran to completion: LIST_FOR_EACH then leaves 'pos' as the CONTAINER_OF
     * of the list head itself, so '&pos->node.list' aliases the head and the
     * insertion appends at the tail. */
    list_insert(&pos->node.list, &rule->node.list);
    return NULL;
}
+
+static struct cls_rule *
+insert_exact_rule(struct classifier *cls, struct cls_rule *rule)
+{
+ struct cls_rule *old_rule;
+ size_t hash;
+
+ hash = flow_hash(&rule->flow, 0);
+ old_rule = search_exact_table(cls, hash, &rule->flow);
+ if (old_rule) {
+ hmap_remove(&cls->exact_table, &old_rule->node.hmap);
+ }
+ hmap_insert(&cls->exact_table, &rule->node.hmap, hash);
+ return old_rule;
+}
+
+/* Returns the bucket in 'table' that has the given 'hash' and the same fields
+ * as 'rule->flow' (up to 'rule->table_idx'), or a null pointer if no bucket
+ * matches. */
+static struct cls_bucket *
+find_bucket(struct hmap *table, size_t hash, const struct cls_rule *rule)
+{
+ struct cls_bucket *bucket;
+ HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
+ table) {
+ if (equal_fields(&bucket->fixed, &rule->flow, rule->table_idx)) {
+ return bucket;
+ }
+ }
+ return NULL;
+}
+
+/* Creates a bucket and inserts it in 'table' with the given 'hash' and 'fixed'
+ * values. Returns the new bucket. */
+static struct cls_bucket *
+create_bucket(struct hmap *table, size_t hash, const struct flow *fixed)
+{
+ struct cls_bucket *bucket = xmalloc(sizeof *bucket);
+ list_init(&bucket->rules);
+ bucket->fixed = *fixed;
+ hmap_insert(table, &bucket->hmap_node, hash);
+ return bucket;
+}
+
+/* Returns true if the 'n' bytes in 'a' and 'b' are equal, false otherwise. */
+static inline bool ALWAYS_INLINE
+equal_bytes(const void *a, const void *b, size_t n)
+{
+#ifdef __i386__
+ /* For some reason GCC generates stupid code for memcmp() of small
+ * constant integer lengths. Help it out.
+ *
+ * This function is always inlined, and it is always called with 'n' as a
+ * compile-time constant, so the switch statement gets optimized out and
+ * this whole function just expands to an instruction or two. */
+ switch (n) {
+ case 1:
+ return *(uint8_t *) a == *(uint8_t *) b;
+
+ case 2:
+ return *(uint16_t *) a == *(uint16_t *) b;
+
+ case 4:
+ return *(uint32_t *) a == *(uint32_t *) b;
+
+ case 6:
+ return (*(uint32_t *) a == *(uint32_t *) b
+ && ((uint16_t *) a)[2] == ((uint16_t *) b)[2]);
+
+ default:
+ abort();
+ }
+#else
+ /* I hope GCC is smarter on your platform. */
+ return !memcmp(a, b, n);
+#endif
+}
+
/* Returns the 32-bit unsigned integer stored at 'p', which need not be
 * aligned.  The memcpy() avoids aliasing and alignment issues; GCC compiles
 * it into a single load on x86. */
static inline uint32_t
read_uint32(const void *p)
{
    uint32_t value;

    memcpy(&value, p, sizeof value);
    return value;
}
+
/* Compares the specified field in 'a_' and 'b_'.  Returns true if the fields
 * are equal, or if the ofp_match wildcard bits in 'wildcards' are set such
 * that non-equal values may be ignored.  'nw_src_mask' and 'nw_dst_mask' must
 * be those that would be set for 'wildcards' by cls_rule_set_masks().
 *
 * The compared field is the one with wildcard bit or bits 'field_wc', at
 * offset 'ofs' within struct flow, and of length 'len', in bytes. */
static inline bool ALWAYS_INLINE
field_matches(const struct flow *a_, const struct flow *b_,
              uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
              uint32_t field_wc, int ofs, int len)
{
    /* This function is always inlined, and it is always called with 'field_wc'
     * as a compile-time constant, so the "if" conditionals here generate no
     * code. */
    const void *a = (const uint8_t *) a_ + ofs;
    const void *b = (const uint8_t *) b_ + ofs;
    if (!(field_wc & (field_wc - 1))) {
        /* Handle all the single-bit wildcard cases: either the bit is set in
         * 'wildcards' (field ignored) or the bytes must compare equal. */
        return wildcards & field_wc || equal_bytes(a, b, len);
    } else if (field_wc == OFPFW_NW_SRC_MASK ||
               field_wc == OFPFW_NW_DST_MASK) {
        /* CIDR-masked IP fields: compare only the bits the mask keeps. */
        uint32_t a_ip = read_uint32(a);
        uint32_t b_ip = read_uint32(b);
        uint32_t mask = (field_wc == OFPFW_NW_SRC_MASK
                         ? nw_src_mask : nw_dst_mask);
        return ((a_ip ^ b_ip) & mask) == 0;
    } else {
        abort();
    }
}
+
/* Returns true if 'a' and 'b' match, ignoring fields for which the wildcards
 * in 'wildcards' are set.  'nw_src_mask' and 'nw_dst_mask' must be those that
 * would be set for 'wildcards' by cls_rule_set_masks().  'field_idx' is the
 * index of the first field to be compared; fields before 'field_idx' are
 * assumed to match.  (Always returns true if 'field_idx' is CLS_N_FIELDS.) */
static bool
rules_match(const struct cls_rule *a, const struct cls_rule *b,
            uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
            int field_idx)
{
    /* This is related to Duff's device (see
     * http://en.wikipedia.org/wiki/Duff's_device): the switch jumps into the
     * middle of a sequence of per-field comparisons, and each case falls
     * through into the next, so comparison starts at 'field_idx' and covers
     * every later field. */
    switch (field_idx) {
#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                          \
        case CLS_F_IDX_##NAME:                                      \
            if (!field_matches(&a->flow, &b->flow,                  \
                               wildcards, nw_src_mask, nw_dst_mask, \
                               WILDCARDS, offsetof(struct flow, MEMBER), \
                               sizeof a->flow.MEMBER)) {            \
                return false;                                       \
            }
        /* Fall through to the next field's case. */
        CLS_FIELDS
#undef CLS_FIELD
    }
    return true;
}
+
+/* Returns true if 'fixed' and 'wild' match. All fields in 'fixed' must have
+ * fixed values; 'wild' may contain wildcards.
+ *
+ * 'field_idx' is the index of the first field to be compared; fields before
+ * 'field_idx' are assumed to match. Always returns true if 'field_idx' is
+ * CLS_N_FIELDS. */
+static bool
+rules_match_1wild(const struct cls_rule *fixed, const struct cls_rule *wild,
+ int field_idx)
+{
+ return rules_match(fixed, wild, wild->wc.wildcards, wild->wc.nw_src_mask,
+ wild->wc.nw_dst_mask, field_idx);
+}
+
+/* Searches 'bucket' for a rule that matches 'target'. Returns the
+ * highest-priority match, if one is found, or a null pointer if there is no
+ * match.
+ *
+ * 'field_idx' must be the index of the first wildcarded field in 'bucket'. */
+static struct cls_rule *
+search_bucket(struct cls_bucket *bucket, int field_idx,
+ const struct cls_rule *target)
+{
+ struct cls_rule *pos;
+
+ if (!equal_fields(&bucket->fixed, &target->flow, field_idx)) {
+ return NULL;
+ }
+
+ LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
+ if (rules_match_1wild(target, pos, field_idx)) {
+ return pos;
+ }
+ }
+ return NULL;
+}
+
+/* Searches 'table' for a rule that matches 'target'. Returns the
+ * highest-priority match, if one is found, or a null pointer if there is no
+ * match.
+ *
+ * 'field_idx' must be the index of the first wildcarded field in 'table'. */
+static struct cls_rule *
+search_table(const struct hmap *table, int field_idx,
+ const struct cls_rule *target)
+{
+ struct cls_bucket *bucket;
+
+ switch (hmap_count(table)) {
+ /* In these special cases there's no need to hash. */
+ case 0:
+ return NULL;
+ case 1:
+ bucket = CONTAINER_OF(hmap_first(table), struct cls_bucket, hmap_node);
+ return search_bucket(bucket, field_idx, target);
+ }
+
+ HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node,
+ hash_fields(&target->flow, field_idx), table) {
+ struct cls_rule *rule = search_bucket(bucket, field_idx, target);
+ if (rule) {
+ return rule;
+ }
+ }
+ return NULL;
+}
+
+static struct cls_rule *
+search_exact_table(const struct classifier *cls, size_t hash,
+ const struct flow *target)
+{
+ struct cls_rule *rule;
+
+ HMAP_FOR_EACH_WITH_HASH (rule, struct cls_rule, node.hmap,
+ hash, &cls->exact_table) {
+ if (flow_equal(&rule->flow, target)) {
+ return rule;
+ }
+ }
+ return NULL;
+}
--- /dev/null
+/* Copyright (c) 2009 The Board of Trustees of The Leland Stanford
+ * Junior University
+ *
+ * We are making the OpenFlow specification and associated documentation
+ * (Software) available for public use and benefit with the expectation
+ * that others will use, modify and enhance the Software and contribute
+ * those enhancements back to the community. However, since we would
+ * like to make the Software available for broadest use, with as few
+ * restrictions as possible permission is hereby granted, free of
+ * charge, to any person obtaining a copy of this Software to deal in
+ * the Software under the copyrights without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * The name and trademarks of copyright holder(s) may NOT be used in
+ * advertising or publicity pertaining to the Software or any
+ * derivatives without specific, written prior permission.
+ */
+
+#ifndef CLASSIFIER_H
+#define CLASSIFIER_H 1
+
+/* Flow classifier.
+ *
+ * This flow classifier assumes that we can arrange the fields in a flow in an
+ * order such that the set of wildcarded fields in a rule tend to fall toward
+ * the end of the ordering. That is, if field F is wildcarded, then all the
+ * fields after F tend to be wildcarded as well. If this assumption is
+ * violated, then the classifier will still classify flows correctly, but its
+ * performance will suffer.
+ */
+
+#include "flow.h"
+#include "hmap.h"
+#include "list.h"
+#include "openflow/openflow.h"
+
+/* Number of bytes of fields in a rule. */
+#define CLS_N_BYTES 31
+
+/* Fields in a rule.
+ *
+ * This definition sets the ordering of fields, which is important for
+ * performance (see above). To adjust the ordering, change the order of the
+ * lines. */
+#define CLS_FIELDS \
+ /* flow_t all-caps */ \
+ /* wildcard bit(s) member name name */ \
+ /* ----------------- ----------- -------- */ \
+ CLS_FIELD(OFPFW_IN_PORT, in_port, IN_PORT) \
+ CLS_FIELD(OFPFW_DL_VLAN, dl_vlan, DL_VLAN) \
+ CLS_FIELD(OFPFW_DL_SRC, dl_src, DL_SRC) \
+ CLS_FIELD(OFPFW_DL_DST, dl_dst, DL_DST) \
+ CLS_FIELD(OFPFW_DL_TYPE, dl_type, DL_TYPE) \
+ CLS_FIELD(OFPFW_NW_SRC_MASK, nw_src, NW_SRC) \
+ CLS_FIELD(OFPFW_NW_DST_MASK, nw_dst, NW_DST) \
+ CLS_FIELD(OFPFW_NW_PROTO, nw_proto, NW_PROTO) \
+ CLS_FIELD(OFPFW_TP_SRC, tp_src, TP_SRC) \
+ CLS_FIELD(OFPFW_TP_DST, tp_dst, TP_DST)
+
+/* Field indexes.
+ *
+ * (These are also indexed into struct classifier's 'tables' array.) */
+enum {
+#define CLS_FIELD(WILDCARDS, MEMBER, NAME) CLS_F_IDX_##NAME,
+ CLS_FIELDS
+#undef CLS_FIELD
+ CLS_F_IDX_EXACT, /* Exact-match table. */
+ CLS_N_FIELDS = CLS_F_IDX_EXACT
+};
+
+/* Field information. */
+struct cls_field {
+ int ofs; /* Offset in flow_t. */
+ int len; /* Length in bytes. */
+ uint32_t wildcards; /* OFPFW_* bit or bits for this field. */
+ const char *name; /* Name (for debugging). */
+};
+extern const struct cls_field cls_fields[CLS_N_FIELDS + 1];
+
/* A flow classifier. */
struct classifier {
    int n_rules;                /* Total rules: wildcarded rules stored in
                                 * 'tables' plus exact-match rules stored in
                                 * 'exact_table'.  (Not the sum of
                                 * hmap_count() over 'tables', which would
                                 * count buckets, not rules.) */
    struct hmap tables[CLS_N_FIELDS]; /* Contain cls_bucket elements. */
    struct hmap exact_table;          /* Contain cls_rule elements. */
};
+
+/* A group of rules with the same fixed values for initial fields. */
+struct cls_bucket {
+ struct hmap_node hmap_node; /* Within struct classifier 'tables'. */
+ struct list rules; /* In order from highest to lowest priority. */
+ struct flow fixed; /* Values for fixed fields. */
+};
+
/* A flow classification rule.
 *
 * Use cls_rule_from_flow() or cls_rule_from_match() to initialize a cls_rule
 * or you will almost certainly not initialize 'table_idx' correctly, with
 * disastrous results! */
struct cls_rule {
    union {
        /* Which member is in use depends on the rule: 'list' for wildcarded
         * rules (wc.wildcards != 0), 'hmap' for exact-match rules. */
        struct list list;       /* Within struct cls_bucket 'rules'. */
        struct hmap_node hmap;  /* Within struct classifier 'exact_table'. */
    } node;
    struct flow flow;           /* All field values. */
    struct flow_wildcards wc;   /* Wildcards for fields. */
    uint16_t priority;          /* Larger numbers are higher priorities. */
    unsigned short table_idx;   /* Index into struct classifier 'tables'. */
};
+
+/* Rule initialization, printing, and relocation. */
+void cls_rule_from_flow(struct cls_rule *, const struct flow *,
+ uint32_t wildcards, uint16_t priority);
+void cls_rule_from_match(struct cls_rule *, const struct ofp_match *,
+ uint16_t priority);
+void cls_rule_print(const struct cls_rule *);
+void cls_rule_moved(struct classifier *,
+ struct cls_rule *old, struct cls_rule *new);
+void cls_rule_replace(struct classifier *, const struct cls_rule *old,
+ struct cls_rule *new);
+
+/* Classifier lifetime, counting, insertion, removal, and lookup. */
+void classifier_init(struct classifier *);
+void classifier_destroy(struct classifier *);
+bool classifier_is_empty(const struct classifier *);
+int classifier_count(const struct classifier *);
+int classifier_count_exact(const struct classifier *);
+struct cls_rule *classifier_insert(struct classifier *, struct cls_rule *);
+void classifier_remove(struct classifier *, struct cls_rule *);
+struct cls_rule *classifier_lookup(const struct classifier *,
+ const struct flow *);
+
+/* Iteration over rules, optionally filtered by wildcards or by match. */
+typedef void cls_cb_func(struct cls_rule *, void *aux);
+void classifier_for_each(const struct classifier *, cls_cb_func *, void *aux);
+void classifier_for_each_with_wildcards(const struct classifier *,
+ uint32_t wildcards,
+ cls_cb_func *, void *aux);
+
+/* Flags for classifier_for_each_match()'s 'include' argument. */
+enum {
+ CLS_INC_EXACT = 1 << 0, /* Include exact-match flows? */
+ CLS_INC_WILD = 1 << 1 /* Include flows with wildcards? */
+};
+void classifier_for_each_match(const struct classifier *,
+ const struct cls_rule *,
+ int include, cls_cb_func *, void *aux);
+
+struct cls_rule *classifier_find_rule_exactly(const struct classifier *,
+ const struct flow *target,
+ uint32_t wildcards,
+ uint16_t priority);
+#endif /* classifier.h */
+# Unit test for the flow classifier (lib/classifier.c).
+TESTS += tests/test-classifier
+noinst_PROGRAMS += tests/test-classifier
+tests_test_classifier_SOURCES = tests/test-classifier.c
+tests_test_classifier_LDADD = lib/libopenflow.a
+
TESTS += tests/test-flows.sh
noinst_PROGRAMS += tests/test-flows
tests_test_flows_SOURCES = tests/test-flows.c
--- /dev/null
+/* Copyright (c) 2009 The Board of Trustees of The Leland Stanford
+ * Junior University
+ *
+ * We are making the OpenFlow specification and associated documentation
+ * (Software) available for public use and benefit with the expectation
+ * that others will use, modify and enhance the Software and contribute
+ * those enhancements back to the community. However, since we would
+ * like to make the Software available for broadest use, with as few
+ * restrictions as possible permission is hereby granted, free of
+ * charge, to any person obtaining a copy of this Software to deal in
+ * the Software under the copyrights without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * The name and trademarks of copyright holder(s) may NOT be used in
+ * advertising or publicity pertaining to the Software or any
+ * derivatives without specific, written prior permission.
+ */
+
+/* "White box" tests for classifier.
+ *
+ * With very few exceptions, these tests obtain complete coverage of every
+ * basic block and every branch in the classifier implementation, e.g. a clean
+ * report from "gcov -b". (Covering the exceptions would require finding
+ * collisions in the hash function used for flow data, etc.)
+ *
+ * This test should receive a clean report from "valgrind --leak-check=full":
+ * it frees every heap block that it allocates.
+ */
+
+#include <config.h>
+#include "classifier.h"
+#include <errno.h>
+#include "flow.h"
+#include "packets.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+/* A classifier rule together with auxiliary test data. */
+struct test_rule {
+ struct cls_rule cls_rule; /* Classifier rule data. */
+ int aux; /* Auxiliary data. */
+};
+
+/* Returns the test_rule that contains 'rule', or a null pointer if 'rule'
+ * is null. */
+static struct test_rule *
+test_rule_from_cls_rule(const struct cls_rule *rule)
+{
+ return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
+}
+
+/* Trivial (linear) classifier. */
+struct tcls {
+ size_t n_rules; /* Number of rules in 'rules'. */
+ size_t allocated_rules; /* Allocated elements of 'rules'. */
+ struct test_rule **rules; /* Sorted from highest to lowest priority. */
+};
+
+/* Initializes 'tcls' as an empty classifier. */
+static void
+tcls_init(struct tcls *tcls)
+{
+ tcls->n_rules = 0;
+ tcls->allocated_rules = 0;
+ tcls->rules = NULL;
+}
+
+/* Frees the rules in 'tcls' and its rule array, but not 'tcls' itself. */
+static void
+tcls_destroy(struct tcls *tcls)
+{
+ if (tcls) {
+ size_t i;
+
+ for (i = 0; i < tcls->n_rules; i++) {
+ free(tcls->rules[i]);
+ }
+ free(tcls->rules);
+ }
+}
+
+/* Returns true if 'tcls' contains no rules, false otherwise. */
+static bool
+tcls_is_empty(const struct tcls *tcls)
+{
+ return tcls->n_rules == 0;
+}
+
+/* Inserts a copy of 'rule' into 'tcls' and returns the copy.  If 'tcls'
+ * already contains a rule with the same priority, wildcards, and flow, the
+ * existing rule is replaced in place.  Otherwise the copy is inserted so
+ * that 'rules' stays sorted from highest to lowest priority, with a new
+ * rule placed ahead of existing rules of equal priority. */
+static struct test_rule *
+tcls_insert(struct tcls *tcls, const struct test_rule *rule)
+{
+ size_t i;
+
+ /* Exact-match rules (no wildcards) must have maximum priority. */
+ assert(rule->cls_rule.wc.wildcards || rule->cls_rule.priority == UINT16_MAX);
+ for (i = 0; i < tcls->n_rules; i++) {
+ const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
+ if (pos->priority == rule->cls_rule.priority
+ && pos->wc.wildcards == rule->cls_rule.wc.wildcards
+ && flow_equal(&pos->flow, &rule->cls_rule.flow)) {
+ /* Exact match.
+ * XXX flow_equal should ignore wildcarded fields */
+ free(tcls->rules[i]);
+ tcls->rules[i] = xmemdup(rule, sizeof *rule);
+ return tcls->rules[i];
+ } else if (pos->priority <= rule->cls_rule.priority) {
+ break;
+ }
+ }
+
+ if (tcls->n_rules >= tcls->allocated_rules) {
+ tcls->rules = x2nrealloc(tcls->rules, &tcls->allocated_rules,
+ sizeof *tcls->rules);
+ }
+ if (i != tcls->n_rules) {
+ memmove(&tcls->rules[i + 1], &tcls->rules[i],
+ sizeof *tcls->rules * (tcls->n_rules - i));
+ }
+ tcls->rules[i] = xmemdup(rule, sizeof *rule);
+ tcls->n_rules++;
+ return tcls->rules[i];
+}
+
+/* Removes 'rule' from 'cls' and frees it.  'rule' must be in 'cls';
+ * otherwise NOT_REACHED() aborts. */
+static void
+tcls_remove(struct tcls *cls, const struct test_rule *rule)
+{
+ size_t i;
+
+ for (i = 0; i < cls->n_rules; i++) {
+ struct test_rule *pos = cls->rules[i];
+ if (pos == rule) {
+ free(pos);
+ memmove(&cls->rules[i], &cls->rules[i + 1],
+ sizeof *cls->rules * (cls->n_rules - i - 1));
+ cls->n_rules--;
+ return;
+ }
+ }
+ NOT_REACHED();
+}
+
+/* Reads a uint32_t from 'p', which may be unaligned; memcpy sidesteps
+ * alignment and strict-aliasing issues. */
+static uint32_t
+read_uint32(const void *p)
+{
+ uint32_t x;
+ memcpy(&x, p, sizeof x);
+ return x;
+}
+
+/* Returns true if the fixed field values in 'fixed' match the (possibly
+ * wildcarded) rule 'wild', false otherwise.  A field matches if it is fully
+ * wildcarded, if its bytes are equal, or, for nw_src/nw_dst, if the bits not
+ * covered by the CIDR-style wildcard bit count are equal. */
+static bool
+match(const struct cls_rule *wild, const struct flow *fixed)
+{
+ int f_idx;
+
+ for (f_idx = 0; f_idx < CLS_N_FIELDS; f_idx++) {
+ const struct cls_field *f = &cls_fields[f_idx];
+ void *wild_field = (char *) &wild->flow + f->ofs;
+ void *fixed_field = (char *) fixed + f->ofs;
+
+ if ((wild->wc.wildcards & f->wildcards) == f->wildcards ||
+ !memcmp(wild_field, fixed_field, f->len)) {
+ /* Definite match. */
+ continue;
+ }
+
+ /* Partially wildcarded nw_src or nw_dst field: compare only the
+ * bits that the netmask keeps. */
+ if (wild->wc.wildcards & f->wildcards) {
+ uint32_t test = read_uint32(wild_field);
+ uint32_t ip = read_uint32(fixed_field);
+ int shift = (f_idx == CLS_F_IDX_NW_SRC
+ ? OFPFW_NW_SRC_SHIFT : OFPFW_NW_DST_SHIFT);
+ uint32_t mask = flow_nw_bits_to_mask(wild->wc.wildcards, shift);
+ if (!((test ^ ip) & mask)) {
+ continue;
+ }
+ }
+
+ return false;
+ }
+ return true;
+}
+
+/* Returns the highest-priority rule in 'cls' that matches 'flow' (rules are
+ * kept sorted from highest to lowest priority), or a null pointer if no rule
+ * matches. */
+static struct cls_rule *
+tcls_lookup(const struct tcls *cls, const struct flow *flow)
+{
+ size_t i;
+
+ for (i = 0; i < cls->n_rules; i++) {
+ struct test_rule *pos = cls->rules[i];
+ if (match(&pos->cls_rule, flow)) {
+ return &pos->cls_rule;
+ }
+ }
+ return NULL;
+}
+\f
+/* Compile-time constant versions of htonl() and htons(), usable in static
+ * initializers below. */
+#ifdef WORDS_BIGENDIAN
+#define HTONL(VALUE) ((uint32_t) (VALUE))
+#define HTONS(VALUE) ((uint16_t) (VALUE))
+#else
+#define HTONL(VALUE) (((((uint32_t) (VALUE)) & 0x000000ff) << 24) | \
+ ((((uint32_t) (VALUE)) & 0x0000ff00) << 8) | \
+ ((((uint32_t) (VALUE)) & 0x00ff0000) >> 8) | \
+ ((((uint32_t) (VALUE)) & 0xff000000) >> 24))
+#define HTONS(VALUE) (((((uint16_t) (VALUE)) & 0xff00) >> 8) | \
+ ((((uint16_t) (VALUE)) & 0x00ff) << 8))
+#endif
+
+/* Two candidate values for each flow field, used to build the test flows and
+ * rules.  values[] below maps each CLS_F_IDX_* to its two candidates. */
+static uint32_t nw_src_values[] = { HTONL(0xc0a80001),
+ HTONL(0xc0a04455) };
+static uint32_t nw_dst_values[] = { HTONL(0xc0a80002),
+ HTONL(0xc0a04455) };
+static uint16_t in_port_values[] = { HTONS(1), HTONS(OFPP_LOCAL) };
+static uint16_t dl_vlan_values[] = { HTONS(101), HTONS(0) };
+static uint16_t dl_type_values[] = { HTONS(ETH_TYPE_IP), HTONS(ETH_TYPE_ARP) };
+static uint16_t tp_src_values[] = { HTONS(49362), HTONS(80) };
+static uint16_t tp_dst_values[] = { HTONS(6667), HTONS(22) };
+static uint8_t dl_src_values[][6] = { { 0x00, 0x02, 0xe3, 0x0f, 0x80, 0xa4 },
+ { 0x5e, 0x33, 0x7f, 0x5f, 0x1e, 0x99 } };
+static uint8_t dl_dst_values[][6] = { { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 },
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
+static uint8_t nw_proto_values[] = { IP_TYPE_TCP, IP_TYPE_ICMP };
+
+static void *values[CLS_N_FIELDS][2];
+
+/* Populates 'values' with pointers into the per-field arrays above. */
+static void
+init_values(void)
+{
+ values[CLS_F_IDX_IN_PORT][0] = &in_port_values[0];
+ values[CLS_F_IDX_IN_PORT][1] = &in_port_values[1];
+
+ values[CLS_F_IDX_DL_VLAN][0] = &dl_vlan_values[0];
+ values[CLS_F_IDX_DL_VLAN][1] = &dl_vlan_values[1];
+
+ values[CLS_F_IDX_DL_SRC][0] = dl_src_values[0];
+ values[CLS_F_IDX_DL_SRC][1] = dl_src_values[1];
+
+ values[CLS_F_IDX_DL_DST][0] = dl_dst_values[0];
+ values[CLS_F_IDX_DL_DST][1] = dl_dst_values[1];
+
+ values[CLS_F_IDX_DL_TYPE][0] = &dl_type_values[0];
+ values[CLS_F_IDX_DL_TYPE][1] = &dl_type_values[1];
+
+ values[CLS_F_IDX_NW_SRC][0] = &nw_src_values[0];
+ values[CLS_F_IDX_NW_SRC][1] = &nw_src_values[1];
+
+ values[CLS_F_IDX_NW_DST][0] = &nw_dst_values[0];
+ values[CLS_F_IDX_NW_DST][1] = &nw_dst_values[1];
+
+ values[CLS_F_IDX_NW_PROTO][0] = &nw_proto_values[0];
+ values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1];
+
+ values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0];
+ values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1];
+
+ values[CLS_F_IDX_TP_DST][0] = &tp_dst_values[0];
+ values[CLS_F_IDX_TP_DST][1] = &tp_dst_values[1];
+}
+
+#define N_NW_SRC_VALUES ARRAY_SIZE(nw_src_values)
+#define N_NW_DST_VALUES ARRAY_SIZE(nw_dst_values)
+#define N_IN_PORT_VALUES ARRAY_SIZE(in_port_values)
+#define N_DL_VLAN_VALUES ARRAY_SIZE(dl_vlan_values)
+#define N_DL_TYPE_VALUES ARRAY_SIZE(dl_type_values)
+#define N_TP_SRC_VALUES ARRAY_SIZE(tp_src_values)
+#define N_TP_DST_VALUES ARRAY_SIZE(tp_dst_values)
+#define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values)
+#define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values)
+#define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values)
+
+/* Total number of distinct flows that compare_classifiers() checks. */
+#define N_FLOW_VALUES (N_NW_SRC_VALUES * \
+ N_NW_DST_VALUES * \
+ N_IN_PORT_VALUES * \
+ N_DL_VLAN_VALUES * \
+ N_DL_TYPE_VALUES * \
+ N_TP_SRC_VALUES * \
+ N_TP_DST_VALUES * \
+ N_DL_SRC_VALUES * \
+ N_DL_DST_VALUES * \
+ N_NW_PROTO_VALUES)
+
+/* Returns '*x' modulo 'n_values' and divides '*x' by 'n_values', so that
+ * repeated calls decompose a flow number into per-field value indexes. */
+static unsigned int
+get_value(unsigned int *x, unsigned n_values)
+{
+ unsigned int rem = *x % n_values;
+ *x /= n_values;
+ return rem;
+}
+
+/* Constructs every flow in the test universe (N_FLOW_VALUES of them) and
+ * asserts that 'cls' and 'tcls' give equivalent lookup results for each:
+ * either both miss, or both return rules with equal flow, wildcards,
+ * priority, and auxiliary data. */
+static void
+compare_classifiers(struct classifier *cls, struct tcls *tcls)
+{
+ unsigned int i;
+
+ for (i = 0; i < N_FLOW_VALUES; i++) {
+ struct cls_rule *cr0, *cr1;
+ struct flow flow;
+ unsigned int x;
+
+ x = i;
+ flow.nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)];
+ flow.nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)];
+ flow.in_port = in_port_values[get_value(&x, N_IN_PORT_VALUES)];
+ flow.dl_vlan = dl_vlan_values[get_value(&x, N_DL_VLAN_VALUES)];
+ flow.dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)];
+ flow.tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)];
+ flow.tp_dst = tp_dst_values[get_value(&x, N_TP_DST_VALUES)];
+ memcpy(flow.dl_src, dl_src_values[get_value(&x, N_DL_SRC_VALUES)],
+ ETH_ADDR_LEN);
+ memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)],
+ ETH_ADDR_LEN);
+ flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
+ flow.reserved = 0;
+
+ cr0 = classifier_lookup(cls, &flow);
+ cr1 = tcls_lookup(tcls, &flow);
+ assert((cr0 == NULL) == (cr1 == NULL));
+ if (cr0 != NULL) {
+ const struct test_rule *tr0 = test_rule_from_cls_rule(cr0);
+ const struct test_rule *tr1 = test_rule_from_cls_rule(cr1);
+
+ assert(flow_equal(&cr0->flow, &cr1->flow));
+ assert(cr0->wc.wildcards == cr1->wc.wildcards);
+ assert(cr0->priority == cr1->priority);
+ /* Skip nw_src_mask and nw_dst_mask, because they are derived
+ * members whose values are used only for optimization. */
+ assert(tr0->aux == tr1->aux);
+ }
+ }
+}
+
+/* Callback for classifier_for_each(): removes 'cls_rule' from the classifier
+ * passed as 'cls' and frees the test_rule that contains it. */
+static void
+free_rule(struct cls_rule *cls_rule, void *cls)
+{
+ classifier_remove(cls, cls_rule);
+ free(test_rule_from_cls_rule(cls_rule));
+}
+
+/* Frees every rule in 'cls', then destroys 'cls' itself. */
+static void
+destroy_classifier(struct classifier *cls)
+{
+ classifier_for_each(cls, free_rule, cls);
+ classifier_destroy(cls);
+}
+
+/* Checks that 'cls' contains exactly 'n_tables' nonempty tables, 'n_buckets'
+ * buckets, and 'n_rules' rules.  Pass -1 for any expected count to skip that
+ * check.  A nonempty exact_table counts as one table and one bucket. */
+static void
+check_tables(const struct classifier *cls,
+ int n_tables, int n_buckets, int n_rules)
+{
+ int found_tables = 0;
+ int found_buckets = 0;
+ int found_rules = 0;
+ int i;
+
+ BUILD_ASSERT(CLS_N_FIELDS == ARRAY_SIZE(cls->tables));
+ for (i = 0; i < CLS_N_FIELDS; i++) {
+ const struct cls_bucket *bucket;
+ if (!hmap_is_empty(&cls->tables[i])) {
+ found_tables++;
+ }
+ HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, &cls->tables[i]) {
+ found_buckets++;
+ assert(!list_is_empty(&bucket->rules));
+ found_rules += list_size(&bucket->rules);
+ }
+ }
+
+ if (!hmap_is_empty(&cls->exact_table)) {
+ found_tables++;
+ found_buckets++;
+ found_rules += hmap_count(&cls->exact_table);
+ }
+
+ assert(n_tables == -1 || found_tables == n_tables);
+ assert(n_rules == -1 || found_rules == n_rules);
+ assert(n_buckets == -1 || found_buckets == n_buckets);
+}
+
+/* Allocates and returns a new test_rule with the given 'priority'.  Each bit
+ * set in 'wc_fields' wildcards the corresponding classifier field; each
+ * remaining field takes values[f_idx][0] or values[f_idx][1] according to
+ * the corresponding bit of 'value_pat'.  The caller must free the rule. */
+static struct test_rule *
+make_rule(int wc_fields, int priority, int value_pat)
+{
+ const struct cls_field *f;
+ struct test_rule *rule;
+ uint32_t wildcards;
+ struct flow flow;
+
+ wildcards = 0;
+ memset(&flow, 0, sizeof flow);
+ for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
+ int f_idx = f - cls_fields;
+ if (wc_fields & (1u << f_idx)) {
+ wildcards |= f->wildcards;
+ } else {
+ int value_idx = (value_pat & (1u << f_idx)) != 0;
+ memcpy((char *) &flow + f->ofs, values[f_idx][value_idx], f->len);
+ }
+ }
+
+ rule = xcalloc(1, sizeof *rule);
+ cls_rule_from_flow(&rule->cls_rule, &flow, wildcards, priority);
+ return rule;
+}
+
+/* Randomly shuffles the 'n' ints starting at 'p' (Fisher-Yates). */
+static void
+shuffle(int *p, size_t n)
+{
+ for (; n > 1; n--, p++) {
+ int *q = &p[rand() % n];
+ int tmp = *p;
+ *p = *q;
+ *q = tmp;
+ }
+}
+\f
+/* Tests an empty classifier. */
+static void
+test_empty(void)
+{
+ struct classifier cls;
+ struct tcls tcls;
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+ assert(classifier_is_empty(&cls));
+ assert(tcls_is_empty(&tcls));
+ compare_classifiers(&cls, &tcls);
+ classifier_destroy(&cls);
+ tcls_destroy(&tcls);
+}
+
+/* Destroys a null classifier. */
+static void
+test_destroy_null(void)
+{
+ classifier_destroy(NULL);
+}
+
+/* Tests classification with one rule at a time. */
+static void
+test_single_rule(void)
+{
+ unsigned int wc_fields; /* Bitmap of fields to wildcard. */
+
+ for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
+ struct classifier cls;
+ struct test_rule *rule, *tcls_rule;
+ struct tcls tcls;
+
+ /* The priority is an arbitrary value derived from 'wc_fields'. */
+ rule = make_rule(wc_fields,
+ hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+
+ tcls_rule = tcls_insert(&tcls, rule);
+ assert(!classifier_insert(&cls, &rule->cls_rule));
+ check_tables(&cls, 1, 1, 1);
+ compare_classifiers(&cls, &tcls);
+
+ classifier_remove(&cls, &rule->cls_rule);
+ tcls_remove(&tcls, tcls_rule);
+ assert(classifier_is_empty(&cls));
+ assert(tcls_is_empty(&tcls));
+ compare_classifiers(&cls, &tcls);
+
+ free(rule);
+ classifier_destroy(&cls);
+ tcls_destroy(&tcls);
+ }
+}
+
+/* Tests replacing one rule by another with the same priority, wildcards, and
+ * flow: classifier_insert() must return the displaced rule. */
+static void
+test_rule_replacement(void)
+{
+ unsigned int wc_fields;
+
+ for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
+ struct classifier cls;
+ struct test_rule *rule1, *tcls_rule1;
+ struct test_rule *rule2, *tcls_rule2;
+ struct tcls tcls;
+
+ rule1 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
+ rule2 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
+ /* Make rule2's auxiliary data differ from rule1's so that
+ * compare_classifiers() can tell the two rules apart. */
+ rule2->aux += 5;
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+ tcls_rule1 = tcls_insert(&tcls, rule1);
+ assert(!classifier_insert(&cls, &rule1->cls_rule));
+ check_tables(&cls, 1, 1, 1);
+ compare_classifiers(&cls, &tcls);
+ tcls_destroy(&tcls);
+
+ tcls_init(&tcls);
+ tcls_rule2 = tcls_insert(&tcls, rule2);
+ assert(test_rule_from_cls_rule(
+ classifier_insert(&cls, &rule2->cls_rule)) == rule1);
+ free(rule1);
+ check_tables(&cls, 1, 1, 1);
+ compare_classifiers(&cls, &tcls);
+ tcls_destroy(&tcls);
+ destroy_classifier(&cls);
+ }
+}
+
+/* Returns a bitmap of the fields numbered 'table' through CLS_N_FIELDS - 1,
+ * i.e. the fields that a rule in 'table' is permitted to wildcard. */
+static int
+table_mask(int table)
+{
+ return ((1u << CLS_N_FIELDS) - 1) & ~((1u << table) - 1);
+}
+
+/* Returns a pseudorandom wildcard-field bitmap, seeded by 'seed', that puts
+ * a rule into table 'table': field 'table' is always wildcarded and no field
+ * numbered below 'table' is. */
+static int
+random_wcf_in_table(int table, int seed)
+{
+ int wc_fields = (1u << table) | hash_bytes(&seed, sizeof seed, 0);
+ return wc_fields & table_mask(table);
+}
+
+/* Tests classification with two rules at a time that fall into the same
+ * bucket. */
+static void
+test_two_rules_in_one_bucket(void)
+{
+ int table, rel_pri, wcf_pat, value_pat;
+
+ /* NOTE(review): table == CLS_N_FIELDS presumably equals CLS_F_IDX_EXACT,
+ * so the last iteration exercises the exact-match table -- confirm
+ * against classifier.h. */
+ for (table = 0; table <= CLS_N_FIELDS; table++) {
+ for (rel_pri = -1; rel_pri <= +1; rel_pri++) {
+ for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) {
+ int n_value_pats = table == CLS_N_FIELDS - 1 ? 1 : 2;
+ for (value_pat = 0; value_pat < n_value_pats; value_pat++) {
+ struct test_rule *rule1, *tcls_rule1;
+ struct test_rule *rule2, *tcls_rule2;
+ struct test_rule *displaced_rule;
+ struct classifier cls;
+ struct tcls tcls;
+ int pri1, pri2;
+ int wcf1, wcf2;
+
+ if (table != CLS_F_IDX_EXACT) {
+ /* We can use identical priorities in this test because
+ * the classifier always chooses the rule added later
+ * for equal-priority rules that fall into the same
+ * bucket. */
+ pri1 = table * 257 + 50;
+ pri2 = pri1 + rel_pri;
+
+ wcf1 = (wcf_pat & 1
+ ? random_wcf_in_table(table, pri1)
+ : 1u << table);
+ wcf2 = (wcf_pat & 2
+ ? random_wcf_in_table(table, pri2)
+ : 1u << table);
+ if (value_pat) {
+ wcf1 &= ~(1u << (CLS_N_FIELDS - 1));
+ wcf2 &= ~(1u << (CLS_N_FIELDS - 1));
+ }
+ } else {
+ /* This classifier always puts exact-match rules at
+ * maximum priority. */
+ pri1 = pri2 = UINT16_MAX;
+
+ /* No wildcard fields. */
+ wcf1 = wcf2 = 0;
+ }
+
+ rule1 = make_rule(wcf1, pri1, 0);
+ rule2 = make_rule(wcf2, pri2,
+ value_pat << (CLS_N_FIELDS - 1));
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+
+ tcls_rule1 = tcls_insert(&tcls, rule1);
+ tcls_rule2 = tcls_insert(&tcls, rule2);
+ assert(!classifier_insert(&cls, &rule1->cls_rule));
+ displaced_rule = test_rule_from_cls_rule(
+ classifier_insert(&cls, &rule2->cls_rule));
+ if (wcf1 != wcf2 || pri1 != pri2 || value_pat) {
+ /* The rules are distinct, so rule2 must not displace
+ * rule1. */
+ assert(!displaced_rule);
+
+ check_tables(&cls, 1, 1, 2);
+ compare_classifiers(&cls, &tcls);
+
+ classifier_remove(&cls, &rule1->cls_rule);
+ tcls_remove(&tcls, tcls_rule1);
+ check_tables(&cls, 1, 1, 1);
+ compare_classifiers(&cls, &tcls);
+ } else {
+ assert(displaced_rule == rule1);
+ check_tables(&cls, 1, 1, 1);
+ compare_classifiers(&cls, &tcls);
+ }
+ free(rule1);
+
+ classifier_remove(&cls, &rule2->cls_rule);
+ tcls_remove(&tcls, tcls_rule2);
+ compare_classifiers(&cls, &tcls);
+ free(rule2);
+
+ destroy_classifier(&cls);
+ tcls_destroy(&tcls);
+ }
+ }
+ }
+ }
+}
+
+/* Tests classification with two rules at a time that fall into the same
+ * table but different buckets. */
+static void
+test_two_rules_in_one_table(void)
+{
+ int table, rel_pri, wcf_pat;
+
+ /* Skip tables 0 and CLS_F_IDX_EXACT because they have one bucket. */
+ for (table = 1; table < CLS_N_FIELDS; table++) {
+ for (rel_pri = -1; rel_pri <= +1; rel_pri++) {
+ for (wcf_pat = 0; wcf_pat < 5; wcf_pat++) {
+ struct test_rule *rule1, *tcls_rule1;
+ struct test_rule *rule2, *tcls_rule2;
+ struct classifier cls;
+ struct tcls tcls;
+ int pri1, pri2;
+ int wcf1, wcf2;
+ int value_mask, value_pat1, value_pat2;
+ int i;
+
+ /* We can use identical priorities in this test because the
+ * classifier always chooses the rule added later for
+ * equal-priority rules that fall into the same table. */
+ pri1 = table * 257 + 50;
+ pri2 = pri1 + rel_pri;
+
+ if (wcf_pat & 4) {
+ /* Identical wildcard bitmaps for both rules. */
+ wcf1 = wcf2 = random_wcf_in_table(table, pri1);
+ } else {
+ wcf1 = (wcf_pat & 1
+ ? random_wcf_in_table(table, pri1)
+ : 1u << table);
+ wcf2 = (wcf_pat & 2
+ ? random_wcf_in_table(table, pri2)
+ : 1u << table);
+ }
+
+ /* Generate value patterns that will put the two rules into
+ * different buckets. */
+ value_mask = ((1u << table) - 1);
+ value_pat1 = hash_bytes(&pri1, sizeof pri1, 1) & value_mask;
+ i = 0;
+ do {
+ value_pat2 = (hash_bytes(&pri2, sizeof pri2, i++)
+ & value_mask);
+ } while (value_pat1 == value_pat2);
+ rule1 = make_rule(wcf1, pri1, value_pat1);
+ rule2 = make_rule(wcf2, pri2, value_pat2);
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+
+ tcls_rule1 = tcls_insert(&tcls, rule1);
+ tcls_rule2 = tcls_insert(&tcls, rule2);
+ assert(!classifier_insert(&cls, &rule1->cls_rule));
+ assert(!classifier_insert(&cls, &rule2->cls_rule));
+ check_tables(&cls, 1, 2, 2);
+ compare_classifiers(&cls, &tcls);
+
+ classifier_remove(&cls, &rule1->cls_rule);
+ tcls_remove(&tcls, tcls_rule1);
+ check_tables(&cls, 1, 1, 1);
+ compare_classifiers(&cls, &tcls);
+ free(rule1);
+
+ classifier_remove(&cls, &rule2->cls_rule);
+ tcls_remove(&tcls, tcls_rule2);
+ compare_classifiers(&cls, &tcls);
+ free(rule2);
+
+ classifier_destroy(&cls);
+ tcls_destroy(&tcls);
+ }
+ }
+ }
+}
+
+/* Tests classification with two rules at a time that fall into different
+ * tables. */
+static void
+test_two_rules_in_different_tables(void)
+{
+ int table1, table2, rel_pri, wcf_pat;
+
+ for (table1 = 0; table1 < CLS_N_FIELDS; table1++) {
+ for (table2 = table1 + 1; table2 <= CLS_N_FIELDS; table2++) {
+ for (rel_pri = 0; rel_pri < 2; rel_pri++) {
+ for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) {
+ struct test_rule *rule1, *tcls_rule1;
+ struct test_rule *rule2, *tcls_rule2;
+ struct classifier cls;
+ struct tcls tcls;
+ int pri1, pri2;
+ int wcf1, wcf2;
+
+ /* We must use unique priorities in this test because the
+ * classifier makes the rule choice undefined for rules of
+ * equal priority that fall into different tables. (In
+ * practice, lower-numbered tables win.) */
+ pri1 = table1 * 257 + 50;
+ pri2 = rel_pri ? pri1 - 1 : pri1 + 1;
+
+ wcf1 = (wcf_pat & 1
+ ? random_wcf_in_table(table1, pri1)
+ : 1u << table1);
+ wcf2 = (wcf_pat & 2
+ ? random_wcf_in_table(table2, pri2)
+ : 1u << table2);
+
+ /* Exact-match rules must have no wildcards and maximum
+ * priority. */
+ if (table2 == CLS_F_IDX_EXACT) {
+ pri2 = UINT16_MAX;
+ wcf2 = 0;
+ }
+
+ rule1 = make_rule(wcf1, pri1, 0);
+ rule2 = make_rule(wcf2, pri2, 0);
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+
+ tcls_rule1 = tcls_insert(&tcls, rule1);
+ tcls_rule2 = tcls_insert(&tcls, rule2);
+ assert(!classifier_insert(&cls, &rule1->cls_rule));
+ assert(!classifier_insert(&cls, &rule2->cls_rule));
+ check_tables(&cls, 2, 2, 2);
+ compare_classifiers(&cls, &tcls);
+
+ classifier_remove(&cls, &rule1->cls_rule);
+ tcls_remove(&tcls, tcls_rule1);
+ check_tables(&cls, 1, 1, 1);
+ compare_classifiers(&cls, &tcls);
+ free(rule1);
+
+ classifier_remove(&cls, &rule2->cls_rule);
+ tcls_remove(&tcls, tcls_rule2);
+ compare_classifiers(&cls, &tcls);
+ free(rule2);
+
+ classifier_destroy(&cls);
+ tcls_destroy(&tcls);
+ }
+ }
+ }
+ }
+}
+
+/* Tests classification with many rules at a time that fall into the same
+ * bucket but have unique priorities (and various wildcards). */
+static void
+test_many_rules_in_one_bucket(void)
+{
+ enum { MAX_RULES = 50 };
+ int iteration, table;
+
+ for (iteration = 0; iteration < 3; iteration++) {
+ for (table = 0; table <= CLS_N_FIELDS; table++) {
+ int priorities[MAX_RULES];
+ struct classifier cls;
+ struct tcls tcls;
+ int i;
+
+ srand(hash_bytes(&table, sizeof table, iteration));
+ for (i = 0; i < MAX_RULES; i++) {
+ priorities[i] = i * 129;
+ }
+ shuffle(priorities, ARRAY_SIZE(priorities));
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+
+ for (i = 0; i < MAX_RULES; i++) {
+ struct test_rule *rule;
+ int priority = priorities[i];
+ int wcf;
+
+ wcf = random_wcf_in_table(table, priority);
+ /* Exact-match rules get distinct value patterns ('i') so
+ * that they do not replace one another. */
+ rule = make_rule(wcf, priority,
+ table == CLS_F_IDX_EXACT ? i : 1234);
+ tcls_insert(&tcls, rule);
+ assert(!classifier_insert(&cls, &rule->cls_rule));
+ check_tables(&cls, 1, 1, i + 1);
+ compare_classifiers(&cls, &tcls);
+ }
+
+ destroy_classifier(&cls);
+ tcls_destroy(&tcls);
+ }
+ }
+}
+
+/* Tests classification with many rules at a time that fall into the same
+ * table but random buckets. */
+static void
+test_many_rules_in_one_table(void)
+{
+ enum { MAX_RULES = 50 };
+ int iteration, table;
+
+ for (iteration = 0; iteration < 3; iteration++) {
+ for (table = 0; table < CLS_N_FIELDS; table++) {
+ int priorities[MAX_RULES];
+ struct classifier cls;
+ struct tcls tcls;
+ int i;
+
+ srand(hash_bytes(&table, sizeof table, iteration));
+ for (i = 0; i < MAX_RULES; i++) {
+ priorities[i] = i * 129;
+ }
+ shuffle(priorities, ARRAY_SIZE(priorities));
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+
+ for (i = 0; i < MAX_RULES; i++) {
+ struct test_rule *rule;
+ int priority = priorities[i];
+ int wcf;
+
+ wcf = random_wcf_in_table(table, priority);
+ rule = make_rule(wcf, priority,
+ hash_bytes(&priority, sizeof priority, 1));
+ tcls_insert(&tcls, rule);
+ assert(!classifier_insert(&cls, &rule->cls_rule));
+ /* The bucket count is pseudorandom, so pass -1 to skip
+ * checking it. */
+ check_tables(&cls, 1, -1, i + 1);
+ compare_classifiers(&cls, &tcls);
+ }
+
+ destroy_classifier(&cls);
+ tcls_destroy(&tcls);
+ }
+ }
+}
+
+/* Tests classification with many rules at a time that fall into random buckets
+ * in random tables. */
+static void
+test_many_rules_in_different_tables(void)
+{
+ enum { MAX_RULES = 50 };
+ int iteration;
+
+ for (iteration = 0; iteration < 30; iteration++) {
+ int priorities[MAX_RULES];
+ struct classifier cls;
+ struct tcls tcls;
+ int i;
+
+ srand(iteration);
+ for (i = 0; i < MAX_RULES; i++) {
+ priorities[i] = i * 129;
+ }
+ shuffle(priorities, ARRAY_SIZE(priorities));
+
+ classifier_init(&cls);
+ tcls_init(&tcls);
+
+ for (i = 0; i < MAX_RULES; i++) {
+ struct test_rule *rule;
+ int priority = priorities[i];
+ int table = rand() % (CLS_N_FIELDS - 1);
+ int wcf = random_wcf_in_table(table, rand());
+ int value_pat = rand() & ((1u << CLS_N_FIELDS) - 1);
+ rule = make_rule(wcf, priority, value_pat);
+ tcls_insert(&tcls, rule);
+ assert(!classifier_insert(&cls, &rule->cls_rule));
+ /* Table and bucket counts are pseudorandom; only check the
+ * total rule count. */
+ check_tables(&cls, -1, -1, i + 1);
+ compare_classifiers(&cls, &tcls);
+ }
+
+ destroy_classifier(&cls);
+ tcls_destroy(&tcls);
+ }
+}
+\f
+/* Runs 'function' and prints a progress dot. */
+static void
+run_test(void (*function)(void))
+{
+ function();
+ putchar('.');
+ fflush(stdout);
+}
+
+/* Runs all of the classifier tests. */
+int
+main(void)
+{
+ init_values();
+ run_test(test_empty);
+ run_test(test_destroy_null);
+ run_test(test_single_rule);
+ run_test(test_rule_replacement);
+ run_test(test_two_rules_in_one_bucket);
+ run_test(test_two_rules_in_one_table);
+ run_test(test_two_rules_in_different_tables);
+ run_test(test_many_rules_in_one_bucket);
+ run_test(test_many_rules_in_one_table);
+ run_test(test_many_rules_in_different_tables);
+ putchar('\n');
+ return 0;
+}