2 * Copyright (c) 2009, 2010 Nicira Networks.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 /* "White box" tests for classifier.
19 * With very few exceptions, these tests obtain complete coverage of every
20 * basic block and every branch in the classifier implementation, e.g. a clean
21 * report from "gcov -b". (Covering the exceptions would require finding
22 * collisions in the hash function used for flow data, etc.)
24 * This test should receive a clean report from "valgrind --leak-check=full":
25 * it frees every heap block that it allocates.
30 #include "classifier.h"
/* Members of the test rule wrapper used throughout these tests.
 * NOTE(review): the enclosing "struct test_rule" header line is missing from
 * this listing; these fields pair a classifier rule with test bookkeeping. */
41 int aux; /* Auxiliary data. */
42 struct cls_rule cls_rule; /* Classifier rule data. */
/* Returns the test_rule that embeds 'rule', or NULL if 'rule' is null.
 * Inverse of taking &test_rule->cls_rule. */
45 static struct test_rule *
46 test_rule_from_cls_rule(const struct cls_rule *rule)
48 return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
51 /* Trivial (linear) classifier. */
/* Members of "struct tcls" (struct header not visible in this listing):
 * a growable array of owned rule copies, kept sorted by decreasing
 * priority by tcls_insert() below. */
54 size_t allocated_rules;
55 struct test_rule **rules;
/* Initializes 'tcls' as an empty trivial classifier. */
59 tcls_init(struct tcls *tcls)
62 tcls->allocated_rules = 0;
/* Frees all rules owned by 'tcls' (loop body elided in this listing). */
67 tcls_destroy(struct tcls *tcls)
72 for (i = 0; i < tcls->n_rules; i++) {
/* Returns the number of exact-match rules in 'tcls', i.e. rules with no
 * wildcard bits set. */
80 tcls_count_exact(const struct tcls *tcls)
86 for (i = 0; i < tcls->n_rules; i++) {
/* Boolean result of the comparison (0 or 1) accumulates the count. */
87 n_exact += tcls->rules[i]->cls_rule.wc.wildcards == 0;
/* Returns true if 'tcls' contains no rules, false otherwise. */
93 tcls_is_empty(const struct tcls *tcls)
95 return tcls->n_rules == 0;
/* Inserts a copy of 'rule' into 'tcls', keeping the array ordered by
 * decreasing priority.  If an identical rule (same priority, wildcards, and
 * flow) is already present, the copy replaces it.  Returns the stored copy,
 * which 'tcls' owns. */
98 static struct test_rule *
99 tcls_insert(struct tcls *tcls, const struct test_rule *rule)
/* Invariant checked throughout these tests: exact-match rules (no wildcards)
 * always carry maximum priority. */
103 assert(rule->cls_rule.wc.wildcards || rule->cls_rule.priority == UINT_MAX);
104 for (i = 0; i < tcls->n_rules; i++) {
105 const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
106 if (pos->priority == rule->cls_rule.priority
107 && pos->wc.wildcards == rule->cls_rule.wc.wildcards
108 && flow_equal(&pos->flow, &rule->cls_rule.flow)) {
110 * XXX flow_equal should ignore wildcarded fields */
/* Duplicate found: replace in place with a fresh heap copy. */
111 free(tcls->rules[i]);
112 tcls->rules[i] = xmemdup(rule, sizeof *rule);
113 return tcls->rules[i];
114 } else if (pos->priority < rule->cls_rule.priority) {
/* Grow the array if full, then shift lower-priority rules right to open
 * a slot at index 'i'. */
119 if (tcls->n_rules >= tcls->allocated_rules) {
120 tcls->rules = x2nrealloc(tcls->rules, &tcls->allocated_rules,
121 sizeof *tcls->rules);
123 if (i != tcls->n_rules) {
124 memmove(&tcls->rules[i + 1], &tcls->rules[i],
125 sizeof *tcls->rules * (tcls->n_rules - i));
127 tcls->rules[i] = xmemdup(rule, sizeof *rule);
129 return tcls->rules[i];
/* Removes 'rule' from 'cls', freeing the stored copy and closing the gap in
 * the array with memmove (match test and free elided in this listing). */
133 tcls_remove(struct tcls *cls, const struct test_rule *rule)
137 for (i = 0; i < cls->n_rules; i++) {
138 struct test_rule *pos = cls->rules[i];
141 memmove(&cls->rules[i], &cls->rules[i + 1],
142 sizeof *cls->rules * (cls->n_rules - i - 1));
/* Reads a 32-bit value from 'p' via memcpy, which is safe even if 'p' is
 * misaligned and avoids strict-aliasing problems of a pointer cast. */
151 read_uint32(const void *p)
154 memcpy(&x, p, sizeof x);
/* Returns true if the wildcarded rule 'wild' matches the concrete flow
 * 'fixed', checking every classifier field in turn. */
159 match(const struct cls_rule *wild, const flow_t *fixed)
163 for (f_idx = 0; f_idx < CLS_N_FIELDS; f_idx++) {
164 const struct cls_field *f = &cls_fields[f_idx];
165 void *wild_field = (char *) &wild->flow + f->ofs;
166 void *fixed_field = (char *) fixed + f->ofs;
/* Field matches outright if it is fully wildcarded or byte-for-byte equal. */
168 if ((wild->wc.wildcards & f->wildcards) == f->wildcards ||
169 !memcmp(wild_field, fixed_field, f->len)) {
170 /* Definite match. */
/* Partially wildcarded field: only nw_src/nw_dst support CIDR-style bit
 * wildcards, compared under the derived network mask. */
174 if (wild->wc.wildcards & f->wildcards) {
175 uint32_t test = read_uint32(wild_field);
176 uint32_t ip = read_uint32(fixed_field);
177 int shift = (f_idx == CLS_F_IDX_NW_SRC
178 ? OFPFW_NW_SRC_SHIFT : OFPFW_NW_DST_SHIFT);
179 uint32_t mask = flow_nw_bits_to_mask(wild->wc.wildcards, shift);
180 if (!((test ^ ip) & mask)) {
/* Returns the highest-priority rule in 'cls' matching 'flow', honoring the
 * CLS_INC_WILD/CLS_INC_EXACT bits in 'include'; NULL if none matches.
 * Relies on the rules array being sorted by decreasing priority. */
190 static struct cls_rule *
191 tcls_lookup(const struct tcls *cls, const flow_t *flow, int include)
195 for (i = 0; i < cls->n_rules; i++) {
196 struct test_rule *pos = cls->rules[i];
197 uint32_t wildcards = pos->cls_rule.wc.wildcards;
198 if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT)
199 && match(&pos->cls_rule, flow)) {
200 return &pos->cls_rule;
/* Removes from 'cls' every rule that matches 'target' and is selected by
 * 'include'.  The loop index is only advanced when no rule is removed, since
 * tcls_remove() shifts later rules down into slot 'i'. */
207 tcls_delete_matches(struct tcls *cls,
208 const struct cls_rule *target,
213 for (i = 0; i < cls->n_rules; ) {
214 struct test_rule *pos = cls->rules[i];
215 uint32_t wildcards = pos->cls_rule.wc.wildcards;
216 if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT)
217 && match(target, &pos->cls_rule.flow)) {
218 tcls_remove(cls, pos);
/* Compile-time htonl()/htons() equivalents, usable in static initializers
 * below.  On big-endian hosts they are identity casts; otherwise they swap
 * bytes explicitly.  (The #else line between the two halves is missing from
 * this listing.) */
225 #ifdef WORDS_BIGENDIAN
226 #define T_HTONL(VALUE) ((uint32_t) (VALUE))
227 #define T_HTONS(VALUE) ((uint32_t) (VALUE))
229 #define T_HTONL(VALUE) (((((uint32_t) (VALUE)) & 0x000000ff) << 24) | \
230 ((((uint32_t) (VALUE)) & 0x0000ff00) << 8) | \
231 ((((uint32_t) (VALUE)) & 0x00ff0000) >> 8) | \
232 ((((uint32_t) (VALUE)) & 0xff000000) >> 24))
233 #define T_HTONS(VALUE) (((((uint16_t) (VALUE)) & 0xff00) >> 8) | \
234 ((((uint16_t) (VALUE)) & 0x00ff) << 8))
/* Two candidate values per flow field, in network byte order where
 * applicable.  The tests enumerate combinations of these to build every
 * test flow. */
237 static uint32_t nw_src_values[] = { T_HTONL(0xc0a80001),
238 T_HTONL(0xc0a04455) };
239 static uint32_t nw_dst_values[] = { T_HTONL(0xc0a80002),
240 T_HTONL(0xc0a04455) };
241 static uint16_t in_port_values[] = { T_HTONS(1), T_HTONS(OFPP_LOCAL) };
242 static uint16_t dl_vlan_values[] = { T_HTONS(101), T_HTONS(0) };
243 static uint8_t dl_vlan_pcp_values[] = { 7, 0 };
244 static uint16_t dl_type_values[]
245 = { T_HTONS(ETH_TYPE_IP), T_HTONS(ETH_TYPE_ARP) };
246 static uint16_t tp_src_values[] = { T_HTONS(49362), T_HTONS(80) };
247 static uint16_t tp_dst_values[] = { T_HTONS(6667), T_HTONS(22) };
248 static uint8_t dl_src_values[][6] = { { 0x00, 0x02, 0xe3, 0x0f, 0x80, 0xa4 },
249 { 0x5e, 0x33, 0x7f, 0x5f, 0x1e, 0x99 } };
250 static uint8_t dl_dst_values[][6] = { { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 },
251 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
252 static uint8_t nw_proto_values[] = { IP_TYPE_TCP, IP_TYPE_ICMP };
253 static uint8_t nw_tos_values[] = { 49, 0 };
/* values[field][i] points at the i'th candidate value for each classifier
 * field.  NOTE(review): the header of the initializer function that contains
 * these assignments is missing from this listing. */
255 static void *values[CLS_N_FIELDS][2];
260 values[CLS_F_IDX_IN_PORT][0] = &in_port_values[0];
261 values[CLS_F_IDX_IN_PORT][1] = &in_port_values[1];
263 values[CLS_F_IDX_DL_VLAN][0] = &dl_vlan_values[0];
264 values[CLS_F_IDX_DL_VLAN][1] = &dl_vlan_values[1];
266 values[CLS_F_IDX_DL_VLAN_PCP][0] = &dl_vlan_pcp_values[0];
267 values[CLS_F_IDX_DL_VLAN_PCP][1] = &dl_vlan_pcp_values[1];
/* dl_src/dl_dst are 6-byte arrays, so the rows decay to pointers directly. */
269 values[CLS_F_IDX_DL_SRC][0] = dl_src_values[0];
270 values[CLS_F_IDX_DL_SRC][1] = dl_src_values[1];
272 values[CLS_F_IDX_DL_DST][0] = dl_dst_values[0];
273 values[CLS_F_IDX_DL_DST][1] = dl_dst_values[1];
275 values[CLS_F_IDX_DL_TYPE][0] = &dl_type_values[0];
276 values[CLS_F_IDX_DL_TYPE][1] = &dl_type_values[1];
278 values[CLS_F_IDX_NW_SRC][0] = &nw_src_values[0];
279 values[CLS_F_IDX_NW_SRC][1] = &nw_src_values[1];
281 values[CLS_F_IDX_NW_DST][0] = &nw_dst_values[0];
282 values[CLS_F_IDX_NW_DST][1] = &nw_dst_values[1];
284 values[CLS_F_IDX_NW_PROTO][0] = &nw_proto_values[0];
285 values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1];
287 values[CLS_F_IDX_NW_TOS][0] = &nw_tos_values[0];
288 values[CLS_F_IDX_NW_TOS][1] = &nw_tos_values[1];
290 values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0];
291 values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1];
293 values[CLS_F_IDX_TP_DST][0] = &tp_dst_values[0];
294 values[CLS_F_IDX_TP_DST][1] = &tp_dst_values[1];
/* Per-field value counts, plus N_FLOW_VALUES, the product of all of them:
 * the total number of distinct test flows compare_classifiers() iterates.
 * NOTE(review): several factor lines of the product are missing from this
 * listing. */
297 #define N_NW_SRC_VALUES ARRAY_SIZE(nw_src_values)
298 #define N_NW_DST_VALUES ARRAY_SIZE(nw_dst_values)
299 #define N_IN_PORT_VALUES ARRAY_SIZE(in_port_values)
300 #define N_DL_VLAN_VALUES ARRAY_SIZE(dl_vlan_values)
301 #define N_DL_VLAN_PCP_VALUES ARRAY_SIZE(dl_vlan_pcp_values)
302 #define N_DL_TYPE_VALUES ARRAY_SIZE(dl_type_values)
303 #define N_TP_SRC_VALUES ARRAY_SIZE(tp_src_values)
304 #define N_TP_DST_VALUES ARRAY_SIZE(tp_dst_values)
305 #define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values)
306 #define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values)
307 #define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values)
308 #define N_NW_TOS_VALUES ARRAY_SIZE(nw_tos_values)
310 #define N_FLOW_VALUES (N_NW_SRC_VALUES * \
314 N_DL_VLAN_PCP_VALUES * \
320 N_NW_PROTO_VALUES * \
/* Extracts the next mixed-radix digit from '*x': returns *x modulo
 * 'n_values' (the remaining division of *x is elided in this listing). */
324 get_value(unsigned int *x, unsigned n_values)
326 unsigned int rem = *x % n_values;
/* Dispatches a lookup on 'cls' to the classifier entry point matching the
 * 'include' bits: wildcard-only, exact-only, or combined. */
331 static struct cls_rule *
332 lookup_with_include_bits(const struct classifier *cls,
333 const flow_t *flow, int include)
337 return classifier_lookup_wild(cls, flow);
339 return classifier_lookup_exact(cls, flow);
340 case CLS_INC_WILD | CLS_INC_EXACT:
341 return classifier_lookup(cls, flow);
/* Cross-checks the real classifier 'cls' against the trivial reference
 * implementation 'tcls': equal rule counts, then for every enumerated test
 * flow and every include-bit combination, both must return equivalent
 * rules. */
348 compare_classifiers(struct classifier *cls, struct tcls *tcls)
352 assert(classifier_count(cls) == tcls->n_rules);
353 assert(classifier_count_exact(cls) == tcls_count_exact(tcls));
354 for (i = 0; i < N_FLOW_VALUES; i++) {
355 struct cls_rule *cr0, *cr1;
/* Decode flow number 'i' into one concrete field value per field via
 * successive get_value() calls on 'x'. */
361 flow.nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)];
362 flow.nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)];
363 flow.in_port = in_port_values[get_value(&x, N_IN_PORT_VALUES)];
364 flow.dl_vlan = dl_vlan_values[get_value(&x, N_DL_VLAN_VALUES)];
365 flow.dl_vlan_pcp = dl_vlan_pcp_values[get_value(&x,
366 N_DL_VLAN_PCP_VALUES)];
367 flow.dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)];
368 flow.tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)];
369 flow.tp_dst = tp_dst_values[get_value(&x, N_TP_DST_VALUES)];
370 memcpy(flow.dl_src, dl_src_values[get_value(&x, N_DL_SRC_VALUES)],
372 memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)],
374 flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
375 flow.nw_tos = nw_tos_values[get_value(&x, N_NW_TOS_VALUES)];
376 memset(flow.reserved, 0, sizeof flow.reserved);
/* include = 1 (WILD), 2 (EXACT), 3 (both). */
378 for (include = 1; include <= 3; include++) {
379 cr0 = lookup_with_include_bits(cls, &flow, include);
380 cr1 = tcls_lookup(tcls, &flow, include);
/* Both classifiers must agree on hit/miss, then on the rule itself. */
381 assert((cr0 == NULL) == (cr1 == NULL));
383 const struct test_rule *tr0 = test_rule_from_cls_rule(cr0);
384 const struct test_rule *tr1 = test_rule_from_cls_rule(cr1);
386 assert(flow_equal(&cr0->flow, &cr1->flow));
387 assert(cr0->wc.wildcards == cr1->wc.wildcards);
388 assert(cr0->priority == cr1->priority);
389 /* Skip nw_src_mask and nw_dst_mask, because they are derived
390 * members whose values are used only for optimization. */
391 assert(tr0->aux == tr1->aux);
/* classifier_for_each() callback: removes 'cls_rule' from the classifier
 * passed through 'cls' and frees its enclosing test_rule. */
398 free_rule(struct cls_rule *cls_rule, void *cls)
400 classifier_remove(cls, cls_rule);
401 free(test_rule_from_cls_rule(cls_rule));
/* Frees every rule in 'cls' (via free_rule) and then destroys the
 * classifier itself. */
405 destroy_classifier(struct classifier *cls)
407 classifier_for_each(cls, CLS_INC_ALL, free_rule, cls);
408 classifier_destroy(cls);
/* Walks the classifier's internal hash tables and asserts that the counts of
 * nonempty tables, buckets, and rules equal 'n_tables', 'n_buckets', and
 * 'n_rules' respectively; -1 for any of them skips that check. */
412 check_tables(const struct classifier *cls,
413 int n_tables, int n_buckets, int n_rules)
415 int found_tables = 0;
416 int found_buckets = 0;
/* One wildcard hash table per classifier field. */
420 BUILD_ASSERT(CLS_N_FIELDS == ARRAY_SIZE(cls->tables));
421 for (i = 0; i < CLS_N_FIELDS; i++) {
422 const struct cls_bucket *bucket;
423 if (!hmap_is_empty(&cls->tables[i])) {
426 HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, &cls->tables[i]) {
/* Empty buckets should have been reclaimed, so each bucket must hold
 * at least one rule. */
428 assert(!list_is_empty(&bucket->rules));
429 found_rules += list_size(&bucket->rules);
/* The exact-match table is separate from the per-field wildcard tables. */
433 if (!hmap_is_empty(&cls->exact_table)) {
436 found_rules += hmap_count(&cls->exact_table);
439 assert(n_tables == -1 || found_tables == n_tables);
440 assert(n_rules == -1 || found_rules == n_rules);
441 assert(n_buckets == -1 || found_buckets == n_buckets);
/* Allocates and returns a new test_rule whose wildcarded fields are chosen
 * by the 'wc_fields' bitmap and whose concrete field values are picked from
 * values[][] by the 'value_pat' bitmap.  Exact-match rules (no wildcards)
 * are forced to UINT_MAX priority, matching the tcls_insert() invariant.
 * Caller owns the returned rule. */
444 static struct test_rule *
445 make_rule(int wc_fields, unsigned int priority, int value_pat)
447 const struct cls_field *f;
448 struct test_rule *rule;
453 memset(&flow, 0, sizeof flow);
454 for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
455 int f_idx = f - cls_fields;
456 if (wc_fields & (1u << f_idx)) {
457 wildcards |= f->wildcards;
/* Non-wildcarded field: copy candidate value 0 or 1 into the flow. */
459 int value_idx = (value_pat & (1u << f_idx)) != 0;
460 memcpy((char *) &flow + f->ofs, values[f_idx][value_idx], f->len);
464 rule = xzalloc(sizeof *rule);
465 cls_rule_from_flow(&rule->cls_rule, &flow, wildcards,
466 !wildcards ? UINT_MAX : priority);
/* Randomly permutes the 'n' elements of 'p' with a Fisher-Yates shuffle
 * driven by rand() (the final swap lines are elided in this listing). */
471 shuffle(unsigned int *p, size_t n)
473 for (; n > 1; n--, p++) {
474 unsigned int *q = &p[rand() % n];
475 unsigned int tmp = *p;
481 /* Tests an empty classifier. */
485 struct classifier cls;
488 classifier_init(&cls);
/* Both implementations must agree that nothing is stored. */
490 assert(classifier_is_empty(&cls));
491 assert(tcls_is_empty(&tcls));
492 compare_classifiers(&cls, &tcls);
493 classifier_destroy(&cls);
497 /* Destroys a null classifier. */
499 test_destroy_null(void)
/* classifier_destroy(NULL) must be a harmless no-op. */
501 classifier_destroy(NULL);
504 /* Tests classification with one rule at a time. */
506 test_single_rule(void)
508 unsigned int wc_fields; /* Hilarious. */
/* Iterate over every possible wildcard-field bitmap. */
510 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
511 struct classifier cls;
512 struct test_rule *rule, *tcls_rule;
/* Priority derived from the bitmap so different iterations differ. */
515 rule = make_rule(wc_fields,
516 hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
518 classifier_init(&cls);
521 tcls_rule = tcls_insert(&tcls, rule);
/* Insertion into an empty classifier must not displace anything. */
523 assert(!classifier_insert(&cls, &rule->cls_rule));
525 classifier_insert_exact(&cls, &rule->cls_rule);
527 check_tables(&cls, 1, 1, 1);
528 compare_classifiers(&cls, &tcls);
530 classifier_remove(&cls, &rule->cls_rule);
531 tcls_remove(&tcls, tcls_rule);
/* After removal both classifiers must be empty again. */
532 assert(classifier_is_empty(&cls));
533 assert(tcls_is_empty(&tcls));
534 compare_classifiers(&cls, &tcls);
537 classifier_destroy(&cls);
542 /* Tests replacing one rule by another. */
544 test_rule_replacement(void)
546 unsigned int wc_fields;
548 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
549 struct classifier cls;
550 struct test_rule *rule1;
551 struct test_rule *rule2;
/* Two rules identical in priority, wildcards, and flow, so the second
 * must displace the first. */
554 rule1 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
555 rule2 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
559 classifier_init(&cls);
561 tcls_insert(&tcls, rule1);
562 assert(!classifier_insert(&cls, &rule1->cls_rule));
563 check_tables(&cls, 1, 1, 1);
564 compare_classifiers(&cls, &tcls);
568 tcls_insert(&tcls, rule2);
/* classifier_insert() must return the displaced rule, i.e. rule1. */
569 assert(test_rule_from_cls_rule(
570 classifier_insert(&cls, &rule2->cls_rule)) == rule1);
572 check_tables(&cls, 1, 1, 1);
573 compare_classifiers(&cls, &tcls);
575 destroy_classifier(&cls);
/* Returns the bitmap of fields at index 'table' and above, i.e. the
 * wildcard bits permitted for rules that belong in that table. */
580 table_mask(int table)
582 return ((1u << CLS_N_FIELDS) - 1) & ~((1u << table) - 1);
/* Returns a pseudorandom wildcard-field bitmap (seeded by 'seed') that is
 * guaranteed to place a rule in table 'table': bit 'table' is forced on and
 * lower-table bits are masked off. */
586 random_wcf_in_table(int table, int seed)
588 int wc_fields = (1u << table) | hash_int(seed, 0);
589 return wc_fields & table_mask(table);
592 /* Tests classification with two rules at a time that fall into the same
595 test_two_rules_in_one_bucket(void)
597 int table, rel_pri, wcf_pat, value_pat;
/* 'table' == CLS_N_FIELDS denotes the exact-match table. */
599 for (table = 0; table <= CLS_N_FIELDS; table++) {
600 for (rel_pri = -1; rel_pri <= +1; rel_pri++) {
601 for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) {
602 int n_value_pats = table == CLS_N_FIELDS - 1 ? 1 : 2;
603 for (value_pat = 0; value_pat < n_value_pats; value_pat++) {
604 struct test_rule *rule1, *tcls_rule1;
605 struct test_rule *rule2, *tcls_rule2;
606 struct test_rule *displaced_rule;
607 struct classifier cls;
609 unsigned int pri1, pri2;
612 if (table != CLS_F_IDX_EXACT) {
613 /* We can use identical priorities in this test because
614 * the classifier always chooses the rule added later
615 * for equal-priority rules that fall into the same
617 pri1 = table * 257 + 50;
618 pri2 = pri1 + rel_pri;
/* wcf_pat's low bits select random vs. minimal wildcard sets. */
621 ? random_wcf_in_table(table, pri1)
624 ? random_wcf_in_table(table, pri2)
627 wcf1 &= ~(1u << (CLS_N_FIELDS - 1));
628 wcf2 &= ~(1u << (CLS_N_FIELDS - 1));
631 /* This classifier always puts exact-match rules at
632 * maximum priority. */
633 pri1 = pri2 = UINT_MAX;
635 /* No wildcard fields. */
639 rule1 = make_rule(wcf1, pri1, 0);
640 rule2 = make_rule(wcf2, pri2,
641 value_pat << (CLS_N_FIELDS - 1));
643 classifier_init(&cls);
646 tcls_rule1 = tcls_insert(&tcls, rule1);
647 tcls_rule2 = tcls_insert(&tcls, rule2);
648 assert(!classifier_insert(&cls, &rule1->cls_rule));
649 displaced_rule = test_rule_from_cls_rule(
650 classifier_insert(&cls, &rule2->cls_rule));
/* Rules differ in some respect -> both coexist; identical ->
 * the second insertion must have displaced rule1. */
651 if (wcf1 != wcf2 || pri1 != pri2 || value_pat) {
652 assert(!displaced_rule);
654 check_tables(&cls, 1, 1, 2);
655 compare_classifiers(&cls, &tcls);
657 classifier_remove(&cls, &rule1->cls_rule);
658 tcls_remove(&tcls, tcls_rule1);
659 check_tables(&cls, 1, 1, 1);
660 compare_classifiers(&cls, &tcls);
662 assert(displaced_rule == rule1);
663 check_tables(&cls, 1, 1, 1);
664 compare_classifiers(&cls, &tcls);
668 classifier_remove(&cls, &rule2->cls_rule);
669 tcls_remove(&tcls, tcls_rule2);
670 compare_classifiers(&cls, &tcls);
673 destroy_classifier(&cls);
681 /* Tests classification with two rules at a time that fall into the same
682 * table but different buckets. */
684 test_two_rules_in_one_table(void)
686 int table, rel_pri, wcf_pat;
688 /* Skip tables 0 and CLS_F_IDX_EXACT because they have one bucket. */
689 for (table = 1; table < CLS_N_FIELDS; table++) {
690 for (rel_pri = -1; rel_pri <= +1; rel_pri++) {
691 for (wcf_pat = 0; wcf_pat < 5; wcf_pat++) {
692 struct test_rule *rule1, *tcls_rule1;
693 struct test_rule *rule2, *tcls_rule2;
694 struct classifier cls;
696 unsigned int pri1, pri2;
698 int value_mask, value_pat1, value_pat2;
701 /* We can use identical priorities in this test because the
702 * classifier always chooses the rule added later for
703 * equal-priority rules that fall into the same table. */
704 pri1 = table * 257 + 50;
705 pri2 = pri1 + rel_pri;
/* wcf_pat selects identical vs. independently random wildcard sets. */
708 wcf1 = wcf2 = random_wcf_in_table(table, pri1);
711 ? random_wcf_in_table(table, pri1)
714 ? random_wcf_in_table(table, pri2)
718 /* Generate value patterns that will put the two rules into
719 * different buckets. */
720 value_mask = ((1u << table) - 1);
721 value_pat1 = hash_int(pri1, 1) & value_mask;
/* Re-hash with a fresh basis until the two patterns differ. */
724 value_pat2 = (hash_int(pri2, i++) & value_mask);
725 } while (value_pat1 == value_pat2);
726 rule1 = make_rule(wcf1, pri1, value_pat1);
727 rule2 = make_rule(wcf2, pri2, value_pat2);
729 classifier_init(&cls);
732 tcls_rule1 = tcls_insert(&tcls, rule1);
733 tcls_rule2 = tcls_insert(&tcls, rule2);
734 assert(!classifier_insert(&cls, &rule1->cls_rule));
735 assert(!classifier_insert(&cls, &rule2->cls_rule));
/* Same table, distinct buckets: 1 table, 2 buckets, 2 rules. */
736 check_tables(&cls, 1, 2, 2);
737 compare_classifiers(&cls, &tcls);
739 classifier_remove(&cls, &rule1->cls_rule);
740 tcls_remove(&tcls, tcls_rule1);
741 check_tables(&cls, 1, 1, 1);
742 compare_classifiers(&cls, &tcls);
745 classifier_remove(&cls, &rule2->cls_rule);
746 tcls_remove(&tcls, tcls_rule2);
747 compare_classifiers(&cls, &tcls);
750 classifier_destroy(&cls);
757 /* Tests classification with two rules at a time that fall into different
760 test_two_rules_in_different_tables(void)
762 int table1, table2, rel_pri, wcf_pat;
764 for (table1 = 0; table1 < CLS_N_FIELDS; table1++) {
765 for (table2 = table1 + 1; table2 <= CLS_N_FIELDS; table2++) {
766 for (rel_pri = 0; rel_pri < 2; rel_pri++) {
767 for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) {
768 struct test_rule *rule1, *tcls_rule1;
769 struct test_rule *rule2, *tcls_rule2;
770 struct classifier cls;
772 unsigned int pri1, pri2;
775 /* We must use unique priorities in this test because the
776 * classifier makes the rule choice undefined for rules of
777 * equal priority that fall into different tables. (In
778 * practice, lower-numbered tables win.) */
779 pri1 = table1 * 257 + 50;
780 pri2 = rel_pri ? pri1 - 1 : pri1 + 1;
783 ? random_wcf_in_table(table1, pri1)
786 ? random_wcf_in_table(table2, pri2)
/* table2 == exact table means rule2 carries no wildcards. */
789 if (table2 == CLS_F_IDX_EXACT) {
794 rule1 = make_rule(wcf1, pri1, 0);
795 rule2 = make_rule(wcf2, pri2, 0);
797 classifier_init(&cls);
800 tcls_rule1 = tcls_insert(&tcls, rule1);
801 tcls_rule2 = tcls_insert(&tcls, rule2);
802 assert(!classifier_insert(&cls, &rule1->cls_rule));
803 assert(!classifier_insert(&cls, &rule2->cls_rule));
/* Two rules in two different tables. */
804 check_tables(&cls, 2, 2, 2);
805 compare_classifiers(&cls, &tcls);
807 classifier_remove(&cls, &rule1->cls_rule);
808 tcls_remove(&tcls, tcls_rule1);
809 check_tables(&cls, 1, 1, 1);
810 compare_classifiers(&cls, &tcls);
813 classifier_remove(&cls, &rule2->cls_rule);
814 tcls_remove(&tcls, tcls_rule2);
815 compare_classifiers(&cls, &tcls);
818 classifier_destroy(&cls);
826 /* Tests classification with many rules at a time that fall into the same
827 * bucket but have unique priorities (and various wildcards). */
829 test_many_rules_in_one_bucket(void)
831 enum { MAX_RULES = 50 };
832 int iteration, table;
834 for (iteration = 0; iteration < 3; iteration++) {
835 for (table = 0; table <= CLS_N_FIELDS; table++) {
836 unsigned int priorities[MAX_RULES];
837 struct classifier cls;
/* Deterministic seed per (table, iteration) pair keeps runs
 * reproducible. */
841 srand(hash_int(table, iteration));
842 for (i = 0; i < MAX_RULES; i++) {
843 priorities[i] = i * 129;
845 shuffle(priorities, ARRAY_SIZE(priorities));
847 classifier_init(&cls);
850 for (i = 0; i < MAX_RULES; i++) {
851 struct test_rule *rule;
852 unsigned int priority = priorities[i];
855 wcf = random_wcf_in_table(table, priority);
/* Exact-match rules need unique value patterns so they do not
 * replace each other; wildcarded rules share one pattern so
 * they land in the same bucket. */
856 rule = make_rule(wcf, priority,
857 table == CLS_F_IDX_EXACT ? i : 1234);
858 tcls_insert(&tcls, rule);
859 assert(!classifier_insert(&cls, &rule->cls_rule));
/* Still one table and one bucket after each insertion. */
860 check_tables(&cls, 1, 1, i + 1);
861 compare_classifiers(&cls, &tcls);
864 destroy_classifier(&cls);
870 /* Tests classification with many rules at a time that fall into the same
871 * table but random buckets. */
873 test_many_rules_in_one_table(void)
875 enum { MAX_RULES = 50 };
876 int iteration, table;
878 for (iteration = 0; iteration < 3; iteration++) {
879 for (table = 0; table < CLS_N_FIELDS; table++) {
880 unsigned int priorities[MAX_RULES];
881 struct classifier cls;
885 srand(hash_int(table, iteration));
886 for (i = 0; i < MAX_RULES; i++) {
887 priorities[i] = i * 129;
889 shuffle(priorities, ARRAY_SIZE(priorities));
891 classifier_init(&cls);
894 for (i = 0; i < MAX_RULES; i++) {
895 struct test_rule *rule;
896 unsigned int priority = priorities[i];
899 wcf = random_wcf_in_table(table, priority);
/* Hash-derived value pattern scatters rules across buckets. */
900 rule = make_rule(wcf, priority, hash_int(priority, 1));
901 tcls_insert(&tcls, rule);
902 assert(!classifier_insert(&cls, &rule->cls_rule));
/* One table, any number of buckets (-1), i+1 rules so far. */
903 check_tables(&cls, 1, -1, i + 1);
904 compare_classifiers(&cls, &tcls);
907 destroy_classifier(&cls);
913 /* Tests classification with many rules at a time that fall into random buckets
914 * in random tables. */
916 test_many_rules_in_different_tables(void)
918 enum { MAX_RULES = 50 };
921 for (iteration = 0; iteration < 30; iteration++) {
922 unsigned int priorities[MAX_RULES];
923 struct classifier cls;
928 for (i = 0; i < MAX_RULES; i++) {
929 priorities[i] = i * 129;
931 shuffle(priorities, ARRAY_SIZE(priorities));
933 classifier_init(&cls);
936 for (i = 0; i < MAX_RULES; i++) {
937 struct test_rule *rule;
938 unsigned int priority = priorities[i];
/* Random table (including exact-match), wildcard set, and values. */
939 int table = rand() % (CLS_N_FIELDS + 1);
940 int wcf = random_wcf_in_table(table, rand());
941 int value_pat = rand() & ((1u << CLS_N_FIELDS) - 1);
942 rule = make_rule(wcf, priority, value_pat);
943 tcls_insert(&tcls, rule);
944 assert(!classifier_insert(&cls, &rule->cls_rule));
945 check_tables(&cls, -1, -1, i + 1);
946 compare_classifiers(&cls, &tcls);
/* Tear down by repeatedly deleting everything that matches a randomly
 * chosen existing rule, cross-checking after each batch. */
949 while (!classifier_is_empty(&cls)) {
950 struct test_rule *rule = xmemdup(tcls.rules[rand() % tcls.n_rules],
951 sizeof(struct test_rule));
/* Always include the chosen rule's own kind so at least one rule
 * is deleted per pass. */
952 int include = rand() % 2 ? CLS_INC_WILD : CLS_INC_EXACT;
953 include |= (rule->cls_rule.wc.wildcards
954 ? CLS_INC_WILD : CLS_INC_EXACT);
955 classifier_for_each_match(&cls, &rule->cls_rule, include,
957 tcls_delete_matches(&tcls, &rule->cls_rule, include);
958 compare_classifiers(&cls, &tcls);
964 destroy_classifier(&cls);
/* Runs a single test function (body elided in this listing). */
970 run_test(void (*function)(void))
/* Test driver body: runs every classifier test in order, from the trivial
 * cases up to the randomized many-rule stress tests.  NOTE(review): the
 * enclosing main() signature is missing from this listing. */
981 run_test(test_empty);
982 run_test(test_destroy_null);
983 run_test(test_single_rule);
984 run_test(test_rule_replacement);
985 run_test(test_two_rules_in_one_bucket);
986 run_test(test_two_rules_in_one_table);
987 run_test(test_two_rules_in_different_tables);
988 run_test(test_many_rules_in_one_bucket);
989 run_test(test_many_rules_in_one_table);
990 run_test(test_many_rules_in_different_tables);