/* Copyright (c) 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
/* "White box" tests for classifier.
 *
 * With very few exceptions, these tests obtain complete coverage of every
 * basic block and every branch in the classifier implementation, e.g. a clean
 * report from "gcov -b".  (Covering the exceptions would require finding
 * collisions in the hash function used for flow data, etc.)
 *
 * This test should receive a clean report from "valgrind --leak-check=full":
 * it frees every heap block that it allocates.
 */
#include "classifier.h"

#include <assert.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "byte-order.h"
#include "command-line.h"
41 int aux; /* Auxiliary data. */
42 struct cls_rule cls_rule; /* Classifier rule data. */
45 static struct test_rule *
46 test_rule_from_cls_rule(const struct cls_rule *rule)
48 return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
51 /* Trivial (linear) classifier. */
54 size_t allocated_rules;
55 struct test_rule **rules;
59 tcls_init(struct tcls *tcls)
62 tcls->allocated_rules = 0;
67 tcls_destroy(struct tcls *tcls)
72 for (i = 0; i < tcls->n_rules; i++) {
80 tcls_count_exact(const struct tcls *tcls)
86 for (i = 0; i < tcls->n_rules; i++) {
87 n_exact += tcls->rules[i]->cls_rule.wc.wildcards == 0;
93 tcls_is_empty(const struct tcls *tcls)
95 return tcls->n_rules == 0;
98 static struct test_rule *
99 tcls_insert(struct tcls *tcls, const struct test_rule *rule)
103 assert(rule->cls_rule.wc.wildcards || rule->cls_rule.priority == UINT_MAX);
104 for (i = 0; i < tcls->n_rules; i++) {
105 const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
106 if (pos->priority == rule->cls_rule.priority
107 && pos->wc.wildcards == rule->cls_rule.wc.wildcards
108 && flow_equal(&pos->flow, &rule->cls_rule.flow)) {
110 * XXX flow_equal should ignore wildcarded fields */
111 free(tcls->rules[i]);
112 tcls->rules[i] = xmemdup(rule, sizeof *rule);
113 return tcls->rules[i];
114 } else if (pos->priority < rule->cls_rule.priority) {
119 if (tcls->n_rules >= tcls->allocated_rules) {
120 tcls->rules = x2nrealloc(tcls->rules, &tcls->allocated_rules,
121 sizeof *tcls->rules);
123 if (i != tcls->n_rules) {
124 memmove(&tcls->rules[i + 1], &tcls->rules[i],
125 sizeof *tcls->rules * (tcls->n_rules - i));
127 tcls->rules[i] = xmemdup(rule, sizeof *rule);
129 return tcls->rules[i];
133 tcls_remove(struct tcls *cls, const struct test_rule *rule)
137 for (i = 0; i < cls->n_rules; i++) {
138 struct test_rule *pos = cls->rules[i];
141 memmove(&cls->rules[i], &cls->rules[i + 1],
142 sizeof *cls->rules * (cls->n_rules - i - 1));
151 read_uint32(const void *p)
154 memcpy(&x, p, sizeof x);
159 match(const struct cls_rule *wild, const struct flow *fixed)
163 for (f_idx = 0; f_idx < CLS_N_FIELDS; f_idx++) {
164 const struct cls_field *f = &cls_fields[f_idx];
165 void *wild_field = (char *) &wild->flow + f->ofs;
166 void *fixed_field = (char *) fixed + f->ofs;
168 if ((wild->wc.wildcards & f->wildcards) == f->wildcards ||
169 !memcmp(wild_field, fixed_field, f->len)) {
170 /* Definite match. */
174 if (wild->wc.wildcards & f->wildcards) {
175 uint32_t test = read_uint32(wild_field);
176 uint32_t ip = read_uint32(fixed_field);
177 int shift = (f_idx == CLS_F_IDX_NW_SRC
178 ? OFPFW_NW_SRC_SHIFT : OFPFW_NW_DST_SHIFT);
179 uint32_t mask = flow_nw_bits_to_mask(wild->wc.wildcards, shift);
180 if (!((test ^ ip) & mask)) {
190 static struct cls_rule *
191 tcls_lookup(const struct tcls *cls, const struct flow *flow, int include)
195 for (i = 0; i < cls->n_rules; i++) {
196 struct test_rule *pos = cls->rules[i];
197 uint32_t wildcards = pos->cls_rule.wc.wildcards;
198 if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT)
199 && match(&pos->cls_rule, flow)) {
200 return &pos->cls_rule;
207 tcls_delete_matches(struct tcls *cls,
208 const struct cls_rule *target,
213 for (i = 0; i < cls->n_rules; ) {
214 struct test_rule *pos = cls->rules[i];
215 uint32_t wildcards = pos->cls_rule.wc.wildcards;
216 if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT)
217 && match(target, &pos->cls_rule.flow)) {
218 tcls_remove(cls, pos);
225 static uint32_t nw_src_values[] = { CONSTANT_HTONL(0xc0a80001),
226 CONSTANT_HTONL(0xc0a04455) };
227 static uint32_t nw_dst_values[] = { CONSTANT_HTONL(0xc0a80002),
228 CONSTANT_HTONL(0xc0a04455) };
229 static uint32_t tun_id_values[] = { 0, 0xffff0000 };
230 static uint16_t in_port_values[] = { CONSTANT_HTONS(1),
231 CONSTANT_HTONS(OFPP_LOCAL) };
232 static uint16_t dl_vlan_values[] = { CONSTANT_HTONS(101), CONSTANT_HTONS(0) };
233 static uint8_t dl_vlan_pcp_values[] = { 7, 0 };
234 static uint16_t dl_type_values[]
235 = { CONSTANT_HTONS(ETH_TYPE_IP), CONSTANT_HTONS(ETH_TYPE_ARP) };
236 static uint16_t tp_src_values[] = { CONSTANT_HTONS(49362),
237 CONSTANT_HTONS(80) };
238 static uint16_t tp_dst_values[] = { CONSTANT_HTONS(6667), CONSTANT_HTONS(22) };
239 static uint8_t dl_src_values[][6] = { { 0x00, 0x02, 0xe3, 0x0f, 0x80, 0xa4 },
240 { 0x5e, 0x33, 0x7f, 0x5f, 0x1e, 0x99 } };
241 static uint8_t dl_dst_values[][6] = { { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 },
242 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
243 static uint8_t nw_proto_values[] = { IP_TYPE_TCP, IP_TYPE_ICMP };
244 static uint8_t nw_tos_values[] = { 49, 0 };
246 static void *values[CLS_N_FIELDS][2];
251 values[CLS_F_IDX_TUN_ID][0] = &tun_id_values[0];
252 values[CLS_F_IDX_TUN_ID][1] = &tun_id_values[1];
254 values[CLS_F_IDX_IN_PORT][0] = &in_port_values[0];
255 values[CLS_F_IDX_IN_PORT][1] = &in_port_values[1];
257 values[CLS_F_IDX_DL_VLAN][0] = &dl_vlan_values[0];
258 values[CLS_F_IDX_DL_VLAN][1] = &dl_vlan_values[1];
260 values[CLS_F_IDX_DL_VLAN_PCP][0] = &dl_vlan_pcp_values[0];
261 values[CLS_F_IDX_DL_VLAN_PCP][1] = &dl_vlan_pcp_values[1];
263 values[CLS_F_IDX_DL_SRC][0] = dl_src_values[0];
264 values[CLS_F_IDX_DL_SRC][1] = dl_src_values[1];
266 values[CLS_F_IDX_DL_DST][0] = dl_dst_values[0];
267 values[CLS_F_IDX_DL_DST][1] = dl_dst_values[1];
269 values[CLS_F_IDX_DL_TYPE][0] = &dl_type_values[0];
270 values[CLS_F_IDX_DL_TYPE][1] = &dl_type_values[1];
272 values[CLS_F_IDX_NW_SRC][0] = &nw_src_values[0];
273 values[CLS_F_IDX_NW_SRC][1] = &nw_src_values[1];
275 values[CLS_F_IDX_NW_DST][0] = &nw_dst_values[0];
276 values[CLS_F_IDX_NW_DST][1] = &nw_dst_values[1];
278 values[CLS_F_IDX_NW_PROTO][0] = &nw_proto_values[0];
279 values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1];
281 values[CLS_F_IDX_NW_TOS][0] = &nw_tos_values[0];
282 values[CLS_F_IDX_NW_TOS][1] = &nw_tos_values[1];
284 values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0];
285 values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1];
287 values[CLS_F_IDX_TP_DST][0] = &tp_dst_values[0];
288 values[CLS_F_IDX_TP_DST][1] = &tp_dst_values[1];
291 #define N_NW_SRC_VALUES ARRAY_SIZE(nw_src_values)
292 #define N_NW_DST_VALUES ARRAY_SIZE(nw_dst_values)
293 #define N_TUN_ID_VALUES ARRAY_SIZE(tun_id_values)
294 #define N_IN_PORT_VALUES ARRAY_SIZE(in_port_values)
295 #define N_DL_VLAN_VALUES ARRAY_SIZE(dl_vlan_values)
296 #define N_DL_VLAN_PCP_VALUES ARRAY_SIZE(dl_vlan_pcp_values)
297 #define N_DL_TYPE_VALUES ARRAY_SIZE(dl_type_values)
298 #define N_TP_SRC_VALUES ARRAY_SIZE(tp_src_values)
299 #define N_TP_DST_VALUES ARRAY_SIZE(tp_dst_values)
300 #define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values)
301 #define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values)
302 #define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values)
303 #define N_NW_TOS_VALUES ARRAY_SIZE(nw_tos_values)
305 #define N_FLOW_VALUES (N_NW_SRC_VALUES * \
310 N_DL_VLAN_PCP_VALUES * \
316 N_NW_PROTO_VALUES * \
320 get_value(unsigned int *x, unsigned n_values)
322 unsigned int rem = *x % n_values;
327 static struct cls_rule *
328 lookup_with_include_bits(const struct classifier *cls,
329 const struct flow *flow, int include)
333 return classifier_lookup_wild(cls, flow);
335 return classifier_lookup_exact(cls, flow);
336 case CLS_INC_WILD | CLS_INC_EXACT:
337 return classifier_lookup(cls, flow);
344 compare_classifiers(struct classifier *cls, struct tcls *tcls)
346 static const int confidence = 500;
349 assert(classifier_count(cls) == tcls->n_rules);
350 assert(classifier_count_exact(cls) == tcls_count_exact(tcls));
351 for (i = 0; i < confidence; i++) {
352 struct cls_rule *cr0, *cr1;
357 x = rand () % N_FLOW_VALUES;
358 flow.nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)];
359 flow.nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)];
360 flow.tun_id = tun_id_values[get_value(&x, N_TUN_ID_VALUES)];
361 flow.in_port = in_port_values[get_value(&x, N_IN_PORT_VALUES)];
362 flow.dl_vlan = dl_vlan_values[get_value(&x, N_DL_VLAN_VALUES)];
363 flow.dl_vlan_pcp = dl_vlan_pcp_values[get_value(&x,
364 N_DL_VLAN_PCP_VALUES)];
365 flow.dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)];
366 flow.tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)];
367 flow.tp_dst = tp_dst_values[get_value(&x, N_TP_DST_VALUES)];
368 memcpy(flow.dl_src, dl_src_values[get_value(&x, N_DL_SRC_VALUES)],
370 memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)],
372 flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
373 flow.nw_tos = nw_tos_values[get_value(&x, N_NW_TOS_VALUES)];
375 for (include = 1; include <= 3; include++) {
376 cr0 = lookup_with_include_bits(cls, &flow, include);
377 cr1 = tcls_lookup(tcls, &flow, include);
378 assert((cr0 == NULL) == (cr1 == NULL));
380 const struct test_rule *tr0 = test_rule_from_cls_rule(cr0);
381 const struct test_rule *tr1 = test_rule_from_cls_rule(cr1);
383 assert(flow_equal(&cr0->flow, &cr1->flow));
384 assert(cr0->wc.wildcards == cr1->wc.wildcards);
385 assert(cr0->priority == cr1->priority);
386 /* Skip nw_src_mask and nw_dst_mask, because they are derived
387 * members whose values are used only for optimization. */
388 assert(tr0->aux == tr1->aux);
395 free_rule(struct cls_rule *cls_rule, void *cls)
397 classifier_remove(cls, cls_rule);
398 free(test_rule_from_cls_rule(cls_rule));
402 destroy_classifier(struct classifier *cls)
404 classifier_for_each(cls, CLS_INC_ALL, free_rule, cls);
405 classifier_destroy(cls);
409 check_tables(const struct classifier *cls,
410 int n_tables, int n_buckets, int n_rules)
412 int found_tables = 0;
413 int found_buckets = 0;
417 BUILD_ASSERT(CLS_N_FIELDS == ARRAY_SIZE(cls->tables));
418 for (i = 0; i < CLS_N_FIELDS; i++) {
419 const struct cls_bucket *bucket;
420 if (!hmap_is_empty(&cls->tables[i])) {
423 HMAP_FOR_EACH (bucket, hmap_node, &cls->tables[i]) {
425 assert(!list_is_empty(&bucket->rules));
426 found_rules += list_size(&bucket->rules);
430 if (!hmap_is_empty(&cls->exact_table)) {
433 found_rules += hmap_count(&cls->exact_table);
436 assert(n_tables == -1 || found_tables == n_tables);
437 assert(n_rules == -1 || found_rules == n_rules);
438 assert(n_buckets == -1 || found_buckets == n_buckets);
441 static struct test_rule *
442 make_rule(int wc_fields, unsigned int priority, int value_pat)
444 const struct cls_field *f;
445 struct test_rule *rule;
450 memset(&flow, 0, sizeof flow);
451 for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
452 int f_idx = f - cls_fields;
453 if (wc_fields & (1u << f_idx)) {
454 wildcards |= f->wildcards;
456 int value_idx = (value_pat & (1u << f_idx)) != 0;
457 memcpy((char *) &flow + f->ofs, values[f_idx][value_idx], f->len);
461 rule = xzalloc(sizeof *rule);
462 cls_rule_from_flow(&flow, wildcards, !wildcards ? UINT_MAX : priority,
468 shuffle(unsigned int *p, size_t n)
470 for (; n > 1; n--, p++) {
471 unsigned int *q = &p[rand() % n];
472 unsigned int tmp = *p;
478 /* Tests an empty classifier. */
480 test_empty(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
482 struct classifier cls;
485 classifier_init(&cls);
487 assert(classifier_is_empty(&cls));
488 assert(tcls_is_empty(&tcls));
489 compare_classifiers(&cls, &tcls);
490 classifier_destroy(&cls);
494 /* Destroys a null classifier. */
496 test_destroy_null(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
498 classifier_destroy(NULL);
501 /* Tests classification with one rule at a time. */
503 test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
505 unsigned int wc_fields; /* Hilarious. */
507 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
508 struct classifier cls;
509 struct test_rule *rule, *tcls_rule;
512 rule = make_rule(wc_fields,
513 hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
515 classifier_init(&cls);
518 tcls_rule = tcls_insert(&tcls, rule);
519 assert(!classifier_insert(&cls, &rule->cls_rule));
520 check_tables(&cls, 1, 1, 1);
521 compare_classifiers(&cls, &tcls);
523 classifier_remove(&cls, &rule->cls_rule);
524 tcls_remove(&tcls, tcls_rule);
525 assert(classifier_is_empty(&cls));
526 assert(tcls_is_empty(&tcls));
527 compare_classifiers(&cls, &tcls);
530 classifier_destroy(&cls);
535 /* Tests replacing one rule by another. */
537 test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
539 unsigned int wc_fields;
541 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
542 struct classifier cls;
543 struct test_rule *rule1;
544 struct test_rule *rule2;
547 rule1 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
548 rule2 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
552 classifier_init(&cls);
554 tcls_insert(&tcls, rule1);
555 assert(!classifier_insert(&cls, &rule1->cls_rule));
556 check_tables(&cls, 1, 1, 1);
557 compare_classifiers(&cls, &tcls);
561 tcls_insert(&tcls, rule2);
562 assert(test_rule_from_cls_rule(
563 classifier_insert(&cls, &rule2->cls_rule)) == rule1);
565 check_tables(&cls, 1, 1, 1);
566 compare_classifiers(&cls, &tcls);
568 destroy_classifier(&cls);
573 table_mask(int table)
575 return ((1u << CLS_N_FIELDS) - 1) & ~((1u << table) - 1);
579 random_wcf_in_table(int table, int seed)
581 int wc_fields = (1u << table) | hash_int(seed, 0);
582 return wc_fields & table_mask(table);
585 /* Tests classification with two rules at a time that fall into the same
588 test_two_rules_in_one_bucket(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
590 int table, rel_pri, wcf_pat, value_pat;
592 for (table = 0; table <= CLS_N_FIELDS; table++) {
593 for (rel_pri = -1; rel_pri <= +1; rel_pri++) {
594 for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) {
595 int n_value_pats = table == CLS_N_FIELDS - 1 ? 1 : 2;
596 for (value_pat = 0; value_pat < n_value_pats; value_pat++) {
597 struct test_rule *rule1, *tcls_rule1;
598 struct test_rule *rule2, *tcls_rule2;
599 struct test_rule *displaced_rule;
600 struct classifier cls;
602 unsigned int pri1, pri2;
605 if (table != CLS_F_IDX_EXACT) {
606 /* We can use identical priorities in this test because
607 * the classifier always chooses the rule added later
608 * for equal-priority rules that fall into the same
610 pri1 = table * 257 + 50;
611 pri2 = pri1 + rel_pri;
614 ? random_wcf_in_table(table, pri1)
617 ? random_wcf_in_table(table, pri2)
620 wcf1 &= ~(1u << (CLS_N_FIELDS - 1));
621 wcf2 &= ~(1u << (CLS_N_FIELDS - 1));
624 /* This classifier always puts exact-match rules at
625 * maximum priority. */
626 pri1 = pri2 = UINT_MAX;
628 /* No wildcard fields. */
632 rule1 = make_rule(wcf1, pri1, 0);
633 rule2 = make_rule(wcf2, pri2,
634 value_pat << (CLS_N_FIELDS - 1));
636 classifier_init(&cls);
639 tcls_rule1 = tcls_insert(&tcls, rule1);
640 tcls_rule2 = tcls_insert(&tcls, rule2);
641 assert(!classifier_insert(&cls, &rule1->cls_rule));
642 displaced_rule = test_rule_from_cls_rule(
643 classifier_insert(&cls, &rule2->cls_rule));
644 if (wcf1 != wcf2 || pri1 != pri2 || value_pat) {
645 assert(!displaced_rule);
647 check_tables(&cls, 1, 1, 2);
648 compare_classifiers(&cls, &tcls);
650 classifier_remove(&cls, &rule1->cls_rule);
651 tcls_remove(&tcls, tcls_rule1);
652 check_tables(&cls, 1, 1, 1);
653 compare_classifiers(&cls, &tcls);
655 assert(displaced_rule == rule1);
656 check_tables(&cls, 1, 1, 1);
657 compare_classifiers(&cls, &tcls);
661 classifier_remove(&cls, &rule2->cls_rule);
662 tcls_remove(&tcls, tcls_rule2);
663 compare_classifiers(&cls, &tcls);
666 destroy_classifier(&cls);
674 /* Tests classification with two rules at a time that fall into the same
675 * table but different buckets. */
677 test_two_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
679 int table, rel_pri, wcf_pat;
681 /* Skip tables 0 and CLS_F_IDX_EXACT because they have one bucket. */
682 for (table = 1; table < CLS_N_FIELDS; table++) {
683 for (rel_pri = -1; rel_pri <= +1; rel_pri++) {
684 for (wcf_pat = 0; wcf_pat < 5; wcf_pat++) {
685 struct test_rule *rule1, *tcls_rule1;
686 struct test_rule *rule2, *tcls_rule2;
687 struct classifier cls;
689 unsigned int pri1, pri2;
691 int value_mask, value_pat1, value_pat2;
694 /* We can use identical priorities in this test because the
695 * classifier always chooses the rule added later for
696 * equal-priority rules that fall into the same table. */
697 pri1 = table * 257 + 50;
698 pri2 = pri1 + rel_pri;
701 wcf1 = wcf2 = random_wcf_in_table(table, pri1);
704 ? random_wcf_in_table(table, pri1)
707 ? random_wcf_in_table(table, pri2)
711 /* Generate value patterns that will put the two rules into
712 * different buckets. */
713 value_mask = ((1u << table) - 1);
714 value_pat1 = hash_int(pri1, 1) & value_mask;
717 value_pat2 = (hash_int(pri2, i++) & value_mask);
718 } while (value_pat1 == value_pat2);
719 rule1 = make_rule(wcf1, pri1, value_pat1);
720 rule2 = make_rule(wcf2, pri2, value_pat2);
722 classifier_init(&cls);
725 tcls_rule1 = tcls_insert(&tcls, rule1);
726 tcls_rule2 = tcls_insert(&tcls, rule2);
727 assert(!classifier_insert(&cls, &rule1->cls_rule));
728 assert(!classifier_insert(&cls, &rule2->cls_rule));
729 check_tables(&cls, 1, 2, 2);
730 compare_classifiers(&cls, &tcls);
732 classifier_remove(&cls, &rule1->cls_rule);
733 tcls_remove(&tcls, tcls_rule1);
734 check_tables(&cls, 1, 1, 1);
735 compare_classifiers(&cls, &tcls);
738 classifier_remove(&cls, &rule2->cls_rule);
739 tcls_remove(&tcls, tcls_rule2);
740 compare_classifiers(&cls, &tcls);
743 classifier_destroy(&cls);
750 /* Tests classification with two rules at a time that fall into different
753 test_two_rules_in_different_tables(int argc OVS_UNUSED,
754 char *argv[] OVS_UNUSED)
756 int table1, table2, rel_pri, wcf_pat;
758 for (table1 = 0; table1 < CLS_N_FIELDS; table1++) {
759 for (table2 = table1 + 1; table2 <= CLS_N_FIELDS; table2++) {
760 for (rel_pri = 0; rel_pri < 2; rel_pri++) {
761 for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) {
762 struct test_rule *rule1, *tcls_rule1;
763 struct test_rule *rule2, *tcls_rule2;
764 struct classifier cls;
766 unsigned int pri1, pri2;
769 /* We must use unique priorities in this test because the
770 * classifier makes the rule choice undefined for rules of
771 * equal priority that fall into different tables. (In
772 * practice, lower-numbered tables win.) */
773 pri1 = table1 * 257 + 50;
774 pri2 = rel_pri ? pri1 - 1 : pri1 + 1;
777 ? random_wcf_in_table(table1, pri1)
780 ? random_wcf_in_table(table2, pri2)
783 if (table2 == CLS_F_IDX_EXACT) {
788 rule1 = make_rule(wcf1, pri1, 0);
789 rule2 = make_rule(wcf2, pri2, 0);
791 classifier_init(&cls);
794 tcls_rule1 = tcls_insert(&tcls, rule1);
795 tcls_rule2 = tcls_insert(&tcls, rule2);
796 assert(!classifier_insert(&cls, &rule1->cls_rule));
797 assert(!classifier_insert(&cls, &rule2->cls_rule));
798 check_tables(&cls, 2, 2, 2);
799 compare_classifiers(&cls, &tcls);
801 classifier_remove(&cls, &rule1->cls_rule);
802 tcls_remove(&tcls, tcls_rule1);
803 check_tables(&cls, 1, 1, 1);
804 compare_classifiers(&cls, &tcls);
807 classifier_remove(&cls, &rule2->cls_rule);
808 tcls_remove(&tcls, tcls_rule2);
809 compare_classifiers(&cls, &tcls);
812 classifier_destroy(&cls);
820 /* Tests classification with many rules at a time that fall into the same
821 * bucket but have unique priorities (and various wildcards). */
823 test_many_rules_in_one_bucket(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
825 enum { MAX_RULES = 50 };
826 int iteration, table;
828 for (iteration = 0; iteration < 3; iteration++) {
829 for (table = 0; table <= CLS_N_FIELDS; table++) {
830 unsigned int priorities[MAX_RULES];
831 struct classifier cls;
835 srand(hash_int(table, iteration));
836 for (i = 0; i < MAX_RULES; i++) {
837 priorities[i] = i * 129;
839 shuffle(priorities, ARRAY_SIZE(priorities));
841 classifier_init(&cls);
844 for (i = 0; i < MAX_RULES; i++) {
845 struct test_rule *rule;
846 unsigned int priority = priorities[i];
849 wcf = random_wcf_in_table(table, priority);
850 rule = make_rule(wcf, priority,
851 table == CLS_F_IDX_EXACT ? i : 1234);
852 tcls_insert(&tcls, rule);
853 assert(!classifier_insert(&cls, &rule->cls_rule));
854 check_tables(&cls, 1, 1, i + 1);
855 compare_classifiers(&cls, &tcls);
858 destroy_classifier(&cls);
864 /* Tests classification with many rules at a time that fall into the same
865 * table but random buckets. */
867 test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
869 enum { MAX_RULES = 50 };
870 int iteration, table;
872 for (iteration = 0; iteration < 3; iteration++) {
873 for (table = 0; table < CLS_N_FIELDS; table++) {
874 unsigned int priorities[MAX_RULES];
875 struct classifier cls;
879 srand(hash_int(table, iteration));
880 for (i = 0; i < MAX_RULES; i++) {
881 priorities[i] = i * 129;
883 shuffle(priorities, ARRAY_SIZE(priorities));
885 classifier_init(&cls);
888 for (i = 0; i < MAX_RULES; i++) {
889 struct test_rule *rule;
890 unsigned int priority = priorities[i];
893 wcf = random_wcf_in_table(table, priority);
894 rule = make_rule(wcf, priority, hash_int(priority, 1));
895 tcls_insert(&tcls, rule);
896 assert(!classifier_insert(&cls, &rule->cls_rule));
897 check_tables(&cls, 1, -1, i + 1);
898 compare_classifiers(&cls, &tcls);
901 destroy_classifier(&cls);
907 /* Tests classification with many rules at a time that fall into random buckets
908 * in random tables. */
910 test_many_rules_in_different_tables(int argc OVS_UNUSED,
911 char *argv[] OVS_UNUSED)
913 enum { MAX_RULES = 50 };
916 for (iteration = 0; iteration < 30; iteration++) {
917 unsigned int priorities[MAX_RULES];
918 struct classifier cls;
923 for (i = 0; i < MAX_RULES; i++) {
924 priorities[i] = i * 129;
926 shuffle(priorities, ARRAY_SIZE(priorities));
928 classifier_init(&cls);
931 for (i = 0; i < MAX_RULES; i++) {
932 struct test_rule *rule;
933 unsigned int priority = priorities[i];
934 int table = rand() % (CLS_N_FIELDS + 1);
935 int wcf = random_wcf_in_table(table, rand());
936 int value_pat = rand() & ((1u << CLS_N_FIELDS) - 1);
937 rule = make_rule(wcf, priority, value_pat);
938 tcls_insert(&tcls, rule);
939 assert(!classifier_insert(&cls, &rule->cls_rule));
940 check_tables(&cls, -1, -1, i + 1);
941 compare_classifiers(&cls, &tcls);
944 while (!classifier_is_empty(&cls)) {
945 struct test_rule *rule = xmemdup(tcls.rules[rand() % tcls.n_rules],
946 sizeof(struct test_rule));
947 int include = rand() % 2 ? CLS_INC_WILD : CLS_INC_EXACT;
948 include |= (rule->cls_rule.wc.wildcards
949 ? CLS_INC_WILD : CLS_INC_EXACT);
950 classifier_for_each_match(&cls, &rule->cls_rule, include,
952 tcls_delete_matches(&tcls, &rule->cls_rule, include);
953 compare_classifiers(&cls, &tcls);
957 destroy_classifier(&cls);
962 static const struct command commands[] = {
963 {"empty", 0, 0, test_empty},
964 {"destroy-null", 0, 0, test_destroy_null},
965 {"single-rule", 0, 0, test_single_rule},
966 {"rule-replacement", 0, 0, test_rule_replacement},
967 {"two-rules-in-one-bucket", 0, 0, test_two_rules_in_one_bucket},
968 {"two-rules-in-one-table", 0, 0, test_two_rules_in_one_table},
969 {"two-rules-in-different-tables", 0, 0,
970 test_two_rules_in_different_tables},
971 {"many-rules-in-one-bucket", 0, 0, test_many_rules_in_one_bucket},
972 {"many-rules-in-one-table", 0, 0, test_many_rules_in_one_table},
973 {"many-rules-in-different-tables", 0, 0,
974 test_many_rules_in_different_tables},
979 main(int argc, char *argv[])
982 run_command(argc - 1, argv + 1, commands);