2 * Copyright (c) 2009, 2010 Nicira Networks.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 /* "White box" tests for classifier.
19 * With very few exceptions, these tests obtain complete coverage of every
20 * basic block and every branch in the classifier implementation, e.g. a clean
21 * report from "gcov -b". (Covering the exceptions would require finding
22 * collisions in the hash function used for flow data, etc.)
24 * This test should receive a clean report from "valgrind --leak-check=full":
25 * it frees every heap block that it allocates.
29 #include "classifier.h"
32 #include "byte-order.h"
33 #include "command-line.h"
/* A rule under test: wraps the library's cls_rule and tags it with
 * test-only auxiliary data so lookups can be cross-checked.
 * (NOTE(review): the enclosing struct declaration is elided here.) */
41 int aux; /* Auxiliary data. */
42 struct cls_rule cls_rule; /* Classifier rule data. */
/* Converts 'rule' back to its containing test_rule, or returns NULL if
 * 'rule' is NULL (lets callers pass lookup results through directly). */
45 static struct test_rule *
46 test_rule_from_cls_rule(const struct cls_rule *rule)
48 return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
51 /* Trivial (linear) classifier. */
/* 'rules' is kept ordered by decreasing priority (see tcls_insert);
 * 'allocated_rules' is the array capacity for x2nrealloc growth. */
54 size_t allocated_rules;
55 struct test_rule **rules;
/* Initializes 'tcls' as an empty linear classifier. */
59 tcls_init(struct tcls *tcls)
62 tcls->allocated_rules = 0;
/* Frees every rule owned by 'tcls' (rules are heap copies made by
 * tcls_insert). */
67 tcls_destroy(struct tcls *tcls)
72 for (i = 0; i < tcls->n_rules; i++) {
/* Returns the number of exact-match rules (those with no wildcard bits). */
80 tcls_count_exact(const struct tcls *tcls)
86 for (i = 0; i < tcls->n_rules; i++) {
87 n_exact += tcls->rules[i]->cls_rule.wc.wildcards == 0;
/* Returns true if 'tcls' holds no rules. */
93 tcls_is_empty(const struct tcls *tcls)
95 return tcls->n_rules == 0;
98 static struct test_rule *
/* Inserts a heap copy of 'rule' into 'tcls', keeping rules[] ordered by
 * decreasing priority.  A rule with identical priority, wildcards, and
 * flow is replaced in place.  Returns the inserted copy (owned by 'tcls'). */
99 tcls_insert(struct tcls *tcls, const struct test_rule *rule)
/* Exact-match rules must carry maximum priority in this test scheme. */
103 assert(rule->cls_rule.wc.wildcards || rule->cls_rule.priority == UINT_MAX);
104 for (i = 0; i < tcls->n_rules; i++) {
105 const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
106 if (pos->priority == rule->cls_rule.priority
107 && pos->wc.wildcards == rule->cls_rule.wc.wildcards
108 && flow_equal(&pos->flow, &rule->cls_rule.flow)) {
/* Duplicate rule: free the old copy and substitute the new one. */
110 * XXX flow_equal should ignore wildcarded fields */
111 free(tcls->rules[i]);
112 tcls->rules[i] = xmemdup(rule, sizeof *rule);
113 return tcls->rules[i];
114 } else if (pos->priority < rule->cls_rule.priority) {
/* First lower-priority rule found: new rule belongs at index i. */
/* Grow the array if full, then shift later rules up one slot. */
119 if (tcls->n_rules >= tcls->allocated_rules) {
120 tcls->rules = x2nrealloc(tcls->rules, &tcls->allocated_rules,
121 sizeof *tcls->rules);
123 if (i != tcls->n_rules) {
124 memmove(&tcls->rules[i + 1], &tcls->rules[i],
125 sizeof *tcls->rules * (tcls->n_rules - i));
127 tcls->rules[i] = xmemdup(rule, sizeof *rule);
129 return tcls->rules[i];
/* Removes 'rule' from 'cls', compacting the array over its slot.
 * (NOTE(review): the free()/n_rules-- and not-found handling are elided.) */
133 tcls_remove(struct tcls *cls, const struct test_rule *rule)
137 for (i = 0; i < cls->n_rules; i++) {
138 struct test_rule *pos = cls->rules[i];
141 memmove(&cls->rules[i], &cls->rules[i + 1],
142 sizeof *cls->rules * (cls->n_rules - i - 1));
/* Reads a 32-bit value from possibly unaligned memory via memcpy,
 * avoiding undefined behavior from a misaligned pointer cast. */
151 read_uint32(const void *p)
154 memcpy(&x, p, sizeof x);
/* Returns true if 'fixed' matches 'wild': every field must either be
 * fully wildcarded, compare equal byte-for-byte, or (for nw_src/nw_dst
 * only) match under the CIDR-style prefix mask encoded in the wildcard
 * bit counts. */
159 match(const struct cls_rule *wild, const struct flow *fixed)
163 for (f_idx = 0; f_idx < CLS_N_FIELDS; f_idx++) {
164 const struct cls_field *f = &cls_fields[f_idx];
165 void *wild_field = (char *) &wild->flow + f->ofs;
166 void *fixed_field = (char *) fixed + f->ofs;
168 if ((wild->wc.wildcards & f->wildcards) == f->wildcards ||
169 !memcmp(wild_field, fixed_field, f->len)) {
170 /* Definite match. */
/* Partially wildcarded field: compare under the prefix mask derived
 * from the OpenFlow nw_src/nw_dst wildcard bit counts. */
174 if (wild->wc.wildcards & f->wildcards) {
175 uint32_t test = read_uint32(wild_field);
176 uint32_t ip = read_uint32(fixed_field);
177 int shift = (f_idx == CLS_F_IDX_NW_SRC
178 ? OFPFW_NW_SRC_SHIFT : OFPFW_NW_DST_SHIFT);
179 uint32_t mask = flow_nw_bits_to_mask(wild->wc.wildcards, shift);
180 if (!((test ^ ip) & mask)) {
190 static struct cls_rule *
/* Returns the highest-priority rule in 'cls' that matches 'flow' and is
 * selected by the CLS_INC_WILD/CLS_INC_EXACT bits in 'include', or NULL.
 * Relies on rules[] being ordered by decreasing priority. */
191 tcls_lookup(const struct tcls *cls, const struct flow *flow, int include)
195 for (i = 0; i < cls->n_rules; i++) {
196 struct test_rule *pos = cls->rules[i];
197 uint32_t wildcards = pos->cls_rule.wc.wildcards;
198 if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT)
199 && match(&pos->cls_rule, flow)) {
200 return &pos->cls_rule;
/* Deletes from 'cls' every rule matching 'target' that is selected by
 * 'include'.  The index only advances when no rule is removed, since
 * tcls_remove() compacts the array in place. */
207 tcls_delete_matches(struct tcls *cls,
208 const struct cls_rule *target,
213 for (i = 0; i < cls->n_rules; ) {
214 struct test_rule *pos = cls->rules[i];
215 uint32_t wildcards = pos->cls_rule.wc.wildcards;
216 if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT)
217 && match(target, &pos->cls_rule.flow)) {
218 tcls_remove(cls, pos);
/* Two candidate values per flow field, used to enumerate every distinct
 * test flow.  Network-order fields are built with CONSTANT_HTONL/HTONS. */
225 static uint32_t nw_src_values[] = { CONSTANT_HTONL(0xc0a80001),
226 CONSTANT_HTONL(0xc0a04455) };
227 static uint32_t nw_dst_values[] = { CONSTANT_HTONL(0xc0a80002),
228 CONSTANT_HTONL(0xc0a04455) };
229 static uint32_t tun_id_values[] = { 0, 0xffff0000 };
230 static uint16_t in_port_values[] = { CONSTANT_HTONS(1),
231 CONSTANT_HTONS(OFPP_LOCAL) };
232 static uint16_t dl_vlan_values[] = { CONSTANT_HTONS(101), CONSTANT_HTONS(0) };
233 static uint8_t dl_vlan_pcp_values[] = { 7, 0 };
234 static uint16_t dl_type_values[]
235 = { CONSTANT_HTONS(ETH_TYPE_IP), CONSTANT_HTONS(ETH_TYPE_ARP) };
236 static uint16_t tp_src_values[] = { CONSTANT_HTONS(49362),
237 CONSTANT_HTONS(80) };
238 static uint16_t tp_dst_values[] = { CONSTANT_HTONS(6667), CONSTANT_HTONS(22) };
239 static uint8_t dl_src_values[][6] = { { 0x00, 0x02, 0xe3, 0x0f, 0x80, 0xa4 },
240 { 0x5e, 0x33, 0x7f, 0x5f, 0x1e, 0x99 } };
241 static uint8_t dl_dst_values[][6] = { { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 },
242 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
243 static uint8_t nw_proto_values[] = { IP_TYPE_TCP, IP_TYPE_ICMP };
244 static uint8_t nw_tos_values[] = { 49, 0 };
/* values[f][i] points at the i'th candidate value for classifier field f,
 * so make_rule() can copy values generically by field offset/length.
 * (NOTE(review): the initializer function's header is elided here.) */
246 static void *values[CLS_N_FIELDS][2];
251 values[CLS_F_IDX_TUN_ID][0] = &tun_id_values[0];
252 values[CLS_F_IDX_TUN_ID][1] = &tun_id_values[1];
254 values[CLS_F_IDX_IN_PORT][0] = &in_port_values[0];
255 values[CLS_F_IDX_IN_PORT][1] = &in_port_values[1];
257 values[CLS_F_IDX_DL_VLAN][0] = &dl_vlan_values[0];
258 values[CLS_F_IDX_DL_VLAN][1] = &dl_vlan_values[1];
260 values[CLS_F_IDX_DL_VLAN_PCP][0] = &dl_vlan_pcp_values[0];
261 values[CLS_F_IDX_DL_VLAN_PCP][1] = &dl_vlan_pcp_values[1];
263 values[CLS_F_IDX_DL_SRC][0] = dl_src_values[0];
264 values[CLS_F_IDX_DL_SRC][1] = dl_src_values[1];
266 values[CLS_F_IDX_DL_DST][0] = dl_dst_values[0];
267 values[CLS_F_IDX_DL_DST][1] = dl_dst_values[1];
269 values[CLS_F_IDX_DL_TYPE][0] = &dl_type_values[0];
270 values[CLS_F_IDX_DL_TYPE][1] = &dl_type_values[1];
272 values[CLS_F_IDX_NW_SRC][0] = &nw_src_values[0];
273 values[CLS_F_IDX_NW_SRC][1] = &nw_src_values[1];
275 values[CLS_F_IDX_NW_DST][0] = &nw_dst_values[0];
276 values[CLS_F_IDX_NW_DST][1] = &nw_dst_values[1];
278 values[CLS_F_IDX_NW_PROTO][0] = &nw_proto_values[0];
279 values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1];
281 values[CLS_F_IDX_NW_TOS][0] = &nw_tos_values[0];
282 values[CLS_F_IDX_NW_TOS][1] = &nw_tos_values[1];
284 values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0];
285 values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1];
287 values[CLS_F_IDX_TP_DST][0] = &tp_dst_values[0];
288 values[CLS_F_IDX_TP_DST][1] = &tp_dst_values[1];
/* Number of candidate values per field, and (N_FLOW_VALUES) the total
 * number of distinct test flows: the product over all fields. */
291 #define N_NW_SRC_VALUES ARRAY_SIZE(nw_src_values)
292 #define N_NW_DST_VALUES ARRAY_SIZE(nw_dst_values)
293 #define N_TUN_ID_VALUES ARRAY_SIZE(tun_id_values)
294 #define N_IN_PORT_VALUES ARRAY_SIZE(in_port_values)
295 #define N_DL_VLAN_VALUES ARRAY_SIZE(dl_vlan_values)
296 #define N_DL_VLAN_PCP_VALUES ARRAY_SIZE(dl_vlan_pcp_values)
297 #define N_DL_TYPE_VALUES ARRAY_SIZE(dl_type_values)
298 #define N_TP_SRC_VALUES ARRAY_SIZE(tp_src_values)
299 #define N_TP_DST_VALUES ARRAY_SIZE(tp_dst_values)
300 #define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values)
301 #define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values)
302 #define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values)
303 #define N_NW_TOS_VALUES ARRAY_SIZE(nw_tos_values)
305 #define N_FLOW_VALUES (N_NW_SRC_VALUES * \
310 N_DL_VLAN_PCP_VALUES * \
316 N_NW_PROTO_VALUES * \
/* Extracts one mixed-radix digit from '*x': returns *x % n_values.
 * (NOTE(review): presumably also divides *x by n_values for the next
 * field — that part of the body is elided; confirm against full source.) */
320 get_value(unsigned int *x, unsigned n_values)
322 unsigned int rem = *x % n_values;
327 static struct cls_rule *
/* Dispatches to the classifier lookup variant selected by 'include'
 * (wild-only, exact-only, or combined). */
328 lookup_with_include_bits(const struct classifier *cls,
329 const struct flow *flow, int include)
333 return classifier_lookup_wild(cls, flow);
335 return classifier_lookup_exact(cls, flow);
336 case CLS_INC_WILD | CLS_INC_EXACT:
337 return classifier_lookup(cls, flow);
/* Cross-checks 'cls' against the trivial reference classifier 'tcls':
 * rule counts must agree, and 'confidence' randomly chosen flows must
 * yield equivalent lookup results under every 'include' combination. */
344 compare_classifiers(struct classifier *cls, struct tcls *tcls)
346 static const int confidence = 500;
349 assert(classifier_count(cls) == tcls->n_rules);
350 assert(classifier_count_exact(cls) == tcls_count_exact(tcls));
351 for (i = 0; i < confidence; i++) {
352 struct cls_rule *cr0, *cr1;
/* Decode a random flow index 'x' into one concrete flow, one
 * mixed-radix digit per field. */
357 x = rand () % N_FLOW_VALUES;
358 flow.nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)];
359 flow.nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)];
360 flow.tun_id = tun_id_values[get_value(&x, N_TUN_ID_VALUES)];
361 flow.in_port = in_port_values[get_value(&x, N_IN_PORT_VALUES)];
362 flow.dl_vlan = dl_vlan_values[get_value(&x, N_DL_VLAN_VALUES)];
363 flow.dl_vlan_pcp = dl_vlan_pcp_values[get_value(&x,
364 N_DL_VLAN_PCP_VALUES)];
365 flow.dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)];
366 flow.tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)];
367 flow.tp_dst = tp_dst_values[get_value(&x, N_TP_DST_VALUES)];
368 memcpy(flow.dl_src, dl_src_values[get_value(&x, N_DL_SRC_VALUES)],
370 memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)],
372 flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
373 flow.nw_tos = nw_tos_values[get_value(&x, N_NW_TOS_VALUES)];
/* Both classifiers must agree for all three include-bit combinations. */
375 for (include = 1; include <= 3; include++) {
376 cr0 = lookup_with_include_bits(cls, &flow, include);
377 cr1 = tcls_lookup(tcls, &flow, include);
378 assert((cr0 == NULL) == (cr1 == NULL));
380 const struct test_rule *tr0 = test_rule_from_cls_rule(cr0);
381 const struct test_rule *tr1 = test_rule_from_cls_rule(cr1);
383 assert(flow_equal(&cr0->flow, &cr1->flow));
384 assert(cr0->wc.wildcards == cr1->wc.wildcards);
385 assert(cr0->priority == cr1->priority);
386 /* Skip nw_src_mask and nw_dst_mask, because they are derived
387 * members whose values are used only for optimization. */
388 assert(tr0->aux == tr1->aux);
/* classifier_for_each callback: removes 'cls_rule' from the classifier
 * in 'cls' and frees its enclosing test_rule. */
395 free_rule(struct cls_rule *cls_rule, void *cls)
397 classifier_remove(cls, cls_rule);
398 free(test_rule_from_cls_rule(cls_rule));
/* Frees every rule in 'cls', then destroys the classifier itself. */
402 destroy_classifier(struct classifier *cls)
404 classifier_for_each(cls, CLS_INC_ALL, free_rule, cls);
405 classifier_destroy(cls);
/* White-box check of classifier internals: walks every hash table and
 * bucket and asserts the observed counts equal 'n_tables', 'n_buckets',
 * and 'n_rules'.  Passing -1 for any expected count skips that check. */
409 check_tables(const struct classifier *cls,
410 int n_tables, int n_buckets, int n_rules)
412 int found_tables = 0;
413 int found_buckets = 0;
/* Guard against the classifier layout changing under this test. */
417 BUILD_ASSERT(CLS_N_FIELDS == ARRAY_SIZE(cls->tables));
418 for (i = 0; i < CLS_N_FIELDS; i++) {
419 const struct cls_bucket *bucket;
420 if (!hmap_is_empty(&cls->tables[i])) {
423 HMAP_FOR_EACH (bucket, hmap_node, &cls->tables[i]) {
425 assert(!list_is_empty(&bucket->rules));
426 found_rules += list_size(&bucket->rules);
/* The exact-match table counts as one more table when nonempty. */
430 if (!hmap_is_empty(&cls->exact_table)) {
433 found_rules += hmap_count(&cls->exact_table);
436 assert(n_tables == -1 || found_tables == n_tables);
437 assert(n_rules == -1 || found_rules == n_rules);
438 assert(n_buckets == -1 || found_buckets == n_buckets);
441 static struct test_rule *
/* Builds a heap-allocated test_rule.  Each bit in 'wc_fields' wildcards
 * the corresponding field; non-wildcarded fields take values[f][0] or
 * values[f][1] per the matching bit of 'value_pat'.  Exact-match rules
 * (no wildcards) are forced to UINT_MAX priority. */
442 make_rule(int wc_fields, unsigned int priority, int value_pat)
444 const struct cls_field *f;
445 struct test_rule *rule;
450 memset(&flow, 0, sizeof flow);
451 for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
452 int f_idx = f - cls_fields;
453 if (wc_fields & (1u << f_idx)) {
454 wildcards |= f->wildcards;
456 int value_idx = (value_pat & (1u << f_idx)) != 0;
457 memcpy((char *) &flow + f->ofs, values[f_idx][value_idx], f->len);
461 rule = xzalloc(sizeof *rule);
462 cls_rule_from_flow(&flow, wildcards, !wildcards ? UINT_MAX : priority,
/* Randomly permutes p[0..n-1] (Fisher-Yates style: swap each slot with a
 * random later slot; the swap's completion is elided in this listing). */
468 shuffle(unsigned int *p, size_t n)
470 for (; n > 1; n--, p++) {
471 unsigned int *q = &p[rand() % n];
472 unsigned int tmp = *p;
478 /* Tests an empty classifier. */
480 test_empty(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
482 struct classifier cls;
485 classifier_init(&cls);
487 assert(classifier_is_empty(&cls));
488 assert(tcls_is_empty(&tcls));
489 compare_classifiers(&cls, &tcls);
490 classifier_destroy(&cls);
494 /* Destroys a null classifier. */
/* classifier_destroy(NULL) must be a harmless no-op. */
496 test_destroy_null(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
498 classifier_destroy(NULL);
501 /* Tests classification with one rule at a time. */
/* Iterates over every possible wildcard-field combination, inserting one
 * rule into both classifiers, cross-checking, then removing it. */
503 test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
505 unsigned int wc_fields; /* Hilarious. */
507 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
508 struct classifier cls;
509 struct test_rule *rule, *tcls_rule;
/* Priority derived from wc_fields just to vary it deterministically. */
512 rule = make_rule(wc_fields,
513 hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
515 classifier_init(&cls);
518 tcls_rule = tcls_insert(&tcls, rule);
520 assert(!classifier_insert(&cls, &rule->cls_rule));
522 classifier_insert_exact(&cls, &rule->cls_rule);
524 check_tables(&cls, 1, 1, 1);
525 compare_classifiers(&cls, &tcls);
527 classifier_remove(&cls, &rule->cls_rule);
528 tcls_remove(&tcls, tcls_rule);
529 assert(classifier_is_empty(&cls));
530 assert(tcls_is_empty(&tcls));
531 compare_classifiers(&cls, &tcls);
534 classifier_destroy(&cls);
539 /* Tests replacing one rule by another. */
/* Inserts two rules identical in flow/wildcards/priority; the second
 * insert must displace the first, which classifier_insert() returns. */
541 test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
543 unsigned int wc_fields;
545 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
546 struct classifier cls;
547 struct test_rule *rule1;
548 struct test_rule *rule2;
551 rule1 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
552 rule2 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
556 classifier_init(&cls);
558 tcls_insert(&tcls, rule1);
559 assert(!classifier_insert(&cls, &rule1->cls_rule));
560 check_tables(&cls, 1, 1, 1);
561 compare_classifiers(&cls, &tcls);
/* Inserting the duplicate must return the displaced rule1. */
565 tcls_insert(&tcls, rule2);
566 assert(test_rule_from_cls_rule(
567 classifier_insert(&cls, &rule2->cls_rule)) == rule1);
569 check_tables(&cls, 1, 1, 1);
570 compare_classifiers(&cls, &tcls);
572 destroy_classifier(&cls);
/* Returns the wildcard-field bits usable by classifier table 'table':
 * all field bits at or above 'table'. */
577 table_mask(int table)
579 return ((1u << CLS_N_FIELDS) - 1) & ~((1u << table) - 1);
/* Returns a pseudorandom wc_fields value (seeded by 'seed') guaranteed
 * to land a rule in table 'table': bit 'table' is always set. */
583 random_wcf_in_table(int table, int seed)
585 int wc_fields = (1u << table) | hash_int(seed, 0);
586 return wc_fields & table_mask(table);
589 /* Tests classification with two rules at a time that fall into the same
/* Exercises every combination of relative priority, which of the two
 * rules gets a random wildcard pattern, and value pattern, for rules
 * that land in the same bucket of the same table. */
592 test_two_rules_in_one_bucket(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
594 int table, rel_pri, wcf_pat, value_pat;
596 for (table = 0; table <= CLS_N_FIELDS; table++) {
597 for (rel_pri = -1; rel_pri <= +1; rel_pri++) {
598 for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) {
599 int n_value_pats = table == CLS_N_FIELDS - 1 ? 1 : 2;
600 for (value_pat = 0; value_pat < n_value_pats; value_pat++) {
601 struct test_rule *rule1, *tcls_rule1;
602 struct test_rule *rule2, *tcls_rule2;
603 struct test_rule *displaced_rule;
604 struct classifier cls;
606 unsigned int pri1, pri2;
609 if (table != CLS_F_IDX_EXACT) {
610 /* We can use identical priorities in this test because
611 * the classifier always chooses the rule added later
612 * for equal-priority rules that fall into the same
614 pri1 = table * 257 + 50;
615 pri2 = pri1 + rel_pri;
618 ? random_wcf_in_table(table, pri1)
621 ? random_wcf_in_table(table, pri2)
/* Clear the top field bit so both rules stay in one bucket. */
624 wcf1 &= ~(1u << (CLS_N_FIELDS - 1));
625 wcf2 &= ~(1u << (CLS_N_FIELDS - 1));
628 /* This classifier always puts exact-match rules at
629 * maximum priority. */
630 pri1 = pri2 = UINT_MAX;
632 /* No wildcard fields. */
636 rule1 = make_rule(wcf1, pri1, 0);
637 rule2 = make_rule(wcf2, pri2,
638 value_pat << (CLS_N_FIELDS - 1));
640 classifier_init(&cls);
643 tcls_rule1 = tcls_insert(&tcls, rule1);
644 tcls_rule2 = tcls_insert(&tcls, rule2);
645 assert(!classifier_insert(&cls, &rule1->cls_rule));
646 displaced_rule = test_rule_from_cls_rule(
647 classifier_insert(&cls, &rule2->cls_rule));
/* Distinct rules coexist; identical rules displace rule1. */
648 if (wcf1 != wcf2 || pri1 != pri2 || value_pat) {
649 assert(!displaced_rule);
651 check_tables(&cls, 1, 1, 2);
652 compare_classifiers(&cls, &tcls);
654 classifier_remove(&cls, &rule1->cls_rule);
655 tcls_remove(&tcls, tcls_rule1);
656 check_tables(&cls, 1, 1, 1);
657 compare_classifiers(&cls, &tcls);
659 assert(displaced_rule == rule1);
660 check_tables(&cls, 1, 1, 1);
661 compare_classifiers(&cls, &tcls);
665 classifier_remove(&cls, &rule2->cls_rule);
666 tcls_remove(&tcls, tcls_rule2);
667 compare_classifiers(&cls, &tcls);
670 destroy_classifier(&cls);
678 /* Tests classification with two rules at a time that fall into the same
679 * table but different buckets. */
681 test_two_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
683 int table, rel_pri, wcf_pat;
685 /* Skip tables 0 and CLS_F_IDX_EXACT because they have one bucket. */
686 for (table = 1; table < CLS_N_FIELDS; table++) {
687 for (rel_pri = -1; rel_pri <= +1; rel_pri++) {
688 for (wcf_pat = 0; wcf_pat < 5; wcf_pat++) {
689 struct test_rule *rule1, *tcls_rule1;
690 struct test_rule *rule2, *tcls_rule2;
691 struct classifier cls;
693 unsigned int pri1, pri2;
695 int value_mask, value_pat1, value_pat2;
698 /* We can use identical priorities in this test because the
699 * classifier always chooses the rule added later for
700 * equal-priority rules that fall into the same table. */
701 pri1 = table * 257 + 50;
702 pri2 = pri1 + rel_pri;
/* wcf_pat selects which of the two rules gets a random
 * wildcard pattern (or whether they share one). */
705 wcf1 = wcf2 = random_wcf_in_table(table, pri1);
708 ? random_wcf_in_table(table, pri1)
711 ? random_wcf_in_table(table, pri2)
715 /* Generate value patterns that will put the two rules into
716 * different buckets. */
717 value_mask = ((1u << table) - 1);
718 value_pat1 = hash_int(pri1, 1) & value_mask;
721 value_pat2 = (hash_int(pri2, i++) & value_mask);
722 } while (value_pat1 == value_pat2);
723 rule1 = make_rule(wcf1, pri1, value_pat1);
724 rule2 = make_rule(wcf2, pri2, value_pat2);
726 classifier_init(&cls);
729 tcls_rule1 = tcls_insert(&tcls, rule1);
730 tcls_rule2 = tcls_insert(&tcls, rule2);
731 assert(!classifier_insert(&cls, &rule1->cls_rule));
732 assert(!classifier_insert(&cls, &rule2->cls_rule));
/* One table, two buckets, two rules expected. */
733 check_tables(&cls, 1, 2, 2);
734 compare_classifiers(&cls, &tcls);
736 classifier_remove(&cls, &rule1->cls_rule);
737 tcls_remove(&tcls, tcls_rule1);
738 check_tables(&cls, 1, 1, 1);
739 compare_classifiers(&cls, &tcls);
742 classifier_remove(&cls, &rule2->cls_rule);
743 tcls_remove(&tcls, tcls_rule2);
744 compare_classifiers(&cls, &tcls);
747 classifier_destroy(&cls);
754 /* Tests classification with two rules at a time that fall into different
757 test_two_rules_in_different_tables(int argc OVS_UNUSED,
758 char *argv[] OVS_UNUSED)
760 int table1, table2, rel_pri, wcf_pat;
762 for (table1 = 0; table1 < CLS_N_FIELDS; table1++) {
763 for (table2 = table1 + 1; table2 <= CLS_N_FIELDS; table2++) {
764 for (rel_pri = 0; rel_pri < 2; rel_pri++) {
765 for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) {
766 struct test_rule *rule1, *tcls_rule1;
767 struct test_rule *rule2, *tcls_rule2;
768 struct classifier cls;
770 unsigned int pri1, pri2;
773 /* We must use unique priorities in this test because the
774 * classifier makes the rule choice undefined for rules of
775 * equal priority that fall into different tables. (In
776 * practice, lower-numbered tables win.) */
777 pri1 = table1 * 257 + 50;
778 pri2 = rel_pri ? pri1 - 1 : pri1 + 1;
781 ? random_wcf_in_table(table1, pri1)
784 ? random_wcf_in_table(table2, pri2)
/* table2 == CLS_F_IDX_EXACT means rule2 is exact-match. */
787 if (table2 == CLS_F_IDX_EXACT) {
792 rule1 = make_rule(wcf1, pri1, 0);
793 rule2 = make_rule(wcf2, pri2, 0);
795 classifier_init(&cls);
798 tcls_rule1 = tcls_insert(&tcls, rule1);
799 tcls_rule2 = tcls_insert(&tcls, rule2);
800 assert(!classifier_insert(&cls, &rule1->cls_rule));
801 assert(!classifier_insert(&cls, &rule2->cls_rule));
/* Two tables, one bucket each, two rules expected. */
802 check_tables(&cls, 2, 2, 2);
803 compare_classifiers(&cls, &tcls);
805 classifier_remove(&cls, &rule1->cls_rule);
806 tcls_remove(&tcls, tcls_rule1);
807 check_tables(&cls, 1, 1, 1);
808 compare_classifiers(&cls, &tcls);
811 classifier_remove(&cls, &rule2->cls_rule);
812 tcls_remove(&tcls, tcls_rule2);
813 compare_classifiers(&cls, &tcls);
816 classifier_destroy(&cls);
824 /* Tests classification with many rules at a time that fall into the same
825 * bucket but have unique priorities (and various wildcards). */
827 test_many_rules_in_one_bucket(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
829 enum { MAX_RULES = 50 };
830 int iteration, table;
832 for (iteration = 0; iteration < 3; iteration++) {
833 for (table = 0; table <= CLS_N_FIELDS; table++) {
834 unsigned int priorities[MAX_RULES];
835 struct classifier cls;
/* Deterministic seed so each (table, iteration) pair is repeatable. */
839 srand(hash_int(table, iteration));
840 for (i = 0; i < MAX_RULES; i++) {
841 priorities[i] = i * 129;
843 shuffle(priorities, ARRAY_SIZE(priorities));
845 classifier_init(&cls);
848 for (i = 0; i < MAX_RULES; i++) {
849 struct test_rule *rule;
850 unsigned int priority = priorities[i];
/* Unique value per rule for the exact table (so no displacement);
 * a fixed value pattern keeps wildcard rules in one bucket. */
853 wcf = random_wcf_in_table(table, priority);
854 rule = make_rule(wcf, priority,
855 table == CLS_F_IDX_EXACT ? i : 1234);
856 tcls_insert(&tcls, rule);
857 assert(!classifier_insert(&cls, &rule->cls_rule));
858 check_tables(&cls, 1, 1, i + 1);
859 compare_classifiers(&cls, &tcls);
862 destroy_classifier(&cls);
868 /* Tests classification with many rules at a time that fall into the same
869 * table but random buckets. */
871 test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
873 enum { MAX_RULES = 50 };
874 int iteration, table;
876 for (iteration = 0; iteration < 3; iteration++) {
877 for (table = 0; table < CLS_N_FIELDS; table++) {
878 unsigned int priorities[MAX_RULES];
879 struct classifier cls;
/* Deterministic seed so each (table, iteration) pair is repeatable. */
883 srand(hash_int(table, iteration));
884 for (i = 0; i < MAX_RULES; i++) {
885 priorities[i] = i * 129;
887 shuffle(priorities, ARRAY_SIZE(priorities));
889 classifier_init(&cls);
892 for (i = 0; i < MAX_RULES; i++) {
893 struct test_rule *rule;
894 unsigned int priority = priorities[i];
897 wcf = random_wcf_in_table(table, priority);
/* Hashed value pattern scatters rules across buckets, so the
 * expected bucket count is unknown (-1). */
898 rule = make_rule(wcf, priority, hash_int(priority, 1));
899 tcls_insert(&tcls, rule);
900 assert(!classifier_insert(&cls, &rule->cls_rule));
901 check_tables(&cls, 1, -1, i + 1);
902 compare_classifiers(&cls, &tcls);
905 destroy_classifier(&cls);
911 /* Tests classification with many rules at a time that fall into random buckets
912 * in random tables. */
914 test_many_rules_in_different_tables(int argc OVS_UNUSED,
915 char *argv[] OVS_UNUSED)
917 enum { MAX_RULES = 50 };
920 for (iteration = 0; iteration < 30; iteration++) {
921 unsigned int priorities[MAX_RULES];
922 struct classifier cls;
927 for (i = 0; i < MAX_RULES; i++) {
928 priorities[i] = i * 129;
930 shuffle(priorities, ARRAY_SIZE(priorities));
932 classifier_init(&cls);
/* Phase 1: insert rules with random tables, wildcards, and values,
 * cross-checking after every insertion. */
935 for (i = 0; i < MAX_RULES; i++) {
936 struct test_rule *rule;
937 unsigned int priority = priorities[i];
938 int table = rand() % (CLS_N_FIELDS + 1);
939 int wcf = random_wcf_in_table(table, rand());
940 int value_pat = rand() & ((1u << CLS_N_FIELDS) - 1);
941 rule = make_rule(wcf, priority, value_pat);
942 tcls_insert(&tcls, rule);
943 assert(!classifier_insert(&cls, &rule->cls_rule));
944 check_tables(&cls, -1, -1, i + 1);
945 compare_classifiers(&cls, &tcls);
/* Phase 2: delete random batches of matching rules until empty.
 * A copy of the target rule is taken because deletion frees the
 * original (xmemdup'd target is the for_each_match template). */
948 while (!classifier_is_empty(&cls)) {
949 struct test_rule *rule = xmemdup(tcls.rules[rand() % tcls.n_rules],
950 sizeof(struct test_rule));
951 int include = rand() % 2 ? CLS_INC_WILD : CLS_INC_EXACT;
952 include |= (rule->cls_rule.wc.wildcards
953 ? CLS_INC_WILD : CLS_INC_EXACT);
954 classifier_for_each_match(&cls, &rule->cls_rule, include,
956 tcls_delete_matches(&tcls, &rule->cls_rule, include);
957 compare_classifiers(&cls, &tcls);
961 destroy_classifier(&cls);
/* Command table mapping sub-command names to test functions; each entry
 * takes zero arguments (min 0, max 0). */
966 static const struct command commands[] = {
967 {"empty", 0, 0, test_empty},
968 {"destroy-null", 0, 0, test_destroy_null},
969 {"single-rule", 0, 0, test_single_rule},
970 {"rule-replacement", 0, 0, test_rule_replacement},
971 {"two-rules-in-one-bucket", 0, 0, test_two_rules_in_one_bucket},
972 {"two-rules-in-one-table", 0, 0, test_two_rules_in_one_table},
973 {"two-rules-in-different-tables", 0, 0,
974 test_two_rules_in_different_tables},
975 {"many-rules-in-one-bucket", 0, 0, test_many_rules_in_one_bucket},
976 {"many-rules-in-one-table", 0, 0, test_many_rules_in_one_table},
977 {"many-rules-in-different-tables", 0, 0,
978 test_many_rules_in_different_tables},
/* Dispatches to the test named by argv[1]. */
983 main(int argc, char *argv[])
986 run_command(argc - 1, argv + 1, commands);