return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
}
+static struct test_rule *make_rule(int wc_fields, unsigned int priority,
+ int value_pat);
+static void free_rule(struct test_rule *);
+static struct test_rule *clone_rule(const struct test_rule *);
+
/* Trivial (linear) classifier. */
struct tcls {
size_t n_rules;
{
size_t i;
- assert(!flow_wildcards_is_exact(&rule->cls_rule.wc)
- || rule->cls_rule.priority == UINT_MAX);
for (i = 0; i < tcls->n_rules; i++) {
const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
if (cls_rule_equal(pos, &rule->cls_rule)) {
/* Exact match. */
- free(tcls->rules[i]);
- tcls->rules[i] = xmemdup(rule, sizeof *rule);
+ free_rule(tcls->rules[i]);
+ tcls->rules[i] = clone_rule(rule);
return tcls->rules[i];
} else if (pos->priority < rule->cls_rule.priority) {
break;
memmove(&tcls->rules[i + 1], &tcls->rules[i],
sizeof *tcls->rules * (tcls->n_rules - i));
}
- tcls->rules[i] = xmemdup(rule, sizeof *rule);
+ tcls->rules[i] = clone_rule(rule);
tcls->n_rules++;
return tcls->rules[i];
}
bool eq;
if (f_idx == CLS_F_IDX_NW_SRC) {
- eq = !((fixed->nw_src ^ wild->flow.nw_src) & wild->wc.nw_src_mask);
+ eq = !((fixed->nw_src ^ wild->match.flow.nw_src)
+ & wild->match.wc.masks.nw_src);
} else if (f_idx == CLS_F_IDX_NW_DST) {
- eq = !((fixed->nw_dst ^ wild->flow.nw_dst) & wild->wc.nw_dst_mask);
+ eq = !((fixed->nw_dst ^ wild->match.flow.nw_dst)
+ & wild->match.wc.masks.nw_dst);
} else if (f_idx == CLS_F_IDX_TP_SRC) {
- eq = !((fixed->tp_src ^ wild->flow.tp_src) & wild->wc.tp_src_mask);
+ eq = !((fixed->tp_src ^ wild->match.flow.tp_src)
+ & wild->match.wc.masks.tp_src);
} else if (f_idx == CLS_F_IDX_TP_DST) {
- eq = !((fixed->tp_dst ^ wild->flow.tp_dst) & wild->wc.tp_dst_mask);
+ eq = !((fixed->tp_dst ^ wild->match.flow.tp_dst)
+ & wild->match.wc.masks.tp_dst);
} else if (f_idx == CLS_F_IDX_DL_SRC) {
- eq = eth_addr_equal_except(fixed->dl_src, wild->flow.dl_src,
- wild->wc.dl_src_mask);
+ eq = eth_addr_equal_except(fixed->dl_src, wild->match.flow.dl_src,
+ wild->match.wc.masks.dl_src);
} else if (f_idx == CLS_F_IDX_DL_DST) {
- eq = eth_addr_equal_except(fixed->dl_dst, wild->flow.dl_dst,
- wild->wc.dl_dst_mask);
+ eq = eth_addr_equal_except(fixed->dl_dst, wild->match.flow.dl_dst,
+ wild->match.wc.masks.dl_dst);
} else if (f_idx == CLS_F_IDX_VLAN_TCI) {
- eq = !((fixed->vlan_tci ^ wild->flow.vlan_tci)
- & wild->wc.vlan_tci_mask);
+ eq = !((fixed->vlan_tci ^ wild->match.flow.vlan_tci)
+ & wild->match.wc.masks.vlan_tci);
} else if (f_idx == CLS_F_IDX_TUN_ID) {
- eq = !((fixed->tun_id ^ wild->flow.tun_id) & wild->wc.tun_id_mask);
+ eq = !((fixed->tun_id ^ wild->match.flow.tun_id)
+ & wild->match.wc.masks.tun_id);
} else if (f_idx == CLS_F_IDX_METADATA) {
- eq = !((fixed->metadata ^ wild->flow.metadata)
- & wild->wc.metadata_mask);
+ eq = !((fixed->metadata ^ wild->match.flow.metadata)
+ & wild->match.wc.masks.metadata);
} else if (f_idx == CLS_F_IDX_NW_DSCP) {
- eq = !((fixed->nw_tos ^ wild->flow.nw_tos) &
- (wild->wc.nw_tos_mask & IP_DSCP_MASK));
+ eq = !((fixed->nw_tos ^ wild->match.flow.nw_tos) &
+ (wild->match.wc.masks.nw_tos & IP_DSCP_MASK));
} else if (f_idx == CLS_F_IDX_NW_PROTO) {
- eq = !((fixed->nw_proto ^ wild->flow.nw_proto)
- & wild->wc.nw_proto_mask);
+ eq = !((fixed->nw_proto ^ wild->match.flow.nw_proto)
+ & wild->match.wc.masks.nw_proto);
} else if (f_idx == CLS_F_IDX_DL_TYPE) {
- eq = !((fixed->dl_type ^ wild->flow.dl_type)
- & wild->wc.dl_type_mask);
+ eq = !((fixed->dl_type ^ wild->match.flow.dl_type)
+ & wild->match.wc.masks.dl_type);
} else if (f_idx == CLS_F_IDX_IN_PORT) {
- eq = !((fixed->in_port ^ wild->flow.in_port)
- & wild->wc.in_port_mask);
+ eq = !((fixed->in_port ^ wild->match.flow.in_port)
+ & wild->match.wc.masks.in_port);
} else {
NOT_REACHED();
}
for (i = 0; i < cls->n_rules; ) {
struct test_rule *pos = cls->rules[i];
- if (!flow_wildcards_has_extra(&pos->cls_rule.wc, &target->wc)
- && match(target, &pos->cls_rule.flow)) {
+ if (!flow_wildcards_has_extra(&pos->cls_rule.match.wc,
+ &target->match.wc)
+ && match(target, &pos->cls_rule.match.flow)) {
tcls_remove(cls, pos);
} else {
i++;
cls_cursor_init(&cursor, cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cls_rule, &cursor) {
classifier_remove(cls, &rule->cls_rule);
- free(rule);
+ free_rule(rule);
}
classifier_destroy(cls);
}
{
const struct cls_field *f;
struct test_rule *rule;
+ struct match match;
- rule = xzalloc(sizeof *rule);
- cls_rule_init_catchall(&rule->cls_rule, wc_fields ? priority : UINT_MAX);
+ match_init_catchall(&match);
for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
int f_idx = f - cls_fields;
int value_idx = (value_pat & (1u << f_idx)) != 0;
- memcpy((char *) &rule->cls_rule.flow + f->ofs,
+ memcpy((char *) &match.flow + f->ofs,
values[f_idx][value_idx], f->len);
if (f_idx == CLS_F_IDX_NW_SRC) {
- rule->cls_rule.wc.nw_src_mask = htonl(UINT32_MAX);
+ match.wc.masks.nw_src = htonl(UINT32_MAX);
} else if (f_idx == CLS_F_IDX_NW_DST) {
- rule->cls_rule.wc.nw_dst_mask = htonl(UINT32_MAX);
+ match.wc.masks.nw_dst = htonl(UINT32_MAX);
} else if (f_idx == CLS_F_IDX_TP_SRC) {
- rule->cls_rule.wc.tp_src_mask = htons(UINT16_MAX);
+ match.wc.masks.tp_src = htons(UINT16_MAX);
} else if (f_idx == CLS_F_IDX_TP_DST) {
- rule->cls_rule.wc.tp_dst_mask = htons(UINT16_MAX);
+ match.wc.masks.tp_dst = htons(UINT16_MAX);
} else if (f_idx == CLS_F_IDX_DL_SRC) {
- memset(rule->cls_rule.wc.dl_src_mask, 0xff, ETH_ADDR_LEN);
+ memset(match.wc.masks.dl_src, 0xff, ETH_ADDR_LEN);
} else if (f_idx == CLS_F_IDX_DL_DST) {
- memset(rule->cls_rule.wc.dl_dst_mask, 0xff, ETH_ADDR_LEN);
+ memset(match.wc.masks.dl_dst, 0xff, ETH_ADDR_LEN);
} else if (f_idx == CLS_F_IDX_VLAN_TCI) {
- rule->cls_rule.wc.vlan_tci_mask = htons(UINT16_MAX);
+ match.wc.masks.vlan_tci = htons(UINT16_MAX);
} else if (f_idx == CLS_F_IDX_TUN_ID) {
- rule->cls_rule.wc.tun_id_mask = htonll(UINT64_MAX);
+ match.wc.masks.tun_id = htonll(UINT64_MAX);
} else if (f_idx == CLS_F_IDX_METADATA) {
- rule->cls_rule.wc.metadata_mask = htonll(UINT64_MAX);
+ match.wc.masks.metadata = htonll(UINT64_MAX);
} else if (f_idx == CLS_F_IDX_NW_DSCP) {
- rule->cls_rule.wc.nw_tos_mask |= IP_DSCP_MASK;
+ match.wc.masks.nw_tos |= IP_DSCP_MASK;
} else if (f_idx == CLS_F_IDX_NW_PROTO) {
- rule->cls_rule.wc.nw_proto_mask = UINT8_MAX;
+ match.wc.masks.nw_proto = UINT8_MAX;
} else if (f_idx == CLS_F_IDX_DL_TYPE) {
- rule->cls_rule.wc.dl_type_mask = htons(UINT16_MAX);
+ match.wc.masks.dl_type = htons(UINT16_MAX);
} else if (f_idx == CLS_F_IDX_IN_PORT) {
- rule->cls_rule.wc.in_port_mask = UINT16_MAX;
+ match.wc.masks.in_port = UINT16_MAX;
} else {
NOT_REACHED();
}
}
+
+ rule = xzalloc(sizeof *rule);
+ cls_rule_init(&rule->cls_rule, &match, wc_fields ? priority : UINT_MAX);
return rule;
}
+/* Returns a deep copy of 'src' allocated with xmalloc().  The embedded
+ * cls_rule is duplicated with cls_rule_clone() rather than memcpy(), so
+ * the copy owns its own match data (presumably cls_rule can hold
+ * heap-allocated state -- see cls_rule_clone()).  The caller must
+ * eventually release the returned rule with free_rule(). */
+static struct test_rule *
+clone_rule(const struct test_rule *src)
+{
+ struct test_rule *dst;
+
+ dst = xmalloc(sizeof *dst);
+ dst->aux = src->aux;
+ cls_rule_clone(&dst->cls_rule, &src->cls_rule);
+ return dst;
+}
+
+/* Destroys 'rule' and frees its memory.  cls_rule_destroy() is called
+ * first so that any resources held by the embedded cls_rule are released
+ * before the container itself is freed; this is the counterpart of
+ * make_rule() and clone_rule(). */
+static void
+free_rule(struct test_rule *rule)
+{
+ cls_rule_destroy(&rule->cls_rule);
+ free(rule);
+}
+
static void
shuffle(unsigned int *p, size_t n)
{
assert(tcls_is_empty(&tcls));
compare_classifiers(&cls, &tcls);
- free(rule);
+ free_rule(rule);
classifier_destroy(&cls);
tcls_destroy(&tcls);
}
tcls_insert(&tcls, rule2);
assert(test_rule_from_cls_rule(
classifier_replace(&cls, &rule2->cls_rule)) == rule1);
- free(rule1);
+ free_rule(rule1);
check_tables(&cls, 1, 1, 0);
compare_classifiers(&cls, &tcls);
tcls_destroy(&tcls);
tcls_destroy(&tcls);
for (i = 0; i < N_RULES; i++) {
- free(rules[i]);
+ free_rule(rules[i]);
}
} while (next_permutation(ops, ARRAY_SIZE(ops)));
assert(n_permutations == (factorial(N_RULES * 2) >> N_RULES));
int n = 0;
while (x) {
- x &= x - 1;
+ x = zero_rightmost_1bit(x);
n++;
}
for (i = 0; i < N_RULES; i++) {
tcls_remove(&tcls, tcls_rules[i]);
classifier_remove(&cls, &rules[i]->cls_rule);
- free(rules[i]);
+ free_rule(rules[i]);
check_tables(&cls, i < N_RULES - 1, N_RULES - (i + 1), 0);
compare_classifiers(&cls, &tcls);
struct test_rule *target;
struct cls_cursor cursor;
- target = xmemdup(tcls.rules[rand() % tcls.n_rules],
- sizeof(struct test_rule));
+ target = clone_rule(tcls.rules[rand() % tcls.n_rules]);
cls_cursor_init(&cursor, &cls, &target->cls_rule);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cls_rule, &cursor) {
classifier_remove(&cls, &rule->cls_rule);
- free(rule);
+ free_rule(rule);
}
tcls_delete_matches(&tcls, &target->cls_rule);
compare_classifiers(&cls, &tcls);
check_tables(&cls, -1, -1, -1);
- free(target);
+ free_rule(target);
}
destroy_classifier(&cls);