2 * Copyright (c) 2009, 2010 Nicira Networks.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
#include "classifier.h"

#include <netinet/in.h>
#include <stdlib.h>
#include <string.h>

#include "dynamic-string.h"
27 static struct cls_table *find_table(const struct classifier *,
28 const struct flow_wildcards *);
29 static struct cls_table *insert_table(struct classifier *,
30 const struct flow_wildcards *);
32 static struct cls_table *classifier_first_table(const struct classifier *);
33 static struct cls_table *classifier_next_table(const struct classifier *,
34 const struct cls_table *);
35 static void destroy_table(struct classifier *, struct cls_table *);
37 static bool should_include(const struct cls_table *, int include);
39 static struct cls_rule *find_match(const struct cls_table *,
41 static struct cls_rule *find_equal(struct cls_table *, const struct flow *,
43 static struct cls_rule *insert_rule(struct cls_table *, struct cls_rule *);
45 static bool flow_equal_except(const struct flow *, const struct flow *,
46 const struct flow_wildcards *);
47 static void zero_wildcards(struct flow *, const struct flow_wildcards *);
/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list. */
#define FOR_EACH_RULE_IN_LIST(RULE, HEAD)                               \
    for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE))

/* Deletion-safe variant: NEXT is computed before the loop body runs, so the
 * body may free RULE.  (The advance clause "(RULE) = (NEXT)" was missing.) */
#define FOR_EACH_RULE_IN_LIST_SAFE(RULE, NEXT, HEAD)                    \
    for ((RULE) = (HEAD);                                               \
         (RULE) != NULL && ((NEXT) = next_rule_in_list(RULE), true);    \
         (RULE) = (NEXT))

static struct cls_rule *next_rule_in_list(struct cls_rule *);
59 static struct cls_table *
60 cls_table_from_hmap_node(const struct hmap_node *node)
62 return node ? CONTAINER_OF(node, struct cls_table, hmap_node) : NULL;
65 static struct cls_rule *
66 cls_rule_from_hmap_node(const struct hmap_node *node)
68 return node ? CONTAINER_OF(node, struct cls_rule, hmap_node) : NULL;
71 /* Returns the cls_table within 'cls' that has no wildcards, or NULL if there
74 classifier_exact_table(const struct classifier *cls)
76 struct flow_wildcards exact_wc;
77 flow_wildcards_init_exact(&exact_wc);
78 return find_table(cls, &exact_wc);
81 /* Returns the first rule in 'table', or a null pointer if 'table' is NULL. */
83 cls_table_first_rule(const struct cls_table *table)
85 return table ? cls_rule_from_hmap_node(hmap_first(&table->rules)) : NULL;
88 /* Returns the next rule in 'table' following 'rule', or a null pointer if
89 * 'rule' is the last rule in 'table'. */
91 cls_table_next_rule(const struct cls_table *table, const struct cls_rule *rule)
94 = CONTAINER_OF(rule->list.next, struct cls_rule, hmap_node);
96 return (next->priority < rule->priority
98 : cls_rule_from_hmap_node(hmap_next(&table->rules,
103 cls_rule_init__(struct cls_rule *rule,
104 const struct flow *flow, uint32_t wildcards)
107 flow_wildcards_init(&rule->wc, wildcards);
108 cls_rule_zero_wildcarded_fields(rule);
111 /* Converts the flow in 'flow' into a cls_rule in 'rule', with the given
112 * 'wildcards' and 'priority'.*/
114 cls_rule_from_flow(const struct flow *flow, uint32_t wildcards,
115 unsigned int priority, struct cls_rule *rule)
117 cls_rule_init__(rule, flow, wildcards);
118 rule->priority = priority;
121 /* Converts the ofp_match in 'match' (with format 'flow_format', one of NXFF_*)
122 * into a cls_rule in 'rule', with the given 'priority'. 'cookie' is used
123 * when 'flow_format' is NXFF_TUN_ID_FROM_COOKIE. */
125 cls_rule_from_match(const struct ofp_match *match, unsigned int priority,
126 int flow_format, uint64_t cookie,
127 struct cls_rule *rule)
132 flow_from_match(match, flow_format, cookie, &flow, &wildcards);
133 cls_rule_init__(rule, &flow, wildcards);
134 rule->priority = rule->wc.wildcards ? priority : UINT16_MAX;
137 /* Initializes 'rule' as a "catch-all" rule that matches every packet, with
138 * priority 'priority'. */
140 cls_rule_init_catchall(struct cls_rule *rule, unsigned int priority)
142 memset(&rule->flow, 0, sizeof rule->flow);
143 flow_wildcards_init(&rule->wc, OVSFW_ALL);
144 rule->priority = priority;
147 /* For each bit or field wildcarded in 'rule', sets the corresponding bit or
148 * field in 'flow' to all-0-bits. It is important to maintain this invariant
149 * in a clr_rule that might be inserted into a classifier.
151 * It is never necessary to call this function directly for a cls_rule that is
152 * initialized or modified only by cls_rule_*() functions. It is useful to
153 * restore the invariant in a cls_rule whose 'wc' member is modified by hand.
156 cls_rule_zero_wildcarded_fields(struct cls_rule *rule)
158 zero_wildcards(&rule->flow, &rule->wc);
162 cls_rule_set_in_port(struct cls_rule *rule, uint16_t odp_port)
164 rule->wc.wildcards &= ~OFPFW_IN_PORT;
165 rule->flow.in_port = odp_port;
169 cls_rule_set_dl_type(struct cls_rule *rule, ovs_be16 dl_type)
171 rule->wc.wildcards &= ~OFPFW_DL_TYPE;
172 rule->flow.dl_type = dl_type;
176 cls_rule_set_dl_src(struct cls_rule *rule, const uint8_t dl_src[ETH_ADDR_LEN])
178 rule->wc.wildcards &= ~OFPFW_DL_SRC;
179 memcpy(rule->flow.dl_src, dl_src, ETH_ADDR_LEN);
183 cls_rule_set_dl_dst(struct cls_rule *rule, const uint8_t dl_dst[ETH_ADDR_LEN])
185 rule->wc.wildcards &= ~OFPFW_DL_DST;
186 memcpy(rule->flow.dl_dst, dl_dst, ETH_ADDR_LEN);
190 cls_rule_set_dl_tci(struct cls_rule *rule, ovs_be16 tci)
192 return cls_rule_set_dl_tci_masked(rule, tci, htons(0xffff));
/* Sets the 802.1Q VLAN parts of 'rule' to match 'tci' under 'mask'.
 * Only a handful of masks are representable with OpenFlow 1.0's coarse
 * DL_VLAN / DL_VLAN_PCP wildcard bits, so the function dispatches on the
 * exact mask value.
 *
 * NOTE(review): the switch's case labels and the per-branch return
 * statements are missing from this chunk of the file; the comments below
 * describe the branches as they appear, but the exact case values should be
 * confirmed against the full source. */
cls_rule_set_dl_tci_masked(struct cls_rule *rule, ovs_be16 tci, ovs_be16 mask)
    switch (ntohs(mask)) {
        /* Branch: mask covers the entire TCI (VID, PCP, and CFI). */
        if (tci == htons(0)) {
            /* Match only packets that have no 802.1Q header. */
            rule->wc.wildcards &= ~(OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP);
            rule->flow.dl_vlan = htons(OFP_VLAN_NONE);
            rule->flow.dl_vlan_pcp = 0;
        } else if (tci & htons(VLAN_CFI)) {
            /* Match only packets that have a specific 802.1Q VID and PCP. */
            rule->wc.wildcards &= ~(OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP);
            rule->flow.dl_vlan = htons(vlan_tci_to_vid(tci));
            rule->flow.dl_vlan_pcp = vlan_tci_to_pcp(tci);
        /* Branch: VID (+CFI) significant.  CFI must be set in 'tci' for any
         * single 802.1Q header to match. */
        if (!(tci & htons(VLAN_CFI))) {
            /* Match only packets that have a specific 802.1Q VID. */
            cls_rule_set_dl_vlan(rule, tci & htons(VLAN_VID_MASK));
            rule->wc.wildcards |= OFPFW_DL_VLAN_PCP;
            rule->flow.dl_vlan_pcp = 0;
        /* Branch: PCP (+CFI) significant. */
        if (!(tci & htons(VLAN_CFI))) {
            /* Match only packets that have a specific 802.1Q PCP. */
            cls_rule_set_dl_vlan_pcp(rule, vlan_tci_to_pcp(tci));
            rule->wc.wildcards |= OFPFW_DL_VLAN;
            rule->flow.dl_vlan = 0;
        /* Branch: zero mask. */
        /* Match anything. */
        rule->wc.wildcards |= OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP;
        rule->flow.dl_vlan = htons(0);
        rule->flow.dl_vlan_pcp = 0;
252 cls_rule_set_dl_vlan(struct cls_rule *rule, ovs_be16 dl_vlan)
254 if (dl_vlan != htons(OFP_VLAN_NONE)) {
255 dl_vlan &= htons(VLAN_VID_MASK);
258 rule->wc.wildcards &= ~OFPFW_DL_VLAN;
259 rule->flow.dl_vlan = dl_vlan;
263 cls_rule_set_dl_vlan_pcp(struct cls_rule *rule, uint8_t dl_vlan_pcp)
265 rule->wc.wildcards &= ~OFPFW_DL_VLAN_PCP;
266 rule->flow.dl_vlan_pcp = dl_vlan_pcp & 0x07;
270 cls_rule_set_tp_src(struct cls_rule *rule, ovs_be16 tp_src)
272 rule->wc.wildcards &= ~OFPFW_TP_SRC;
273 rule->flow.tp_src = tp_src;
277 cls_rule_set_tp_dst(struct cls_rule *rule, ovs_be16 tp_dst)
279 rule->wc.wildcards &= ~OFPFW_TP_DST;
280 rule->flow.tp_dst = tp_dst;
284 cls_rule_set_nw_proto(struct cls_rule *rule, uint8_t nw_proto)
286 rule->wc.wildcards &= ~OFPFW_NW_PROTO;
287 rule->flow.nw_proto = nw_proto;
291 cls_rule_set_nw_src(struct cls_rule *rule, ovs_be32 nw_src)
293 cls_rule_set_nw_src_masked(rule, nw_src, htonl(UINT32_MAX));
297 cls_rule_set_nw_src_masked(struct cls_rule *rule, ovs_be32 ip, ovs_be32 mask)
299 if (flow_wildcards_set_nw_src_mask(&rule->wc, mask)) {
300 rule->flow.nw_src = ip & mask;
308 cls_rule_set_nw_dst(struct cls_rule *rule, ovs_be32 nw_dst)
310 cls_rule_set_nw_dst_masked(rule, nw_dst, htonl(UINT32_MAX));
314 cls_rule_set_nw_dst_masked(struct cls_rule *rule, ovs_be32 ip, ovs_be32 mask)
316 if (flow_wildcards_set_nw_dst_mask(&rule->wc, mask)) {
317 rule->flow.nw_dst = ip & mask;
325 cls_rule_set_nw_tos(struct cls_rule *rule, uint8_t nw_tos)
327 rule->wc.wildcards &= ~OFPFW_NW_TOS;
328 rule->flow.nw_tos = nw_tos & IP_DSCP_MASK;
332 cls_rule_set_icmp_type(struct cls_rule *rule, uint8_t icmp_type)
334 rule->wc.wildcards &= ~OFPFW_ICMP_TYPE;
335 rule->flow.icmp_type = htons(icmp_type);
340 cls_rule_set_icmp_code(struct cls_rule *rule, uint8_t icmp_code)
342 rule->wc.wildcards &= ~OFPFW_ICMP_CODE;
343 rule->flow.icmp_code = htons(icmp_code);
346 /* Converts 'rule' to a string and returns the string. The caller must free
347 * the string (with free()). */
349 cls_rule_to_string(const struct cls_rule *rule)
351 struct ds s = DS_EMPTY_INITIALIZER;
352 ds_put_format(&s, "wildcards=%x priority=%u ",
353 rule->wc.wildcards, rule->priority);
354 flow_format(&s, &rule->flow);
358 /* Prints cls_rule 'rule', for debugging.
360 * (The output could be improved and expanded, but this was good enough to
361 * debug the classifier.) */
363 cls_rule_print(const struct cls_rule *rule)
365 printf("wildcards=%x priority=%u ", rule->wc.wildcards, rule->priority);
366 flow_print(stdout, &rule->flow);
370 /* Initializes 'cls' as a classifier that initially contains no classification
373 classifier_init(struct classifier *cls)
376 hmap_init(&cls->tables);
379 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
380 * caller's responsibility. */
382 classifier_destroy(struct classifier *cls)
385 struct cls_table *table, *next_table;
387 HMAP_FOR_EACH_SAFE (table, next_table, hmap_node, &cls->tables) {
388 hmap_destroy(&table->rules);
389 hmap_remove(&cls->tables, &table->hmap_node);
392 hmap_destroy(&cls->tables);
396 /* Returns true if 'cls' contains no classification rules, false otherwise. */
398 classifier_is_empty(const struct classifier *cls)
400 return cls->n_rules == 0;
403 /* Returns the number of rules in 'classifier'. */
405 classifier_count(const struct classifier *cls)
410 /* Returns the number of rules in 'classifier' that have no wildcards. */
412 classifier_count_exact(const struct classifier *cls)
414 struct cls_table *exact_table = classifier_exact_table(cls);
415 return exact_table ? exact_table->n_table_rules : 0;
418 /* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
419 * must not modify or free it.
421 * If 'cls' already contains an identical rule (including wildcards, values of
422 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
423 * rule that was replaced. The caller takes ownership of the returned rule and
424 * is thus responsible for freeing it, etc., as necessary.
426 * Returns NULL if 'cls' does not contain a rule with an identical key, after
427 * inserting the new rule. In this case, no rules are displaced by the new
428 * rule, even rules that cannot have any effect because the new rule matches a
429 * superset of their flows and has higher priority. */
431 classifier_insert(struct classifier *cls, struct cls_rule *rule)
433 struct cls_rule *old_rule;
434 struct cls_table *table;
436 table = find_table(cls, &rule->wc);
438 table = insert_table(cls, &rule->wc);
441 old_rule = insert_rule(table, rule);
443 table->n_table_rules++;
/* Removes 'rule' from 'cls'.  It is the caller's responsibility to free
 * 'rule', if this is desirable.
 *
 * Three cases: 'rule' is a non-head member of a same-match priority list,
 * 'rule' is the head and the only member, or 'rule' is the head with other
 * members that must be promoted into the hmap in its place. */
classifier_remove(struct classifier *cls, struct cls_rule *rule)
    struct cls_rule *head;
    struct cls_table *table;

    table = find_table(cls, &rule->wc);
    head = find_equal(table, &rule->flow, rule->hmap_node.hash);
        /* NOTE(review): the guard for this branch is missing from this chunk;
         * presumably "if (head != rule) {" — unlinking a non-head rule. */
        list_remove(&rule->list);
    } else if (list_is_empty(&rule->list)) {
        /* 'rule' is the head and sole member: drop its hmap node. */
        hmap_remove(&table->rules, &rule->hmap_node);
        /* 'rule' is the head but others share its match: promote the next
         * list member into the hmap in 'rule''s place. */
        struct cls_rule *next = CONTAINER_OF(rule->list.next,
                                             struct cls_rule, list);

        list_remove(&rule->list);
        hmap_replace(&table->rules, &rule->hmap_node, &next->hmap_node);

    /* Destroy the table once it is empty, unless an iteration currently
     * holds a reference to it (see classifier_for_each*()). */
    if (--table->n_table_rules == 0 && !table->n_refs) {
        destroy_table(cls, table);
478 /* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
479 * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
480 * of equal priority match 'flow', returns one arbitrarily.
482 * 'include' is a combination of CLS_INC_* values that specify tables to
483 * include in the search. */
485 classifier_lookup(const struct classifier *cls, const struct flow *flow,
488 struct cls_table *table;
489 struct cls_rule *best;
492 HMAP_FOR_EACH (table, hmap_node, &cls->tables) {
493 if (should_include(table, include)) {
494 struct cls_rule *rule = find_match(table, flow);
495 if (rule && (!best || rule->priority > best->priority)) {
503 /* Finds and returns a rule in 'cls' with exactly the same priority and
504 * matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
505 * contain an exact match.
507 * Priority is ignored for exact-match rules (because OpenFlow 1.0 always
508 * treats exact-match rules as highest priority). */
510 classifier_find_rule_exactly(const struct classifier *cls,
511 const struct cls_rule *target)
513 struct cls_rule *head, *rule;
514 struct cls_table *table;
516 table = find_table(cls, &target->wc);
521 head = find_equal(table, &target->flow, flow_hash(&target->flow, 0));
522 if (!target->wc.wildcards) {
525 FOR_EACH_RULE_IN_LIST (rule, head) {
526 if (target->priority >= rule->priority) {
527 return target->priority == rule->priority ? rule : NULL;
533 /* Checks if 'target' would overlap any other rule in 'cls'. Two rules are
534 * considered to overlap if both rules have the same priority and a packet
535 * could match both. */
537 classifier_rule_overlaps(const struct classifier *cls,
538 const struct cls_rule *target)
540 struct cls_table *table;
542 HMAP_FOR_EACH (table, hmap_node, &cls->tables) {
543 struct flow_wildcards wc;
544 struct cls_rule *head;
546 flow_wildcards_combine(&wc, &target->wc, &table->wc);
547 HMAP_FOR_EACH (head, hmap_node, &table->rules) {
548 struct cls_rule *rule;
550 FOR_EACH_RULE_IN_LIST (rule, head) {
551 if (rule->priority == target->priority
552 && flow_equal_except(&target->flow, &rule->flow, &wc)) {
/* Searches 'cls' for rules that exactly match 'target' or are more specific
 * than 'target'.  That is, a given 'rule' matches 'target' if, for every
 * field:
 *
 *   - 'target' and 'rule' specify the same (non-wildcarded) value for the
 *     field, or
 *
 *   - 'target' wildcards the field,
 *
 * but not if:
 *
 *   - 'target' and 'rule' specify different values for the field, or
 *
 *   - 'target' specifies a value for the field but 'rule' wildcards it.
 *
 * Equivalently, the truth table for whether a field matches is:
 *
 *                                  rule
 *
 *                         wildcard    exact
 *                        +---------+---------+
 *               t  wild  |   yes   |   yes   |
 *               a        |         |         |
 *               r        +---------+---------+
 *               g  exact |   no    |if values|
 *               e        |         |  match  |
 *               t        +---------+---------+
 *
 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
 * commands and by OpenFlow 1.0 aggregate and flow stats.
 *
 * Ignores target->priority.
 *
 * 'callback' is allowed to delete the rule that is passed as its argument,
 * but it must not delete (or move) any other rules in 'cls' that have the
 * same wildcards as the argument rule. */
classifier_for_each_match(const struct classifier *cls_,
                          const struct cls_rule *target,
                          int include, cls_cb_func *callback, void *aux)
    struct classifier *cls = (struct classifier *) cls_;
    struct cls_table *table, *next_table;

    for (table = classifier_first_table(cls); table; table = next_table) {
        if (should_include(table, include)
            && !flow_wildcards_has_extra(&table->wc, &target->wc)) {
            /* We have eliminated the "no" case in the truth table above.  Two
             * of the three remaining cases are trivial.  We only need to check
             * the fourth case, where both 'rule' and 'target' require an exact
             * match.
             * NOTE(review): the "table->n_refs++;" ref-taking line appears to
             * be missing from this chunk; the decrement below implies it. */
            struct cls_rule *head, *next_head;

            HMAP_FOR_EACH_SAFE (head, next_head, hmap_node, &table->rules) {
                if (flow_equal_except(&head->flow, &target->flow,
                    /* NOTE(review): third argument elided in this chunk;
                     * presumably &target->wc. */
                    struct cls_rule *rule, *next_rule;

                    FOR_EACH_RULE_IN_LIST_SAFE (rule, next_rule, head) {
                        /* NOTE(review): the callback(rule, aux) invocation is
                         * missing from this chunk. */
            /* The callback may have emptied the table; fetch the successor
             * before possibly destroying it. */
            next_table = classifier_next_table(cls, table);
            if (!--table->n_refs && !table->n_table_rules) {
                destroy_table(cls, table);
            /* Skipped table: just advance. */
            next_table = classifier_next_table(cls, table);
636 /* 'callback' is allowed to delete the rule that is passed as its argument, but
637 * it must not delete (or move) any other rules in 'cls' that have the same
638 * wildcards as the argument rule.
640 * If 'include' is CLS_INC_EXACT then CLASSIFIER_FOR_EACH_EXACT_RULE is
641 * probably easier to use. */
643 classifier_for_each(const struct classifier *cls_, int include,
644 cls_cb_func *callback, void *aux)
646 struct classifier *cls = (struct classifier *) cls_;
647 struct cls_table *table, *next_table;
649 for (table = classifier_first_table(cls); table; table = next_table) {
650 if (should_include(table, include)) {
651 struct cls_rule *head, *next_head;
654 HMAP_FOR_EACH_SAFE (head, next_head, hmap_node, &table->rules) {
655 struct cls_rule *rule, *next_rule;
657 FOR_EACH_RULE_IN_LIST_SAFE (rule, next_rule, head) {
661 next_table = classifier_next_table(cls, table);
662 if (!--table->n_refs && !table->n_table_rules) {
663 destroy_table(cls, table);
666 next_table = classifier_next_table(cls, table);
671 static struct cls_table *
672 find_table(const struct classifier *cls, const struct flow_wildcards *wc)
674 struct cls_table *table;
676 HMAP_FOR_EACH_IN_BUCKET (table, hmap_node, flow_wildcards_hash(wc),
678 if (flow_wildcards_equal(wc, &table->wc)) {
685 static struct cls_table *
686 insert_table(struct classifier *cls, const struct flow_wildcards *wc)
688 struct cls_table *table;
690 table = xzalloc(sizeof *table);
691 hmap_init(&table->rules);
693 hmap_insert(&cls->tables, &table->hmap_node, flow_wildcards_hash(wc));
698 static struct cls_table *
699 classifier_first_table(const struct classifier *cls)
701 return cls_table_from_hmap_node(hmap_first(&cls->tables));
704 static struct cls_table *
705 classifier_next_table(const struct classifier *cls,
706 const struct cls_table *table)
708 return cls_table_from_hmap_node(hmap_next(&cls->tables,
713 destroy_table(struct classifier *cls, struct cls_table *table)
715 hmap_remove(&cls->tables, &table->hmap_node);
716 hmap_destroy(&table->rules);
720 /* Returns true if 'table' should be included by an operation with the
721 * specified 'include' (a combination of CLS_INC_*). */
723 should_include(const struct cls_table *table, int include)
725 return include & (table->wc.wildcards ? CLS_INC_WILD : CLS_INC_EXACT);
728 static struct cls_rule *
729 find_match(const struct cls_table *table, const struct flow *flow)
731 struct cls_rule *rule;
735 zero_wildcards(&f, &table->wc);
736 HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, flow_hash(&f, 0),
738 if (flow_equal(&f, &rule->flow)) {
745 static struct cls_rule *
746 find_equal(struct cls_table *table, const struct flow *flow, uint32_t hash)
748 struct cls_rule *head;
750 HMAP_FOR_EACH_WITH_HASH (head, hmap_node, hash, &table->rules) {
751 if (flow_equal(&head->flow, flow)) {
/* Inserts 'new' into 'table'.  Returns the rule with identical match and
 * priority that 'new' displaced, or NULL if nothing was displaced.
 *
 * Rules sharing a match hang off one hmap head, chained through their 'list'
 * members in strictly decreasing priority order; only the head lives in the
 * hmap. */
static struct cls_rule *
insert_rule(struct cls_table *table, struct cls_rule *new)
    struct cls_rule *head;

    new->hmap_node.hash = flow_hash(&new->flow, 0);

    head = find_equal(table, &new->flow, new->hmap_node.hash);
    /* NOTE(review): the "if (!head)" branch boundary is elided in this
     * chunk.  No existing rule with this match: insert as a fresh head. */
    hmap_insert(&table->rules, &new->hmap_node, new->hmap_node.hash);
    list_init(&new->list);
    /* Scan the list for the insertion point that will keep the list in
     * order of decreasing priority. */
    struct cls_rule *rule;
    FOR_EACH_RULE_IN_LIST (rule, head) {
        if (new->priority >= rule->priority) {
            /* NOTE(review): a "rule == head" check appears to be elided
             * before this hmap_replace(). */
            /* 'new' is the new highest-priority flow in the list. */
            hmap_replace(&table->rules,
                         &rule->hmap_node, &new->hmap_node);
            if (new->priority == rule->priority) {
                /* Same priority: 'new' displaces 'rule'. */
                list_replace(&new->list, &rule->list);
            /* Otherwise insert 'new' before 'rule'. */
            list_insert(&rule->list, &new->list);
    /* Lowest priority of all: insert 'new' at the end of the list. */
    list_push_back(&head->list, &new->list);
798 static struct cls_rule *
799 next_rule_in_list(struct cls_rule *rule)
801 struct cls_rule *next = OBJECT_CONTAINING(rule->list.next, next, list);
802 return next->priority < rule->priority ? next : NULL;
/* Returns true if 'a' and 'b' agree on every field that 'wildcards' does not
 * wildcard.  nw_src/nw_dst are compared under their bit masks; every other
 * field is all-or-nothing via the OFPFW_*/NXFW_* flag bits in
 * 'wildcards->wildcards'. */
flow_equal_except(const struct flow *a, const struct flow *b,
                  const struct flow_wildcards *wildcards)
    const uint32_t wc = wildcards->wildcards;

    /* Build-time reminder to update this function when struct flow grows. */
    BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 37);

    /* For each clause: the field is either wildcarded or must compare
     * equal. */
    return ((wc & NXFW_TUN_ID || a->tun_id == b->tun_id)
            && !((a->nw_src ^ b->nw_src) & wildcards->nw_src_mask)
            && !((a->nw_dst ^ b->nw_dst) & wildcards->nw_dst_mask)
            && (wc & OFPFW_IN_PORT || a->in_port == b->in_port)
            && (wc & OFPFW_DL_VLAN || a->dl_vlan == b->dl_vlan)
            && (wc & OFPFW_DL_TYPE || a->dl_type == b->dl_type)
            && (wc & OFPFW_TP_SRC || a->tp_src == b->tp_src)
            && (wc & OFPFW_TP_DST || a->tp_dst == b->tp_dst)
            && (wc & OFPFW_DL_SRC || eth_addr_equals(a->dl_src, b->dl_src))
            && (wc & OFPFW_DL_DST || eth_addr_equals(a->dl_dst, b->dl_dst))
            && (wc & OFPFW_NW_PROTO || a->nw_proto == b->nw_proto)
            && (wc & OFPFW_DL_VLAN_PCP || a->dl_vlan_pcp == b->dl_vlan_pcp)
            && (wc & OFPFW_NW_TOS || a->nw_tos == b->nw_tos));
/* Sets every field of 'flow' that 'wildcards' wildcards to all-0-bits, so
 * that two flows which agree on their non-wildcarded fields compare equal
 * with plain flow_equal().
 *
 * NOTE(review): several of the zeroing assignments inside the if bodies are
 * missing from this chunk (e.g. "flow->tun_id = 0;"); each branch is
 * expected to zero the field its OFPFW_* bit covers. */
zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
    const uint32_t wc = wildcards->wildcards;

    /* Build-time reminder to update this function when struct flow grows. */
    BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 37);

    if (wc & NXFW_TUN_ID) {
        /* NOTE(review): tun_id zeroing assignment elided in this chunk. */
    /* IP addresses use bit masks rather than all-or-nothing flags. */
    flow->nw_src &= wildcards->nw_src_mask;
    flow->nw_dst &= wildcards->nw_dst_mask;
    if (wc & OFPFW_IN_PORT) {
    if (wc & OFPFW_DL_VLAN) {
    if (wc & OFPFW_DL_TYPE) {
    if (wc & OFPFW_TP_SRC) {
    if (wc & OFPFW_TP_DST) {
    if (wc & OFPFW_DL_SRC) {
        memset(flow->dl_src, 0, sizeof flow->dl_src);
    if (wc & OFPFW_DL_DST) {
        memset(flow->dl_dst, 0, sizeof flow->dl_dst);
    if (wc & OFPFW_NW_PROTO) {
    if (wc & OFPFW_DL_VLAN_PCP) {
        flow->dl_vlan_pcp = 0;
    if (wc & OFPFW_NW_TOS) {