+flow_wildcards_equal(const struct flow_wildcards *a,
+ const struct flow_wildcards *b)
+{
+ int i;
+
+ /* Compile-time tripwire: FLOW_WC_SEQ is bumped whenever a field is
+ * added to struct flow_wildcards, forcing this comparison to be
+ * revisited so no new mask is silently left out. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+
+ /* Field-by-field comparison of every mask; any single mismatch means
+ * the two wildcard sets are not equal. */
+ if (a->wildcards != b->wildcards
+ || a->tun_id_mask != b->tun_id_mask
+ || a->nw_src_mask != b->nw_src_mask
+ || a->nw_dst_mask != b->nw_dst_mask
+ || a->vlan_tci_mask != b->vlan_tci_mask
+ || a->metadata_mask != b->metadata_mask
+ || !ipv6_addr_equals(&a->ipv6_src_mask, &b->ipv6_src_mask)
+ || !ipv6_addr_equals(&a->ipv6_dst_mask, &b->ipv6_dst_mask)
+ || a->ipv6_label_mask != b->ipv6_label_mask
+ || !ipv6_addr_equals(&a->nd_target_mask, &b->nd_target_mask)
+ || a->tp_src_mask != b->tp_src_mask
+ || a->tp_dst_mask != b->tp_dst_mask
+ || a->nw_frag_mask != b->nw_frag_mask
+ || !eth_addr_equals(a->dl_src_mask, b->dl_src_mask)
+ || !eth_addr_equals(a->dl_dst_mask, b->dl_dst_mask)
+ || !eth_addr_equals(a->arp_sha_mask, b->arp_sha_mask)
+ || !eth_addr_equals(a->arp_tha_mask, b->arp_tha_mask)) {
+ return false;
+ }
+
+ /* The per-register masks are compared element-wise. */
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ if (a->reg_masks[i] != b->reg_masks[i]) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Returns true if at least one bit or field is wildcarded in 'a' but not in
+ * 'b', false otherwise. */
+bool
+flow_wildcards_has_extra(const struct flow_wildcards *a,
+ const struct flow_wildcards *b)
+{
+ int i;
+ uint8_t eth_masked[ETH_ADDR_LEN];
+ struct in6_addr ipv6_masked;
+
+ /* Compile-time tripwire: bumping FLOW_WC_SEQ (new flow_wildcards
+ * field) forces this function to be revisited. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+
+ /* For every mask the test is the same subset check: 'a' has an "extra"
+ * wildcard iff some bit is set (matched) in b's mask but clear
+ * (wildcarded) in a's, i.e. (a_mask & b_mask) != b_mask. */
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) {
+ return true;
+ }
+ }
+
+ /* Ethernet-address masks: compute the AND into a scratch buffer, then
+ * apply the same subset test via byte-wise equality. */
+ eth_addr_bitand(a->dl_src_mask, b->dl_src_mask, eth_masked);
+ if (!eth_addr_equals(eth_masked, b->dl_src_mask)) {
+ return true;
+ }
+
+ eth_addr_bitand(a->dl_dst_mask, b->dl_dst_mask, eth_masked);
+ if (!eth_addr_equals(eth_masked, b->dl_dst_mask)) {
+ return true;
+ }
+
+ eth_addr_bitand(a->arp_sha_mask, b->arp_sha_mask, eth_masked);
+ if (!eth_addr_equals(eth_masked, b->arp_sha_mask)) {
+ return true;
+ }
+
+ eth_addr_bitand(a->arp_tha_mask, b->arp_tha_mask, eth_masked);
+ if (!eth_addr_equals(eth_masked, b->arp_tha_mask)) {
+ return true;
+ }
+
+ /* IPv6-address masks: same subset test on 128-bit masks. */
+ ipv6_masked = ipv6_addr_bitand(&a->ipv6_src_mask, &b->ipv6_src_mask);
+ if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_src_mask)) {
+ return true;
+ }
+
+ ipv6_masked = ipv6_addr_bitand(&a->ipv6_dst_mask, &b->ipv6_dst_mask);
+ if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_dst_mask)) {
+ return true;
+ }
+
+ ipv6_masked = ipv6_addr_bitand(&a->nd_target_mask, &b->nd_target_mask);
+ if (!ipv6_addr_equals(&ipv6_masked, &b->nd_target_mask)) {
+ return true;
+ }
+
+ /* Remaining scalar masks, plus the 'wildcards' bitmap itself: a bit
+ * set in a->wildcards but not b->wildcards is an extra wildcard. */
+ return (a->wildcards & ~b->wildcards
+ || (a->tun_id_mask & b->tun_id_mask) != b->tun_id_mask
+ || (a->nw_src_mask & b->nw_src_mask) != b->nw_src_mask
+ || (a->nw_dst_mask & b->nw_dst_mask) != b->nw_dst_mask
+ || (a->ipv6_label_mask & b->ipv6_label_mask) != b->ipv6_label_mask
+ || (a->vlan_tci_mask & b->vlan_tci_mask) != b->vlan_tci_mask
+ || (a->metadata_mask & b->metadata_mask) != b->metadata_mask
+ || (a->tp_src_mask & b->tp_src_mask) != b->tp_src_mask
+ || (a->tp_dst_mask & b->tp_dst_mask) != b->tp_dst_mask
+ || (a->nw_frag_mask & b->nw_frag_mask) != b->nw_frag_mask);
+}
+
+/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
+ * (A 0-bit indicates a wildcard bit.)
+ *
+ * NOTE(review): 'idx' is not range-checked here; the caller must ensure
+ * 0 <= idx < FLOW_N_REGS or this writes out of bounds. */
+void
+flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
+{
+ wc->reg_masks[idx] = mask;
+}
+
+/* Hashes 'flow' based on its L2 through L4 protocol information. */
+uint32_t
+flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
+{
+ /* Scratch record that is hashed as raw bytes below.  Direction-
+ * dependent fields are folded in with XOR so that the two directions
+ * of a bidirectional flow hash to the same value. */
+ struct {
+ union {
+ ovs_be32 ipv4_addr;
+ struct in6_addr ipv6_addr;
+ };
+ ovs_be16 eth_type;
+ ovs_be16 vlan_tci;
+ ovs_be16 tp_port;
+ uint8_t eth_addr[ETH_ADDR_LEN];
+ uint8_t ip_proto;
+ } fields;
+
+ int i;
+
+ /* Zero the whole struct first so padding bytes and unset fields do not
+ * perturb the byte-wise hash. */
+ memset(&fields, 0, sizeof fields);
+ /* XOR of source and destination MACs is symmetric in direction. */
+ for (i = 0; i < ETH_ADDR_LEN; i++) {
+ fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
+ }
+ /* Keep only the VLAN ID bits; PCP/CFI do not influence the hash. */
+ fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
+ fields.eth_type = flow->dl_type;
+
+ /* UDP source and destination port are not taken into account because they
+ * will not necessarily be symmetric in a bidirectional flow. */
+ if (fields.eth_type == htons(ETH_TYPE_IP)) {
+ fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
+ fields.ip_proto = flow->nw_proto;
+ if (fields.ip_proto == IPPROTO_TCP) {
+ fields.tp_port = flow->tp_src ^ flow->tp_dst;
+ }
+ } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
+ const uint8_t *a = &flow->ipv6_src.s6_addr[0];
+ const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
+ uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];
+
+ /* Byte-wise XOR of the two 128-bit addresses, symmetric as above. */
+ for (i=0; i<16; i++) {
+ ipv6_addr[i] = a[i] ^ b[i];
+ }
+ fields.ip_proto = flow->nw_proto;
+ if (fields.ip_proto == IPPROTO_TCP) {
+ fields.tp_port = flow->tp_src ^ flow->tp_dst;
+ }
+ }
+ return hash_bytes(&fields, sizeof fields, basis);
+}
+
+/* Hashes the portions of 'flow' designated by 'fields'. */
+uint32_t
+flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
+ uint16_t basis)