cooperating group of processes or threads that emitted a log message.
The default log patterns now include this information.
- OpenFlow:
- - Allow bitwise masking for IPv6 flow label.
+ - Allow bitwise masking for the SHA and THA fields in ARP, the SLL and
+ TLL fields in IPv6 neighbor discovery messages, and the IPv6 flow label.
v1.8.0 - xx xxx xxxx
void
cls_rule_set_arp_sha(struct cls_rule *rule, const uint8_t sha[ETH_ADDR_LEN])
{
- rule->wc.wildcards &= ~FWW_ARP_SHA;
- memcpy(rule->flow.arp_sha, sha, ETH_ADDR_LEN);
+ cls_rule_set_eth(sha, rule->flow.arp_sha, rule->wc.arp_sha_mask);
+}
+
+void
+cls_rule_set_arp_sha_masked(struct cls_rule *rule,
+ const uint8_t arp_sha[ETH_ADDR_LEN],
+ const uint8_t mask[ETH_ADDR_LEN])
+{
+ cls_rule_set_eth_masked(arp_sha, mask,
+ rule->flow.arp_sha, rule->wc.arp_sha_mask);
}
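/* Illustrative sketch (not part of the patch): the new setters above assume
 * generic helpers along these lines, following the same pattern already used
 * for dl_src/dl_dst masking.  The actual helpers in classifier.c may differ;
 * the point is the semantics: an exact set installs an all-ones mask, and a
 * masked set stores the value ANDed with the mask alongside the mask itself. */
static void
cls_rule_set_eth(const uint8_t value[ETH_ADDR_LEN],
                 uint8_t flow_field[ETH_ADDR_LEN],
                 uint8_t wc_field[ETH_ADDR_LEN])
{
    memcpy(flow_field, value, ETH_ADDR_LEN);
    memset(wc_field, 0xff, ETH_ADDR_LEN);
}

static void
cls_rule_set_eth_masked(const uint8_t value[ETH_ADDR_LEN],
                        const uint8_t mask[ETH_ADDR_LEN],
                        uint8_t flow_field[ETH_ADDR_LEN],
                        uint8_t wc_field[ETH_ADDR_LEN])
{
    size_t i;

    for (i = 0; i < ETH_ADDR_LEN; i++) {
        flow_field[i] = value[i] & mask[i];
        wc_field[i] = mask[i];
    }
}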
void
cls_rule_set_arp_tha(struct cls_rule *rule, const uint8_t tha[ETH_ADDR_LEN])
{
- rule->wc.wildcards &= ~FWW_ARP_THA;
- memcpy(rule->flow.arp_tha, tha, ETH_ADDR_LEN);
+ cls_rule_set_eth(tha, rule->flow.arp_tha, rule->wc.arp_tha_mask);
+}
+
+void
+cls_rule_set_arp_tha_masked(struct cls_rule *rule,
+ const uint8_t arp_tha[ETH_ADDR_LEN],
+ const uint8_t mask[ETH_ADDR_LEN])
+{
+ cls_rule_set_eth_masked(arp_tha, mask,
+ rule->flow.arp_tha, rule->wc.arp_tha_mask);
}
void
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
if (rule->priority != OFP_DEFAULT_PRIORITY) {
ds_put_format(s, "priority=%d,", rule->priority);
}
}
if (f->dl_type == htons(ETH_TYPE_ARP)) {
- if (!(w & FWW_ARP_SHA)) {
- ds_put_format(s, "arp_sha="ETH_ADDR_FMT",",
- ETH_ADDR_ARGS(f->arp_sha));
- }
- if (!(w & FWW_ARP_THA)) {
- ds_put_format(s, "arp_tha="ETH_ADDR_FMT",",
- ETH_ADDR_ARGS(f->arp_tha));
- }
+ format_eth_masked(s, "arp_sha", f->arp_sha, wc->arp_sha_mask);
+ format_eth_masked(s, "arp_tha", f->arp_tha, wc->arp_tha_mask);
}
if (!(w & FWW_NW_DSCP)) {
ds_put_format(s, "nw_tos=%"PRIu8",", f->nw_tos & IP_DSCP_MASK);
format_be16_masked(s, "icmp_code", f->tp_dst, wc->tp_dst_mask);
format_ipv6_netmask(s, "nd_target", &f->nd_target,
&wc->nd_target_mask);
- if (!(w & FWW_ARP_SHA)) {
- ds_put_format(s, "nd_sll="ETH_ADDR_FMT",",
- ETH_ADDR_ARGS(f->arp_sha));
- }
- if (!(w & FWW_ARP_THA)) {
- ds_put_format(s, "nd_tll="ETH_ADDR_FMT",",
- ETH_ADDR_ARGS(f->arp_tha));
- }
+ format_eth_masked(s, "nd_sll", f->arp_sha, wc->arp_sha_mask);
+ format_eth_masked(s, "nd_tll", f->arp_tha, wc->arp_tha_mask);
} else {
format_be16_masked(s, "tp_src", f->tp_src, wc->tp_src_mask);
format_be16_masked(s, "tp_dst", f->tp_dst, wc->tp_dst_mask);
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
&& (wc & FWW_NW_DSCP || !((a->nw_tos ^ b->nw_tos) & IP_DSCP_MASK))
&& (wc & FWW_NW_ECN || !((a->nw_tos ^ b->nw_tos) & IP_ECN_MASK))
&& !((a->nw_frag ^ b->nw_frag) & wildcards->nw_frag_mask)
- && (wc & FWW_ARP_SHA || eth_addr_equals(a->arp_sha, b->arp_sha))
- && (wc & FWW_ARP_THA || eth_addr_equals(a->arp_tha, b->arp_tha))
+ && eth_addr_equal_except(a->arp_sha, b->arp_sha,
+ wildcards->arp_sha_mask)
+ && eth_addr_equal_except(a->arp_tha, b->arp_tha,
+ wildcards->arp_tha_mask)
&& !((a->ipv6_label ^ b->ipv6_label) & wildcards->ipv6_label_mask)
&& ipv6_equal_except(&a->ipv6_src, &b->ipv6_src,
&wildcards->ipv6_src_mask)
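/* Sketch of the assumed eth_addr_equal_except() helper used in
 * flow_equal_except() above (illustrative): two addresses compare equal if
 * they agree on every bit the mask marks as significant. */
static inline bool
eth_addr_equal_except(const uint8_t a[ETH_ADDR_LEN],
                      const uint8_t b[ETH_ADDR_LEN],
                      const uint8_t mask[ETH_ADDR_LEN])
{
    int i;

    for (i = 0; i < ETH_ADDR_LEN; i++) {
        if ((a[i] ^ b[i]) & mask[i]) {
            return false;
        }
    }
    return true;
}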
void cls_rule_set_icmp_type(struct cls_rule *, uint8_t);
void cls_rule_set_icmp_code(struct cls_rule *, uint8_t);
void cls_rule_set_arp_sha(struct cls_rule *, const uint8_t[6]);
+void cls_rule_set_arp_sha_masked(struct cls_rule *, const uint8_t[6],
+ const uint8_t [6]);
void cls_rule_set_arp_tha(struct cls_rule *, const uint8_t[6]);
+void cls_rule_set_arp_tha_masked(struct cls_rule *, const uint8_t[6],
+ const uint8_t [6]);
void cls_rule_set_ipv6_src(struct cls_rule *, const struct in6_addr *);
void cls_rule_set_ipv6_src_masked(struct cls_rule *, const struct in6_addr *,
const struct in6_addr *);
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
for (i = 0; i < FLOW_N_REGS; i++) {
flow->regs[i] &= wildcards->reg_masks[i];
flow->nw_ttl = 0;
}
flow->nw_frag &= wildcards->nw_frag_mask;
- if (wc & FWW_ARP_SHA) {
- memset(flow->arp_sha, 0, sizeof flow->arp_sha);
- }
- if (wc & FWW_ARP_THA) {
- memset(flow->arp_tha, 0, sizeof flow->arp_tha);
- }
+ eth_addr_bitand(flow->arp_sha, wildcards->arp_sha_mask, flow->arp_sha);
+ eth_addr_bitand(flow->arp_tha, wildcards->arp_tha_mask, flow->arp_tha);
flow->ipv6_src = ipv6_addr_bitand(&flow->ipv6_src,
&wildcards->ipv6_src_mask);
flow->ipv6_dst = ipv6_addr_bitand(&flow->ipv6_dst,
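/* Sketch of the assumed eth_addr_bitand() helper (illustrative): a byte-wise
 * AND of an address with a mask, mirroring ipv6_addr_bitand() for IPv6.
 * flow_zero_wildcards() above uses it to clear the wildcarded bits of
 * arp_sha/arp_tha in place. */
static inline void
eth_addr_bitand(const uint8_t src[ETH_ADDR_LEN],
                const uint8_t mask[ETH_ADDR_LEN],
                uint8_t dst[ETH_ADDR_LEN])
{
    int i;

    for (i = 0; i < ETH_ADDR_LEN; i++) {
        dst[i] = src[i] & mask[i];
    }
}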
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
fmd->tun_id = flow->tun_id;
fmd->tun_id_mask = htonll(UINT64_MAX);
void
flow_wildcards_init_catchall(struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
wc->wildcards = FWW_ALL;
wc->tun_id_mask = htonll(0);
wc->tp_dst_mask = htons(0);
memset(wc->dl_src_mask, 0, ETH_ADDR_LEN);
memset(wc->dl_dst_mask, 0, ETH_ADDR_LEN);
+ memset(wc->arp_sha_mask, 0, ETH_ADDR_LEN);
+ memset(wc->arp_tha_mask, 0, ETH_ADDR_LEN);
memset(wc->zeros, 0, sizeof wc->zeros);
}
void
flow_wildcards_init_exact(struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
wc->wildcards = 0;
wc->tun_id_mask = htonll(UINT64_MAX);
wc->tp_dst_mask = htons(UINT16_MAX);
memset(wc->dl_src_mask, 0xff, ETH_ADDR_LEN);
memset(wc->dl_dst_mask, 0xff, ETH_ADDR_LEN);
+ memset(wc->arp_sha_mask, 0xff, ETH_ADDR_LEN);
+ memset(wc->arp_tha_mask, 0xff, ETH_ADDR_LEN);
memset(wc->zeros, 0, sizeof wc->zeros);
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
if (wc->wildcards
|| wc->tun_id_mask != htonll(UINT64_MAX)
|| wc->metadata_mask != htonll(UINT64_MAX)
|| !eth_mask_is_exact(wc->dl_src_mask)
|| !eth_mask_is_exact(wc->dl_dst_mask)
+ || !eth_mask_is_exact(wc->arp_sha_mask)
+ || !eth_mask_is_exact(wc->arp_tha_mask)
|| !ipv6_mask_is_exact(&wc->ipv6_src_mask)
|| !ipv6_mask_is_exact(&wc->ipv6_dst_mask)
|| wc->ipv6_label_mask != htonl(UINT32_MAX)
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
if (wc->wildcards != FWW_ALL
|| wc->tun_id_mask != htonll(0)
|| wc->metadata_mask != htonll(0)
|| !eth_addr_is_zero(wc->dl_src_mask)
|| !eth_addr_is_zero(wc->dl_dst_mask)
+ || !eth_addr_is_zero(wc->arp_sha_mask)
+ || !eth_addr_is_zero(wc->arp_tha_mask)
|| !ipv6_mask_is_any(&wc->ipv6_src_mask)
|| !ipv6_mask_is_any(&wc->ipv6_dst_mask)
|| wc->ipv6_label_mask != htonl(0)
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
dst->wildcards = src1->wildcards | src2->wildcards;
dst->tun_id_mask = src1->tun_id_mask & src2->tun_id_mask;
dst->tp_dst_mask = src1->tp_dst_mask & src2->tp_dst_mask;
eth_addr_bitand(src1->dl_src_mask, src2->dl_src_mask, dst->dl_src_mask);
eth_addr_bitand(src1->dl_dst_mask, src2->dl_dst_mask, dst->dl_dst_mask);
+ eth_addr_bitand(src1->arp_sha_mask, src2->arp_sha_mask, dst->arp_sha_mask);
+ eth_addr_bitand(src1->arp_tha_mask, src2->arp_tha_mask, dst->arp_tha_mask);
}
/* Returns a hash of the wildcards in 'wc'. */
/* If you change struct flow_wildcards and thereby trigger this
* assertion, please check that the new struct flow_wildcards has no holes
* in it before you update the assertion. */
- BUILD_ASSERT_DECL(sizeof *wc == 104 + FLOW_N_REGS * 4);
+ BUILD_ASSERT_DECL(sizeof *wc == 112 + FLOW_N_REGS * 4);
return hash_bytes(wc, sizeof *wc, basis);
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
if (a->wildcards != b->wildcards
|| a->tun_id_mask != b->tun_id_mask
|| a->tp_src_mask != b->tp_src_mask
|| a->tp_dst_mask != b->tp_dst_mask
|| !eth_addr_equals(a->dl_src_mask, b->dl_src_mask)
- || !eth_addr_equals(a->dl_dst_mask, b->dl_dst_mask)) {
+ || !eth_addr_equals(a->dl_dst_mask, b->dl_dst_mask)
+ || !eth_addr_equals(a->arp_sha_mask, b->arp_sha_mask)
+ || !eth_addr_equals(a->arp_tha_mask, b->arp_tha_mask)) {
return false;
}
uint8_t eth_masked[ETH_ADDR_LEN];
struct in6_addr ipv6_masked;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) {
return true;
}
+ eth_addr_bitand(a->arp_sha_mask, b->arp_sha_mask, eth_masked);
+ if (!eth_addr_equals(eth_masked, b->arp_sha_mask)) {
+ return true;
+ }
+
+ eth_addr_bitand(a->arp_tha_mask, b->arp_tha_mask, eth_masked);
+ if (!eth_addr_equals(eth_masked, b->arp_tha_mask)) {
+ return true;
+ }
+
ipv6_masked = ipv6_addr_bitand(&a->ipv6_src_mask, &b->ipv6_src_mask);
if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_src_mask)) {
return true;
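/* The two new checks above follow the existing pattern for masked fields:
 * flow_wildcards_has_extra(a, b) must report true when 'b' matches on an
 * arp_sha/arp_tha bit that 'a' wildcards.  Worked example (illustrative):
 * with a->arp_sha_mask = ff:ff:ff:ff:00:00 and
 * b->arp_sha_mask = ff:ff:ff:ff:ff:00, the AND is ff:ff:ff:ff:00:00, which
 * differs from b's mask, so the function returns true. */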
/* This sequence number should be incremented whenever anything involving flows
* or the wildcarding of flows changes. This will cause build assertion
* failures in places which likely need to be updated. */
-#define FLOW_WC_SEQ 13
+#define FLOW_WC_SEQ 14
#define FLOW_N_REGS 8
BUILD_ASSERT_DECL(FLOW_N_REGS <= NXM_NX_MAX_REGS);
BUILD_ASSERT_DECL(sizeof(struct flow) == FLOW_SIG_SIZE + FLOW_PAD_SIZE);
/* Remember to update FLOW_WC_SEQ when changing 'struct flow'. */
-BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 150 && FLOW_WC_SEQ == 13);
+BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 150 && FLOW_WC_SEQ == 14);
void flow_extract(struct ofpbuf *, uint32_t priority, ovs_be64 tun_id,
uint16_t in_port, struct flow *);
/* No corresponding OFPFW10_* bits. */
#define FWW_NW_DSCP ((OVS_FORCE flow_wildcards_t) (1 << 1))
#define FWW_NW_ECN ((OVS_FORCE flow_wildcards_t) (1 << 2))
-#define FWW_ARP_SHA ((OVS_FORCE flow_wildcards_t) (1 << 3))
-#define FWW_ARP_THA ((OVS_FORCE flow_wildcards_t) (1 << 6))
-#define FWW_NW_TTL ((OVS_FORCE flow_wildcards_t) (1 << 7))
-#define FWW_ALL ((OVS_FORCE flow_wildcards_t) (((1 << 8)) - 1))
+#define FWW_NW_TTL ((OVS_FORCE flow_wildcards_t) (1 << 3))
+#define FWW_ALL ((OVS_FORCE flow_wildcards_t) (((1 << 6)) - 1))
/* Remember to update FLOW_WC_SEQ when adding or removing FWW_*. */
-BUILD_ASSERT_DECL(FWW_ALL == ((1 << 8) - 1) && FLOW_WC_SEQ == 13);
+BUILD_ASSERT_DECL(FWW_ALL == ((1 << 6) - 1) && FLOW_WC_SEQ == 14);
/* Information on wildcards for a flow, as a supplement to "struct flow".
*
uint8_t nw_frag_mask; /* 1-bit in each significant nw_frag bit. */
uint8_t dl_src_mask[6]; /* 1-bit in each significant dl_src bit. */
uint8_t dl_dst_mask[6]; /* 1-bit in each significant dl_dst bit. */
- uint8_t zeros[5]; /* Padding field set to zero. */
+ uint8_t arp_sha_mask[6]; /* 1-bit in each significant arp_sha bit. */
+ uint8_t arp_tha_mask[6]; /* 1-bit in each significant arp_tha bit. */
+ uint8_t zeros[1]; /* Padding field set to zero. */
};
/* Remember to update FLOW_WC_SEQ when updating struct flow_wildcards. */
-BUILD_ASSERT_DECL(sizeof(struct flow_wildcards) == 136 && FLOW_WC_SEQ == 13);
+BUILD_ASSERT_DECL(sizeof(struct flow_wildcards) == 144 && FLOW_WC_SEQ == 14);
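/* Size bookkeeping behind the updated assertions (illustrative arithmetic):
 * the struct gains 2 * 6 = 12 bytes for arp_sha_mask/arp_tha_mask while
 * zeros[] shrinks from 5 bytes to 1, a net growth of 8 bytes, so
 * sizeof(struct flow_wildcards) goes from 136 to 144.  The assertion in the
 * hash function expresses the same size with the register masks factored
 * out: 112 + FLOW_N_REGS * 4 = 144 with FLOW_N_REGS == 8. */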
void flow_wildcards_init_catchall(struct flow_wildcards *);
void flow_wildcards_init_exact(struct flow_wildcards *);
}, {
MFF_ARP_SHA, "arp_sha", NULL,
MF_FIELD_SIZES(mac),
- MFM_NONE, FWW_ARP_SHA,
+ MFM_FULLY, 0,
MFS_ETHERNET,
MFP_ARP,
false,
}, {
MFF_ARP_THA, "arp_tha", NULL,
MF_FIELD_SIZES(mac),
- MFM_NONE, FWW_ARP_THA,
+ MFM_FULLY, 0,
MFS_ETHERNET,
MFP_ARP,
false,
}, {
MFF_ND_SLL, "nd_sll", NULL,
MF_FIELD_SIZES(mac),
- MFM_NONE, FWW_ARP_SHA,
+ MFM_FULLY, 0,
MFS_ETHERNET,
MFP_ND_SOLICIT,
false,
}, {
MFF_ND_TLL, "nd_tll", NULL,
MF_FIELD_SIZES(mac),
- MFM_NONE, FWW_ARP_THA,
+ MFM_FULLY, 0,
MFS_ETHERNET,
MFP_ND_ADVERT,
false,
case MFF_IP_ECN:
case MFF_IP_TTL:
case MFF_ARP_OP:
- case MFF_ARP_SHA:
- case MFF_ARP_THA:
- case MFF_ND_SLL:
- case MFF_ND_TLL:
assert(mf->fww_bit != 0);
return (wc->wildcards & mf->fww_bit) != 0;
case MFF_ETH_DST:
return eth_addr_is_zero(wc->dl_dst_mask);
+ case MFF_ARP_SHA:
+ case MFF_ND_SLL:
+ return eth_addr_is_zero(wc->arp_sha_mask);
+
+ case MFF_ARP_THA:
+ case MFF_ND_TLL:
+ return eth_addr_is_zero(wc->arp_tha_mask);
+
case MFF_VLAN_TCI:
return !wc->vlan_tci_mask;
case MFF_VLAN_VID:
case MFF_IP_ECN:
case MFF_IP_TTL:
case MFF_ARP_OP:
- case MFF_ARP_SHA:
- case MFF_ARP_THA:
- case MFF_ND_SLL:
- case MFF_ND_TLL:
assert(mf->fww_bit != 0);
memset(mask, wc->wildcards & mf->fww_bit ? 0x00 : 0xff, mf->n_bytes);
break;
case MFF_ARP_TPA:
mask->be32 = wc->nw_dst_mask;
break;
+ case MFF_ARP_SHA:
+ case MFF_ND_SLL:
+ memcpy(mask->mac, wc->arp_sha_mask, ETH_ADDR_LEN);
+ break;
+ case MFF_ARP_THA:
+ case MFF_ND_TLL:
+ memcpy(mask->mac, wc->arp_tha_mask, ETH_ADDR_LEN);
+ break;
case MFF_TCP_SRC:
case MFF_UDP_SRC:
case MFF_ARP_SHA:
case MFF_ND_SLL:
- rule->wc.wildcards |= FWW_ARP_SHA;
- memset(rule->flow.arp_sha, 0, sizeof rule->flow.arp_sha);
+ memset(rule->flow.arp_sha, 0, ETH_ADDR_LEN);
+ memset(rule->wc.arp_sha_mask, 0, ETH_ADDR_LEN);
break;
case MFF_ARP_THA:
case MFF_ND_TLL:
- rule->wc.wildcards |= FWW_ARP_THA;
- memset(rule->flow.arp_tha, 0, sizeof rule->flow.arp_tha);
+ memset(rule->flow.arp_tha, 0, ETH_ADDR_LEN);
+ memset(rule->wc.arp_tha_mask, 0, ETH_ADDR_LEN);
break;
case MFF_TCP_SRC:
case MFF_IP_DSCP:
case MFF_IP_ECN:
case MFF_ARP_OP:
- case MFF_ARP_SHA:
- case MFF_ARP_THA:
case MFF_ICMPV4_TYPE:
case MFF_ICMPV4_CODE:
case MFF_ICMPV6_TYPE:
case MFF_ICMPV6_CODE:
- case MFF_ND_SLL:
- case MFF_ND_TLL:
NOT_REACHED();
case MFF_TUN_ID:
cls_rule_set_dl_src_masked(rule, value->mac, mask->mac);
break;
+ case MFF_ARP_SHA:
+ case MFF_ND_SLL:
+ cls_rule_set_arp_sha_masked(rule, value->mac, mask->mac);
+ break;
+
+ case MFF_ARP_THA:
+ case MFF_ND_TLL:
+ cls_rule_set_arp_tha_masked(rule, value->mac, mask->mac);
+ break;
+
case MFF_VLAN_TCI:
cls_rule_set_dl_tci_masked(rule, value->be16, mask->be16);
break;
int match_len;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
/* Metadata. */
if (!(wc & FWW_IN_PORT)) {
flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET,
&flow->nd_target, &cr->wc.nd_target_mask);
- if (!(wc & FWW_ARP_SHA)
- && flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
- nxm_put_eth(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL,
- flow->arp_sha);
+ if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
+ nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL,
+ flow->arp_sha, cr->wc.arp_sha_mask);
}
- if (!(wc & FWW_ARP_THA)
- && flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
- nxm_put_eth(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL,
- flow->arp_tha);
+ if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
+ nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL,
+ flow->arp_tha, cr->wc.arp_tha_mask);
}
}
} else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
flow->nw_src, cr->wc.nw_src_mask);
nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA,
flow->nw_dst, cr->wc.nw_dst_mask);
- if (!(wc & FWW_ARP_SHA)) {
- nxm_put_eth(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
- flow->arp_sha);
- }
- if (!(wc & FWW_ARP_THA)) {
- nxm_put_eth(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
- flow->arp_tha);
- }
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
+ flow->arp_sha, cr->wc.arp_sha_mask);
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
+ flow->arp_tha, cr->wc.arp_tha_mask);
}
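/* Sketch of the assumed nxm_put_eth_masked() helper (illustrative, not part
 * of the patch): like the other *_masked put helpers in nx-match.c, it is
 * expected to emit nothing for an all-zero mask, the plain NXM/OXM entry for
 * an all-ones mask, and the wildcarded ("_W") form with the mask appended
 * otherwise.  NXM_MAKE_WILD_HEADER is assumed to convert an exact-match
 * header into its masked counterpart. */
static void
nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
                   const uint8_t value[ETH_ADDR_LEN],
                   const uint8_t mask[ETH_ADDR_LEN])
{
    if (eth_addr_is_zero(mask)) {
        return;
    }
    if (eth_mask_is_exact(mask)) {
        nxm_put_eth(b, header, value);
    } else {
        nxm_put_eth(b, NXM_MAKE_WILD_HEADER(header), value);
        ofpbuf_put(b, mask, ETH_ADDR_LEN);
    }
}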
/* Tunnel ID. */
void
ofputil_wildcard_from_ofpfw10(uint32_t ofpfw, struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
/* Initialize most of rule->wc. */
flow_wildcards_init_catchall(wc);
wc->wildcards = (OVS_FORCE flow_wildcards_t) ofpfw & WC_INVARIANTS;
/* Wildcard fields that aren't defined by ofp10_match or tun_id. */
- wc->wildcards |= FWW_ARP_SHA | FWW_ARP_THA | FWW_NW_ECN | FWW_NW_TTL;
+ wc->wildcards |= FWW_NW_ECN | FWW_NW_TTL;
if (ofpfw & OFPFW10_NW_TOS) {
/* OpenFlow 1.0 defines a TOS wildcard, but it's much later in
{
const struct flow_wildcards *wc = &rule->wc;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 13);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
/* NXM and OF1.1+ supports bitwise matching on ethernet addresses. */
if (!eth_mask_is_exact(wc->dl_src_mask)
}
/* Only NXM supports matching ARP hardware addresses. */
- if (!(wc->wildcards & FWW_ARP_SHA) || !(wc->wildcards & FWW_ARP_THA)) {
+ if (!eth_addr_is_zero(wc->arp_sha_mask) ||
+ !eth_addr_is_zero(wc->arp_tha_mask)) {
return OFPUTIL_P_NXM_ANY;
}
wc.wildcards |= FWW_NW_TTL;
}
if (!(may_match & MAY_ARP_SHA)) {
- wc.wildcards |= FWW_ARP_SHA;
+ memset(wc.arp_sha_mask, 0, ETH_ADDR_LEN);
}
if (!(may_match & MAY_ARP_THA)) {
- wc.wildcards |= FWW_ARP_THA;
+ memset(wc.arp_tha_mask, 0, ETH_ADDR_LEN);
}
if (!(may_match & MAY_IPV6)) {
wc.ipv6_src_mask = wc.ipv6_dst_mask = in6addr_any;
OXM_OF_ETH_TYPE(0806) OXM_OF_ARP_SHA(0002e30f80a4)
OXM_OF_ETH_TYPE(0800) OXM_OF_ARP_SHA(0002e30f80a4)
OXM_OF_ARP_SHA(0002e30f80a4)
+OXM_OF_ETH_TYPE(0806) OXM_OF_ARP_SHA_W(0002e30f80a4/ffffffffffff)
+OXM_OF_ETH_TYPE(0806) OXM_OF_ARP_SHA_W(0002e30f80a4/000000000000)
+OXM_OF_ETH_TYPE(0806) OXM_OF_ARP_SHA_W(0002e30f80a4/00000000000f)
# ARP destination hardware address
OXM_OF_ETH_TYPE(0806) OXM_OF_ARP_THA(0002e30f80a4)
OXM_OF_ETH_TYPE(0800) OXM_OF_ARP_THA(0002e30f80a4)
OXM_OF_ARP_THA(0002e30f80a4)
+OXM_OF_ETH_TYPE(0806) OXM_OF_ARP_THA_W(0002e30f80a4/ffffffffffff)
+OXM_OF_ETH_TYPE(0806) OXM_OF_ARP_THA_W(0002e30f80a4/000000000000)
+OXM_OF_ETH_TYPE(0806) OXM_OF_ARP_THA_W(0002e30f80a4/00000000000f)
# IPv6 source
OXM_OF_ETH_TYPE(86dd) OXM_OF_IPV6_SRC(20010db83c4d00010002000300040005)
OXM_OF_ETH_TYPE(0806), OXM_OF_ARP_SHA(0002e30f80a4)
nx_pull_match() returned error OFPBMC_BAD_PREREQ
nx_pull_match() returned error OFPBMC_BAD_PREREQ
+OXM_OF_ETH_TYPE(0806), OXM_OF_ARP_SHA(0002e30f80a4)
+OXM_OF_ETH_TYPE(0806)
+OXM_OF_ETH_TYPE(0806), OXM_OF_ARP_SHA_W(000000000004/00000000000f)
# ARP destination hardware address
OXM_OF_ETH_TYPE(0806), OXM_OF_ARP_THA(0002e30f80a4)
nx_pull_match() returned error OFPBMC_BAD_PREREQ
nx_pull_match() returned error OFPBMC_BAD_PREREQ
+OXM_OF_ETH_TYPE(0806), OXM_OF_ARP_THA(0002e30f80a4)
+OXM_OF_ETH_TYPE(0806)
+OXM_OF_ETH_TYPE(0806), OXM_OF_ARP_THA_W(000000000004/00000000000f)
# IPv6 source
OXM_OF_ETH_TYPE(86dd), OXM_OF_IPV6_SRC(20010db83c4d00010002000300040005)