void
cls_rule_set_nw_proto(struct cls_rule *rule, uint8_t nw_proto)
{
- rule->wc.wildcards &= ~FWW_NW_PROTO;
rule->flow.nw_proto = nw_proto;
+ rule->wc.nw_proto_mask = UINT8_MAX;
}
void
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
if (rule->priority != OFP_DEFAULT_PRIORITY) {
ds_put_format(s, "priority=%d,", rule->priority);
if (!(w & FWW_DL_TYPE)) {
skip_type = true;
if (f->dl_type == htons(ETH_TYPE_IP)) {
- if (!(w & FWW_NW_PROTO)) {
+ if (wc->nw_proto_mask) {
skip_proto = true;
if (f->nw_proto == IPPROTO_ICMP) {
ds_put_cstr(s, "icmp,");
ds_put_cstr(s, "ip,");
}
} else if (f->dl_type == htons(ETH_TYPE_IPV6)) {
- if (!(w & FWW_NW_PROTO)) {
+ if (wc->nw_proto_mask) {
skip_proto = true;
if (f->nw_proto == IPPROTO_ICMPV6) {
ds_put_cstr(s, "icmp6,");
format_ip_netmask(s, "nw_src", f->nw_src, wc->nw_src_mask);
format_ip_netmask(s, "nw_dst", f->nw_dst, wc->nw_dst_mask);
}
- if (!skip_proto && !(w & FWW_NW_PROTO)) {
+ if (!skip_proto && wc->nw_proto_mask) {
if (f->dl_type == htons(ETH_TYPE_ARP)) {
ds_put_format(s, "arp_op=%"PRIu8",", f->nw_proto);
} else {
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
wildcards->dl_src_mask)
&& eth_addr_equal_except(a->dl_dst, b->dl_dst,
wildcards->dl_dst_mask)
- && (wc & FWW_NW_PROTO || a->nw_proto == b->nw_proto)
+ && !((a->nw_proto ^ b->nw_proto) & wildcards->nw_proto_mask)
&& !((a->nw_ttl ^ b->nw_ttl) & wildcards->nw_ttl_mask)
&& !((a->nw_tos ^ b->nw_tos) & wildcards->nw_tos_mask)
&& !((a->nw_frag ^ b->nw_frag) & wildcards->nw_frag_mask)
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
for (i = 0; i < FLOW_N_REGS; i++) {
flow->regs[i] &= wildcards->reg_masks[i];
flow->tp_dst &= wildcards->tp_dst_mask;
eth_addr_bitand(flow->dl_src, wildcards->dl_src_mask, flow->dl_src);
eth_addr_bitand(flow->dl_dst, wildcards->dl_dst_mask, flow->dl_dst);
- if (wc & FWW_NW_PROTO) {
- flow->nw_proto = 0;
- }
flow->ipv6_label &= wildcards->ipv6_label_mask;
+ flow->nw_proto &= wildcards->nw_proto_mask;
flow->nw_tos &= wildcards->nw_tos_mask;
flow->nw_ttl &= wildcards->nw_ttl_mask;
flow->nw_frag &= wildcards->nw_frag_mask;
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
fmd->tun_id = flow->tun_id;
fmd->metadata = flow->metadata;
void
flow_wildcards_init_catchall(struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
wc->wildcards = FWW_ALL;
wc->tun_id_mask = htonll(0);
memset(wc->dl_dst_mask, 0, ETH_ADDR_LEN);
memset(wc->arp_sha_mask, 0, ETH_ADDR_LEN);
memset(wc->arp_tha_mask, 0, ETH_ADDR_LEN);
+ wc->nw_proto_mask = 0;
wc->nw_tos_mask = 0;
wc->nw_ttl_mask = 0;
memset(wc->zeros, 0, sizeof wc->zeros);
void
flow_wildcards_init_exact(struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
wc->wildcards = 0;
wc->tun_id_mask = htonll(UINT64_MAX);
memset(wc->dl_dst_mask, 0xff, ETH_ADDR_LEN);
memset(wc->arp_sha_mask, 0xff, ETH_ADDR_LEN);
memset(wc->arp_tha_mask, 0xff, ETH_ADDR_LEN);
+ wc->nw_proto_mask = UINT8_MAX;
wc->nw_tos_mask = UINT8_MAX;
wc->nw_ttl_mask = UINT8_MAX;
memset(wc->zeros, 0, sizeof wc->zeros);
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
if (wc->wildcards
|| wc->tun_id_mask != htonll(UINT64_MAX)
|| !ipv6_mask_is_exact(&wc->ipv6_dst_mask)
|| wc->ipv6_label_mask != htonl(UINT32_MAX)
|| !ipv6_mask_is_exact(&wc->nd_target_mask)
+ || wc->nw_proto_mask != UINT8_MAX
|| wc->nw_frag_mask != UINT8_MAX
|| wc->nw_tos_mask != UINT8_MAX
|| wc->nw_ttl_mask != UINT8_MAX) {
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
if (wc->wildcards != FWW_ALL
|| wc->tun_id_mask != htonll(0)
|| !ipv6_mask_is_any(&wc->ipv6_dst_mask)
|| wc->ipv6_label_mask != htonl(0)
|| !ipv6_mask_is_any(&wc->nd_target_mask)
+ || wc->nw_proto_mask != 0
|| wc->nw_frag_mask != 0
|| wc->nw_tos_mask != 0
|| wc->nw_ttl_mask != 0) {
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
dst->wildcards = src1->wildcards | src2->wildcards;
dst->tun_id_mask = src1->tun_id_mask & src2->tun_id_mask;
eth_addr_bitand(src1->dl_dst_mask, src2->dl_dst_mask, dst->dl_dst_mask);
eth_addr_bitand(src1->arp_sha_mask, src2->arp_sha_mask, dst->arp_sha_mask);
eth_addr_bitand(src1->arp_tha_mask, src2->arp_tha_mask, dst->arp_tha_mask);
+ dst->nw_proto_mask = src1->nw_proto_mask & src2->nw_proto_mask;
dst->nw_tos_mask = src1->nw_tos_mask & src2->nw_tos_mask;
dst->nw_ttl_mask = src1->nw_ttl_mask & src2->nw_ttl_mask;
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
if (a->wildcards != b->wildcards
|| a->tun_id_mask != b->tun_id_mask
|| !eth_addr_equals(a->dl_dst_mask, b->dl_dst_mask)
|| !eth_addr_equals(a->arp_sha_mask, b->arp_sha_mask)
|| !eth_addr_equals(a->arp_tha_mask, b->arp_tha_mask)
+ || a->nw_proto_mask != b->nw_proto_mask
|| a->nw_tos_mask != b->nw_tos_mask
|| a->nw_ttl_mask != b->nw_ttl_mask) {
return false;
uint8_t eth_masked[ETH_ADDR_LEN];
struct in6_addr ipv6_masked;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) {
|| (a->metadata_mask & b->metadata_mask) != b->metadata_mask
|| (a->tp_src_mask & b->tp_src_mask) != b->tp_src_mask
|| (a->tp_dst_mask & b->tp_dst_mask) != b->tp_dst_mask
+ || (a->nw_proto_mask & b->nw_proto_mask) != b->nw_proto_mask
|| (a->nw_frag_mask & b->nw_frag_mask) != b->nw_frag_mask
|| (a->nw_tos_mask & b->nw_tos_mask) != b->nw_tos_mask
|| (a->nw_ttl_mask & b->nw_ttl_mask) != b->nw_ttl_mask);
/* This sequence number should be incremented whenever anything involving flows
* or the wildcarding of flows changes. This will cause build assertion
* failures in places which likely need to be updated. */
-#define FLOW_WC_SEQ 15
+#define FLOW_WC_SEQ 16
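/* Illustrative note: each file that must handle every flow/wildcard field
 * carries a matching compile-time check, for example
 *     BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
 * so bumping the value above deliberately breaks the build at every such
 * site until its assertion is reviewed and updated, as done throughout this
 * patch. */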
#define FLOW_N_REGS 8
BUILD_ASSERT_DECL(FLOW_N_REGS <= NXM_NX_MAX_REGS);
BUILD_ASSERT_DECL(sizeof(struct flow) == FLOW_SIG_SIZE + FLOW_PAD_SIZE);
/* Remember to update FLOW_WC_SEQ when changing 'struct flow'. */
-BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 150 && FLOW_WC_SEQ == 15);
+BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 150 && FLOW_WC_SEQ == 16);
void flow_extract(struct ofpbuf *, uint32_t priority, ovs_be64 tun_id,
uint16_t in_port, struct flow *);
#define FWW_IN_PORT ((OVS_FORCE flow_wildcards_t) (1 << 0))
#define FWW_DL_TYPE ((OVS_FORCE flow_wildcards_t) (1 << 1))
-#define FWW_NW_PROTO ((OVS_FORCE flow_wildcards_t) (1 << 2))
-#define FWW_ALL ((OVS_FORCE flow_wildcards_t) (((1 << 3)) - 1))
+#define FWW_ALL ((OVS_FORCE flow_wildcards_t) (((1 << 2)) - 1))
/* Remember to update FLOW_WC_SEQ when adding or removing FWW_*. */
-BUILD_ASSERT_DECL(FWW_ALL == ((1 << 3) - 1) && FLOW_WC_SEQ == 15);
+BUILD_ASSERT_DECL(FWW_ALL == ((1 << 2) - 1) && FLOW_WC_SEQ == 16);
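/* Illustrative note: with FWW_NW_PROTO (bit 2) removed, only FWW_IN_PORT
 * (bit 0) and FWW_DL_TYPE (bit 1) remain, so the all-bits mask shrinks from
 * (1 << 3) - 1 to (1 << 2) - 1. */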
/* Information on wildcards for a flow, as a supplement to "struct flow".
*
ovs_be16 vlan_tci_mask; /* 1-bit in each significant vlan_tci bit. */
ovs_be16 tp_src_mask; /* 1-bit in each significant tp_src bit. */
ovs_be16 tp_dst_mask; /* 1-bit in each significant tp_dst bit. */
+ uint8_t nw_proto_mask; /* 1-bit in each significant nw_proto bit. */
uint8_t nw_frag_mask; /* 1-bit in each significant nw_frag bit. */
uint8_t dl_src_mask[6]; /* 1-bit in each significant dl_src bit. */
uint8_t dl_dst_mask[6]; /* 1-bit in each significant dl_dst bit. */
uint8_t arp_tha_mask[6]; /* 1-bit in each significant arp_tha bit. */
uint8_t nw_tos_mask; /* 1-bit in each significant nw_tos bit. */
uint8_t nw_ttl_mask; /* 1-bit in each significant nw_ttl bit. */
- uint8_t zeros[7]; /* Padding field set to zero. */
+ uint8_t zeros[6]; /* Padding field set to zero. */
};
/* Remember to update FLOW_WC_SEQ when updating struct flow_wildcards. */
-BUILD_ASSERT_DECL(sizeof(struct flow_wildcards) == 152 && FLOW_WC_SEQ == 15);
+BUILD_ASSERT_DECL(sizeof(struct flow_wildcards) == 152 && FLOW_WC_SEQ == 16);
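/* Illustrative note: the new one-byte nw_proto_mask is offset by shrinking
 * zeros[] from 7 to 6 bytes, so sizeof(struct flow_wildcards) stays 152 and
 * only FLOW_WC_SEQ changes in the assertion above. */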
void flow_wildcards_init_catchall(struct flow_wildcards *);
void flow_wildcards_init_exact(struct flow_wildcards *);
{
MFF_IP_PROTO, "nw_proto", NULL,
MF_FIELD_SIZES(u8),
- MFM_NONE, FWW_NW_PROTO,
+ MFM_NONE, 0,
MFS_DECIMAL,
MFP_IP_ANY,
false,
{
MFF_ARP_OP, "arp_op", NULL,
MF_FIELD_SIZES(be16),
- MFM_NONE, FWW_NW_PROTO,
+ MFM_NONE, 0,
MFS_DECIMAL,
MFP_ARP,
false,
switch (mf->id) {
case MFF_IN_PORT:
case MFF_ETH_TYPE:
- case MFF_IP_PROTO:
- case MFF_ARP_OP:
assert(mf->fww_bit != 0);
return (wc->wildcards & mf->fww_bit) != 0;
case MFF_IPV6_LABEL:
return !wc->ipv6_label_mask;
+ case MFF_IP_PROTO:
+ return !wc->nw_proto_mask;
case MFF_IP_DSCP:
return !(wc->nw_tos_mask & IP_DSCP_MASK);
case MFF_IP_ECN:
case MFF_IP_FRAG:
return !(wc->nw_frag_mask & FLOW_NW_FRAG_MASK);
+ case MFF_ARP_OP:
+ return !wc->nw_proto_mask;
case MFF_ARP_SPA:
return !wc->nw_src_mask;
case MFF_ARP_TPA:
switch (mf->id) {
case MFF_IN_PORT:
case MFF_ETH_TYPE:
- case MFF_IP_PROTO:
- case MFF_ARP_OP:
assert(mf->fww_bit != 0);
memset(mask, wc->wildcards & mf->fww_bit ? 0x00 : 0xff, mf->n_bytes);
break;
mask->be32 = wc->ipv6_label_mask;
break;
+ case MFF_IP_PROTO:
+ mask->u8 = wc->nw_proto_mask;
+ break;
case MFF_IP_DSCP:
mask->u8 = wc->nw_tos_mask & IP_DSCP_MASK;
break;
mask->u8 = wc->nw_frag_mask & FLOW_NW_FRAG_MASK;
break;
+ case MFF_ARP_OP:
+ mask->be16 = htons(wc->nw_proto_mask);
+ break;
case MFF_ARP_SPA:
mask->be32 = wc->nw_src_mask;
break;
break;
case MFF_IP_PROTO:
- rule->wc.wildcards |= FWW_NW_PROTO;
+ rule->wc.nw_proto_mask = 0;
rule->flow.nw_proto = 0;
break;
break;
case MFF_ARP_OP:
- rule->wc.wildcards |= FWW_NW_PROTO;
+ rule->wc.nw_proto_mask = 0;
rule->flow.nw_proto = 0;
break;
uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code,
bool oxm)
{
- const flow_wildcards_t wc = cr->wc.wildcards;
const struct flow *flow = &cr->flow;
nxm_put_frag(b, cr);
nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
}
- if (!(wc & FWW_NW_PROTO)) {
+ if (cr->wc.nw_proto_mask) {
nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto);
if (flow->nw_proto == IPPROTO_TCP) {
int match_len;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
/* Metadata. */
if (!(wc & FWW_IN_PORT)) {
}
} else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
/* ARP. */
- if (!(wc & FWW_NW_PROTO)) {
+ if (cr->wc.nw_proto_mask) {
nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP,
htons(flow->nw_proto));
}
void
ofputil_wildcard_from_ofpfw10(uint32_t ofpfw, struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
/* Initialize most of rule->wc. */
flow_wildcards_init_catchall(wc);
if (ofpfw & OFPFW10_DL_TYPE) {
wc->wildcards |= FWW_DL_TYPE;
}
- if (ofpfw & OFPFW10_NW_PROTO) {
- wc->wildcards |= FWW_NW_PROTO;
- }
if (!(ofpfw & OFPFW10_NW_TOS)) {
wc->nw_tos_mask |= IP_DSCP_MASK;
}
+ if (!(ofpfw & OFPFW10_NW_PROTO)) {
+ wc->nw_proto_mask = UINT8_MAX;
+ }
wc->nw_src_mask = ofputil_wcbits_to_netmask(ofpfw >> OFPFW10_NW_SRC_SHIFT);
wc->nw_dst_mask = ofputil_wcbits_to_netmask(ofpfw >> OFPFW10_NW_DST_SHIFT);
if (wc->wildcards & FWW_DL_TYPE) {
ofpfw |= OFPFW10_DL_TYPE;
}
- if (wc->wildcards & FWW_NW_PROTO) {
+ if (!wc->nw_proto_mask) {
ofpfw |= OFPFW10_NW_PROTO;
}
ofpfw |= (ofputil_netmask_to_wcbits(wc->nw_src_mask)
match->nw_tos = rule->flow.nw_tos & IP_DSCP_MASK;
}
- if (rule->wc.wildcards & FWW_NW_PROTO) {
+ if (!rule->wc.nw_proto_mask) {
wc |= OFPFW11_NW_PROTO;
} else {
match->nw_proto = rule->flow.nw_proto;
{
const struct flow_wildcards *wc = &rule->wc;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 16);
/* NXM and OF1.1+ supports bitwise matching on ethernet addresses. */
if (!eth_mask_is_exact(wc->dl_src_mask)
wc.tp_src_mask = wc.tp_dst_mask = htons(0);
}
if (!(may_match & MAY_NW_PROTO)) {
- wc.wildcards |= FWW_NW_PROTO;
+ wc.nw_proto_mask = 0;
}
if (!(may_match & MAY_IPVx)) {
wc.nw_tos_mask = 0;
CLS_FIELD(0, tp_dst, TP_DST) \
CLS_FIELD(0, dl_src, DL_SRC) \
CLS_FIELD(0, dl_dst, DL_DST) \
- CLS_FIELD(FWW_NW_PROTO, nw_proto, NW_PROTO) \
+ CLS_FIELD(0, nw_proto, NW_PROTO) \
CLS_FIELD(0, nw_tos, NW_DSCP)
/* Field indexes.
} else if (f_idx == CLS_F_IDX_NW_DSCP) {
eq = !((fixed->nw_tos ^ wild->flow.nw_tos) &
(wild->wc.nw_tos_mask & IP_DSCP_MASK));
+ } else if (f_idx == CLS_F_IDX_NW_PROTO) {
+ eq = !((fixed->nw_proto ^ wild->flow.nw_proto)
+ & wild->wc.nw_proto_mask);
} else {
NOT_REACHED();
}
rule->cls_rule.wc.metadata_mask = htonll(UINT64_MAX);
} else if (f_idx == CLS_F_IDX_NW_DSCP) {
rule->cls_rule.wc.nw_tos_mask |= IP_DSCP_MASK;
+ } else if (f_idx == CLS_F_IDX_NW_PROTO) {
+ rule->cls_rule.wc.nw_proto_mask = UINT8_MAX;
} else {
NOT_REACHED();
}