void
cls_rule_set_nw_ttl(struct cls_rule *rule, uint8_t nw_ttl)
{
- rule->wc.wildcards &= ~FWW_NW_TTL;
+ rule->wc.nw_ttl_mask = UINT8_MAX;
rule->flow.nw_ttl = nw_ttl;
}
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
if (rule->priority != OFP_DEFAULT_PRIORITY) {
ds_put_format(s, "priority=%d,", rule->priority);
if (wc->nw_tos_mask & IP_ECN_MASK) {
ds_put_format(s, "nw_ecn=%"PRIu8",", f->nw_tos & IP_ECN_MASK);
}
- if (!(w & FWW_NW_TTL)) {
+ if (wc->nw_ttl_mask) {
ds_put_format(s, "nw_ttl=%"PRIu8",", f->nw_ttl);
}
switch (wc->nw_frag_mask) {
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
&& eth_addr_equal_except(a->dl_dst, b->dl_dst,
wildcards->dl_dst_mask)
&& (wc & FWW_NW_PROTO || a->nw_proto == b->nw_proto)
- && (wc & FWW_NW_TTL || a->nw_ttl == b->nw_ttl)
+ && !((a->nw_ttl ^ b->nw_ttl) & wildcards->nw_ttl_mask)
&& !((a->nw_tos ^ b->nw_tos) & wildcards->nw_tos_mask)
&& !((a->nw_frag ^ b->nw_frag) & wildcards->nw_frag_mask)
&& eth_addr_equal_except(a->arp_sha, b->arp_sha,
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
for (i = 0; i < FLOW_N_REGS; i++) {
flow->regs[i] &= wildcards->reg_masks[i];
}
flow->ipv6_label &= wildcards->ipv6_label_mask;
flow->nw_tos &= wildcards->nw_tos_mask;
- if (wc & FWW_NW_TTL) {
- flow->nw_ttl = 0;
- }
+ flow->nw_ttl &= wildcards->nw_ttl_mask;
flow->nw_frag &= wildcards->nw_frag_mask;
eth_addr_bitand(flow->arp_sha, wildcards->arp_sha_mask, flow->arp_sha);
eth_addr_bitand(flow->arp_tha, wildcards->arp_tha_mask, flow->arp_tha);
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
fmd->tun_id = flow->tun_id;
fmd->metadata = flow->metadata;
void
flow_wildcards_init_catchall(struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
wc->wildcards = FWW_ALL;
wc->tun_id_mask = htonll(0);
memset(wc->arp_sha_mask, 0, ETH_ADDR_LEN);
memset(wc->arp_tha_mask, 0, ETH_ADDR_LEN);
wc->nw_tos_mask = 0;
+ wc->nw_ttl_mask = 0;
+ memset(wc->zeros, 0, sizeof wc->zeros);
}
/* Initializes 'wc' as an exact-match set of wildcards; that is, 'wc' does not
void
flow_wildcards_init_exact(struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
wc->wildcards = 0;
wc->tun_id_mask = htonll(UINT64_MAX);
memset(wc->arp_sha_mask, 0xff, ETH_ADDR_LEN);
memset(wc->arp_tha_mask, 0xff, ETH_ADDR_LEN);
wc->nw_tos_mask = UINT8_MAX;
+ wc->nw_ttl_mask = UINT8_MAX;
+ memset(wc->zeros, 0, sizeof wc->zeros);
}
/* Returns true if 'wc' is exact-match, false if 'wc' wildcards any bits or
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
if (wc->wildcards
|| wc->tun_id_mask != htonll(UINT64_MAX)
|| wc->ipv6_label_mask != htonl(UINT32_MAX)
|| !ipv6_mask_is_exact(&wc->nd_target_mask)
|| wc->nw_frag_mask != UINT8_MAX
- || wc->nw_tos_mask != UINT8_MAX) {
+ || wc->nw_tos_mask != UINT8_MAX
+ || wc->nw_ttl_mask != UINT8_MAX) {
return false;
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
if (wc->wildcards != FWW_ALL
|| wc->tun_id_mask != htonll(0)
|| wc->ipv6_label_mask != htonl(0)
|| !ipv6_mask_is_any(&wc->nd_target_mask)
|| wc->nw_frag_mask != 0
- || wc->nw_tos_mask != 0) {
+ || wc->nw_tos_mask != 0
+ || wc->nw_ttl_mask != 0) {
return false;
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
dst->wildcards = src1->wildcards | src2->wildcards;
dst->tun_id_mask = src1->tun_id_mask & src2->tun_id_mask;
eth_addr_bitand(src1->arp_sha_mask, src2->arp_sha_mask, dst->arp_sha_mask);
eth_addr_bitand(src1->arp_tha_mask, src2->arp_tha_mask, dst->arp_tha_mask);
dst->nw_tos_mask = src1->nw_tos_mask & src2->nw_tos_mask;
+ dst->nw_ttl_mask = src1->nw_ttl_mask & src2->nw_ttl_mask;
}
/* Returns a hash of the wildcards in 'wc'. */
/* If you change struct flow_wildcards and thereby trigger this
* assertion, please check that the new struct flow_wildcards has no holes
* in it before you update the assertion. */
- BUILD_ASSERT_DECL(sizeof *wc == 112 + FLOW_N_REGS * 4);
+ BUILD_ASSERT_DECL(sizeof *wc == 120 + FLOW_N_REGS * 4);
return hash_bytes(wc, sizeof *wc, basis);
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
if (a->wildcards != b->wildcards
|| a->tun_id_mask != b->tun_id_mask
|| !eth_addr_equals(a->dl_dst_mask, b->dl_dst_mask)
|| !eth_addr_equals(a->arp_sha_mask, b->arp_sha_mask)
|| !eth_addr_equals(a->arp_tha_mask, b->arp_tha_mask)
- || a->nw_tos_mask != b->nw_tos_mask) {
+ || a->nw_tos_mask != b->nw_tos_mask
+ || a->nw_ttl_mask != b->nw_ttl_mask) {
return false;
}
uint8_t eth_masked[ETH_ADDR_LEN];
struct in6_addr ipv6_masked;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) {
|| (a->tp_src_mask & b->tp_src_mask) != b->tp_src_mask
|| (a->tp_dst_mask & b->tp_dst_mask) != b->tp_dst_mask
|| (a->nw_frag_mask & b->nw_frag_mask) != b->nw_frag_mask
- || (a->nw_tos_mask & b->nw_tos_mask) != b->nw_tos_mask);
+ || (a->nw_tos_mask & b->nw_tos_mask) != b->nw_tos_mask
+ || (a->nw_ttl_mask & b->nw_ttl_mask) != b->nw_ttl_mask);
}
/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
/* This sequence number should be incremented whenever anything involving flows
* or the wildcarding of flows changes. This will cause build assertion
* failures in places which likely need to be updated. */
-#define FLOW_WC_SEQ 14
+#define FLOW_WC_SEQ 15
#define FLOW_N_REGS 8
BUILD_ASSERT_DECL(FLOW_N_REGS <= NXM_NX_MAX_REGS);
BUILD_ASSERT_DECL(sizeof(struct flow) == FLOW_SIG_SIZE + FLOW_PAD_SIZE);
/* Remember to update FLOW_WC_SEQ when changing 'struct flow'. */
-BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 150 && FLOW_WC_SEQ == 14);
+BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 150 && FLOW_WC_SEQ == 15);
void flow_extract(struct ofpbuf *, uint32_t priority, ovs_be64 tun_id,
uint16_t in_port, struct flow *);
#define FWW_IN_PORT ((OVS_FORCE flow_wildcards_t) (1 << 0))
#define FWW_DL_TYPE ((OVS_FORCE flow_wildcards_t) (1 << 1))
#define FWW_NW_PROTO ((OVS_FORCE flow_wildcards_t) (1 << 2))
-#define FWW_NW_TTL ((OVS_FORCE flow_wildcards_t) (1 << 3))
-#define FWW_ALL ((OVS_FORCE flow_wildcards_t) (((1 << 4)) - 1))
+#define FWW_ALL ((OVS_FORCE flow_wildcards_t) (((1 << 3)) - 1))
/* Remember to update FLOW_WC_SEQ when adding or removing FWW_*. */
-BUILD_ASSERT_DECL(FWW_ALL == ((1 << 4) - 1) && FLOW_WC_SEQ == 14);
+BUILD_ASSERT_DECL(FWW_ALL == ((1 << 3) - 1) && FLOW_WC_SEQ == 15);
/* Information on wildcards for a flow, as a supplement to "struct flow".
*
    uint8_t arp_sha_mask[6];     /* 1-bit in each significant arp_sha bit. */
    uint8_t arp_tha_mask[6];     /* 1-bit in each significant arp_tha bit. */
uint8_t nw_tos_mask; /* 1-bit in each significant nw_tos bit. */
+ uint8_t nw_ttl_mask; /* 1-bit in each significant nw_ttl bit. */
+ uint8_t zeros[7]; /* Padding field set to zero. */
};
/* Remember to update FLOW_WC_SEQ when updating struct flow_wildcards. */
-BUILD_ASSERT_DECL(sizeof(struct flow_wildcards) == 144 && FLOW_WC_SEQ == 14);
+BUILD_ASSERT_DECL(sizeof(struct flow_wildcards) == 152 && FLOW_WC_SEQ == 15);
void flow_wildcards_init_catchall(struct flow_wildcards *);
void flow_wildcards_init_exact(struct flow_wildcards *);
}, {
MFF_IP_TTL, "nw_ttl", NULL,
MF_FIELD_SIZES(u8),
- MFM_NONE, FWW_NW_TTL,
+ MFM_NONE, 0,
MFS_DECIMAL,
MFP_IP_ANY,
true,
case MFF_IN_PORT:
case MFF_ETH_TYPE:
case MFF_IP_PROTO:
- case MFF_IP_TTL:
case MFF_ARP_OP:
assert(mf->fww_bit != 0);
return (wc->wildcards & mf->fww_bit) != 0;
return !(wc->nw_tos_mask & IP_DSCP_MASK);
case MFF_IP_ECN:
return !(wc->nw_tos_mask & IP_ECN_MASK);
+ case MFF_IP_TTL:
+ return !wc->nw_ttl_mask;
case MFF_ND_TARGET:
return ipv6_mask_is_any(&wc->nd_target_mask);
case MFF_IN_PORT:
case MFF_ETH_TYPE:
case MFF_IP_PROTO:
- case MFF_IP_TTL:
case MFF_ARP_OP:
assert(mf->fww_bit != 0);
memset(mask, wc->wildcards & mf->fww_bit ? 0x00 : 0xff, mf->n_bytes);
mask->ipv6 = wc->nd_target_mask;
break;
+ case MFF_IP_TTL:
+ mask->u8 = wc->nw_ttl_mask;
+ break;
case MFF_IP_FRAG:
mask->u8 = wc->nw_frag_mask & FLOW_NW_FRAG_MASK;
break;
break;
case MFF_IP_TTL:
- rule->wc.wildcards |= FWW_NW_TTL;
+ rule->wc.nw_ttl_mask = 0;
rule->flow.nw_ttl = 0;
break;
flow->nw_tos & IP_ECN_MASK);
}
- if (!oxm && !(wc & FWW_NW_TTL)) {
+ if (!oxm && cr->wc.nw_ttl_mask) {
nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
}
int match_len;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
/* Metadata. */
if (!(wc & FWW_IN_PORT)) {
void
ofputil_wildcard_from_ofpfw10(uint32_t ofpfw, struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
/* Initialize most of rule->wc. */
flow_wildcards_init_catchall(wc);
- /* Start with wildcard fields that aren't defined by ofp10_match. */
- wc->wildcards = FWW_NW_TTL;
-
+ wc->wildcards = 0;
if (ofpfw & OFPFW10_IN_PORT) {
wc->wildcards |= FWW_IN_PORT;
}
{
const struct flow_wildcards *wc = &rule->wc;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);
/* NXM and OF1.1+ supports bitwise matching on ethernet addresses. */
if (!eth_mask_is_exact(wc->dl_src_mask)
}
/* Only NXM supports matching IP TTL/hop limit. */
- if (!(wc->wildcards & FWW_NW_TTL)) {
+ if (wc->nw_ttl_mask) {
return OFPUTIL_P_NXM_ANY;
}
}
if (!(may_match & MAY_IPVx)) {
wc.nw_tos_mask = 0;
- wc.wildcards |= FWW_NW_TTL;
+ wc.nw_ttl_mask = 0;
}
if (!(may_match & MAY_ARP_SHA)) {
memset(wc.arp_sha_mask, 0, ETH_ADDR_LEN);