* properly mark later fragments.
*/
later_key = *upcall_info->key;
- later_key.ip.tos_frag &= ~OVS_FRAG_TYPE_MASK;
- later_key.ip.tos_frag |= OVS_FRAG_TYPE_LATER;
+ later_key.ip.frag = OVS_FRAG_TYPE_LATER;
later_info = *upcall_info;
later_info.key = &later_key;
if (ipv4_key->ipv4_tos & INET_ECN_MASK)
return -EINVAL;
- if (ipv4_key->ipv4_frag !=
- (flow_key->ip.tos_frag & OVS_FRAG_TYPE_MASK))
+ if (ipv4_key->ipv4_frag != flow_key->ip.frag)
return -EINVAL;
break;
* @nexthdrp: Initially, points to the type of the extension header at @start.
* This function updates it to point to the extension header at the final
* offset.
- * @tos_frag: Points to the @tos_frag member in a &struct sw_flow_key. This
+ * @frag: Points to the @frag member in a &struct sw_flow_key. This
* function sets an appropriate %OVS_FRAG_TYPE_* value.
*
- * This is based on ipv6_skip_exthdr() but adds the updates to *@tos_frag.
+ * This is based on ipv6_skip_exthdr() but adds the updates to *@frag.
*
* When there is more than one fragment header, this version reports whether
* the final fragment header that it examines is a first fragment.
* Returns the final payload offset, or -1 on error.
*/
static int skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
- u8 *tos_frag)
+ u8 *frag)
{
u8 nexthdr = *nexthdrp;
if (fp == NULL)
return -1;
- *tos_frag &= ~OVS_FRAG_TYPE_MASK;
if (ntohs(*fp) & ~0x7) {
- *tos_frag |= OVS_FRAG_TYPE_LATER;
+ *frag = OVS_FRAG_TYPE_LATER;
break;
}
- *tos_frag |= OVS_FRAG_TYPE_FIRST;
+ *frag = OVS_FRAG_TYPE_FIRST;
hdrlen = 8;
} else if (nexthdr == NEXTHDR_AUTH)
hdrlen = (hp->hdrlen+2)<<2;
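/* Note, not part of the patch: in the IPv6 fragment header the 16-bit
 * offset/flags word that '*fp' points to holds the fragment offset in its
 * upper 13 bits, two reserved bits, and the M ("more fragments") flag in
 * bit 0, so "ntohs(*fp) & ~0x7" is nonzero exactly when the fragment offset
 * is nonzero.  For example, 0x0009 (offset 1, i.e. 8 bytes, M=1) yields
 * OVS_FRAG_TYPE_LATER, while 0x0001 (offset 0, M=1) yields
 * OVS_FRAG_TYPE_FIRST. */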
payload_ofs = (u8 *)(nh + 1) - skb->data;
key->ip.proto = NEXTHDR_NONE;
- key->ip.tos_frag = ipv6_get_dsfield(nh) & ~INET_ECN_MASK;
+ key->ip.tos = ipv6_get_dsfield(nh) & ~INET_ECN_MASK;
key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
ipv6_addr_copy(&key->ipv6.addr.src, &nh->saddr);
ipv6_addr_copy(&key->ipv6.addr.dst, &nh->daddr);
- payload_ofs = skip_exthdr(skb, payload_ofs,
- &nexthdr, &key->ip.tos_frag);
+ payload_ofs = skip_exthdr(skb, payload_ofs, &nexthdr, &key->ip.frag);
if (unlikely(payload_ofs < 0))
return -EINVAL;
key->ipv4.addr.dst = nh->daddr;
key->ip.proto = nh->protocol;
- key->ip.tos_frag = nh->tos & ~INET_ECN_MASK;
+ key->ip.tos = nh->tos & ~INET_ECN_MASK;
offset = nh->frag_off & htons(IP_OFFSET);
if (offset) {
- key->ip.tos_frag |= OVS_FRAG_TYPE_LATER;
+ key->ip.frag = OVS_FRAG_TYPE_LATER;
goto out;
}
if (nh->frag_off & htons(IP_MF) ||
skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
- key->ip.tos_frag |= OVS_FRAG_TYPE_FIRST;
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
/* Transport layer. */
if (key->ip.proto == IPPROTO_TCP) {
goto out;
}
- if ((key->ip.tos_frag & OVS_FRAG_TYPE_MASK) == OVS_FRAG_TYPE_LATER)
+ if (key->ip.frag == OVS_FRAG_TYPE_LATER)
goto out;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
- key->ip.tos_frag |= OVS_FRAG_TYPE_FIRST;
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
/* Transport layer. */
if (key->ip.proto == NEXTHDR_TCP) {
}
}
-static int parse_tos_frag(struct sw_flow_key *swkey, u8 tos, u8 frag)
-{
- if (tos & INET_ECN_MASK || frag > OVS_FRAG_TYPE_MAX)
- return -EINVAL;
-
- swkey->ip.tos_frag = tos | frag;
- return 0;
-}
-
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const u32 ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_PRIORITY] = 4,
const struct nlattr *attr)
{
int error = 0;
- enum ovs_frag_type frag_type;
const struct nlattr *nla;
u16 prev_type;
int rem;
if (swkey->eth.type != htons(ETH_P_IP))
goto invalid;
ipv4_key = nla_data(nla);
- swkey->ip.proto = ipv4_key->ipv4_proto;
- if (parse_tos_frag(swkey, ipv4_key->ipv4_tos,
- ipv4_key->ipv4_frag))
+ if (ipv4_key->ipv4_tos & INET_ECN_MASK)
+ goto invalid;
+ if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
goto invalid;
+ swkey->ip.proto = ipv4_key->ipv4_proto;
+ swkey->ip.tos = ipv4_key->ipv4_tos;
+ swkey->ip.frag = ipv4_key->ipv4_frag;
swkey->ipv4.addr.src = ipv4_key->ipv4_src;
swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
break;
if (swkey->eth.type != htons(ETH_P_IPV6))
goto invalid;
ipv6_key = nla_data(nla);
+ if (ipv6_key->ipv6_tos & INET_ECN_MASK)
+ goto invalid;
+ if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+ goto invalid;
swkey->ipv6.label = ipv6_key->ipv6_label;
swkey->ip.proto = ipv6_key->ipv6_proto;
- if (parse_tos_frag(swkey, ipv6_key->ipv6_tos,
- ipv6_key->ipv6_frag))
- goto invalid;
+ swkey->ip.tos = ipv6_key->ipv6_tos;
+ swkey->ip.frag = ipv6_key->ipv6_frag;
memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
sizeof(swkey->ipv6.addr.src));
memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
if (rem)
goto invalid;
- frag_type = swkey->ip.tos_frag & OVS_FRAG_TYPE_MASK;
switch (prev_type) {
case OVS_KEY_ATTR_UNSPEC:
goto invalid;
goto ok;
case OVS_KEY_ATTR_IPV4:
- if (frag_type == OVS_FRAG_TYPE_LATER)
+ if (swkey->ip.frag == OVS_FRAG_TYPE_LATER)
goto ok;
if (swkey->ip.proto == IPPROTO_TCP ||
swkey->ip.proto == IPPROTO_UDP ||
goto ok;
case OVS_KEY_ATTR_IPV6:
- if (frag_type == OVS_FRAG_TYPE_LATER)
+ if (swkey->ip.frag == OVS_FRAG_TYPE_LATER)
goto ok;
if (swkey->ip.proto == IPPROTO_TCP ||
swkey->ip.proto == IPPROTO_UDP ||
case OVS_KEY_ATTR_ICMPV6:
if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT) ||
- frag_type == OVS_FRAG_TYPE_LATER)
+ swkey->ip.frag == OVS_FRAG_TYPE_LATER)
goto invalid;
goto ok;
case OVS_KEY_ATTR_UDP:
case OVS_KEY_ATTR_ICMP:
case OVS_KEY_ATTR_ND:
- if (frag_type == OVS_FRAG_TYPE_LATER)
+ if (swkey->ip.frag == OVS_FRAG_TYPE_LATER)
goto invalid;
goto ok;
ipv4_key->ipv4_src = swkey->ipv4.addr.src;
ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
ipv4_key->ipv4_proto = swkey->ip.proto;
- ipv4_key->ipv4_tos = swkey->ip.tos_frag & ~INET_ECN_MASK;
- ipv4_key->ipv4_frag = swkey->ip.tos_frag & OVS_FRAG_TYPE_MASK;
+ ipv4_key->ipv4_tos = swkey->ip.tos & ~INET_ECN_MASK;
+ ipv4_key->ipv4_frag = swkey->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
struct ovs_key_ipv6 *ipv6_key;
sizeof(ipv6_key->ipv6_dst));
ipv6_key->ipv6_label = swkey->ipv6.label;
ipv6_key->ipv6_proto = swkey->ip.proto;
- ipv6_key->ipv6_tos = swkey->ip.tos_frag & ~INET_ECN_MASK;
- ipv6_key->ipv6_frag = swkey->ip.tos_frag & OVS_FRAG_TYPE_MASK;
+ ipv6_key->ipv6_tos = swkey->ip.tos & ~INET_ECN_MASK;
+ ipv6_key->ipv6_frag = swkey->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_ARP)) {
struct ovs_key_arp *arp_key;
if ((swkey->eth.type == htons(ETH_P_IP) ||
swkey->eth.type == htons(ETH_P_IPV6)) &&
- (swkey->ip.tos_frag & OVS_FRAG_TYPE_MASK) != OVS_FRAG_TYPE_LATER) {
+ swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
if (swkey->ip.proto == IPPROTO_TCP) {
struct ovs_key_tcp *tcp_key;
struct nlattr actions[];
};
-/* Mask for the OVS_FRAG_TYPE_* value in the low 2 bits of ip.tos_frag in
- * struct sw_flow_key. */
-#define OVS_FRAG_TYPE_MASK INET_ECN_MASK
-
struct sw_flow_key {
struct {
__be64 tun_id; /* Encapsulating tunnel ID. */
} eth;
struct {
u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */
- u8 tos_frag; /* IP ToS DSCP in high 6 bits,
- * OVS_FRAG_TYPE_* in low 2 bits. */
+ u8 tos; /* IP ToS DSCP in high 6 bits. */
+ u8 frag; /* One of OVS_FRAG_TYPE_*. */
} ip;
union {
struct {
* OVS_KEY_ATTR_ETHERNET 12 -- 4 16
* OVS_KEY_ATTR_8021Q 4 -- 4 8
* OVS_KEY_ATTR_ETHERTYPE 2 2 4 8
- * OVS_KEY_ATTR_IPV6 38 2 4 44
+ * OVS_KEY_ATTR_IPV6 39 1 4 44
* OVS_KEY_ATTR_ICMPV6 2 2 4 8
* OVS_KEY_ATTR_ND 28 -- 4 32
* -------------------------------------------------
void
cls_rule_set_nw_tos(struct cls_rule *rule, uint8_t nw_tos)
{
- rule->wc.tos_frag_mask |= IP_DSCP_MASK;
- rule->flow.tos_frag &= ~IP_DSCP_MASK;
- rule->flow.tos_frag |= nw_tos & IP_DSCP_MASK;
+ rule->wc.tos_mask |= IP_DSCP_MASK;
+ rule->flow.tos &= ~IP_DSCP_MASK;
+ rule->flow.tos |= nw_tos & IP_DSCP_MASK;
}
void
cls_rule_set_frag(struct cls_rule *rule, uint8_t frag)
{
- rule->wc.tos_frag_mask |= FLOW_FRAG_MASK;
- rule->flow.tos_frag &= ~FLOW_FRAG_MASK;
- rule->flow.tos_frag |= frag & FLOW_FRAG_MASK;
+ rule->wc.frag_mask |= FLOW_FRAG_MASK;
+ rule->flow.frag = frag;
}
void
cls_rule_set_frag_masked(struct cls_rule *rule, uint8_t frag, uint8_t mask)
{
- mask &= FLOW_FRAG_MASK;
- frag &= mask;
- rule->wc.tos_frag_mask = (rule->wc.tos_frag_mask & ~FLOW_FRAG_MASK) | mask;
- rule->flow.tos_frag = (rule->flow.tos_frag & ~FLOW_FRAG_MASK) | frag;
+ rule->flow.frag = frag & mask;
+ rule->wc.frag_mask = mask;
}
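/* Illustrative usage, not part of the patch: with the separate 'frag' member
 * and 'frag_mask', a rule that matches only later fragments and one that
 * matches any fragment could be set up as follows, assuming 'rule' is an
 * already-initialized struct cls_rule:
 *
 *     cls_rule_set_frag(&rule, FLOW_FRAG_ANY | FLOW_FRAG_LATER);
 *     cls_rule_set_frag_masked(&rule, FLOW_FRAG_ANY, FLOW_FRAG_ANY);
 */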
void
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
if (rule->priority != OFP_DEFAULT_PRIORITY) {
ds_put_format(s, "priority=%d,", rule->priority);
ETH_ADDR_ARGS(f->arp_tha));
}
}
- if (wc->tos_frag_mask & IP_DSCP_MASK) {
- ds_put_format(s, "nw_tos=%"PRIu8",", f->tos_frag & IP_DSCP_MASK);
+ if (wc->tos_mask & IP_DSCP_MASK) {
+ ds_put_format(s, "nw_tos=%"PRIu8",", f->tos & IP_DSCP_MASK);
}
- switch (wc->tos_frag_mask & FLOW_FRAG_MASK) {
+ switch (wc->frag_mask) {
case FLOW_FRAG_ANY | FLOW_FRAG_LATER:
ds_put_format(s, "frag=%s,",
- f->tos_frag & FLOW_FRAG_ANY
- ? (f->tos_frag & FLOW_FRAG_LATER ? "later" : "first")
- : (f->tos_frag & FLOW_FRAG_LATER ? "<error>" : "no"));
+ f->frag & FLOW_FRAG_ANY
+ ? (f->frag & FLOW_FRAG_LATER ? "later" : "first")
+ : (f->frag & FLOW_FRAG_LATER ? "<error>" : "no"));
break;
case FLOW_FRAG_ANY:
ds_put_format(s, "frag=%s,",
- f->tos_frag & FLOW_FRAG_ANY ? "yes" : "no");
+ f->frag & FLOW_FRAG_ANY ? "yes" : "no");
break;
case FLOW_FRAG_LATER:
ds_put_format(s, "frag=%s,",
- f->tos_frag & FLOW_FRAG_LATER ? "later" : "not_later");
+ f->frag & FLOW_FRAG_LATER ? "later" : "not_later");
break;
}
if (f->nw_proto == IPPROTO_ICMP) {
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
&& (wc & FWW_ETH_MCAST
|| !((a->dl_dst[0] ^ b->dl_dst[0]) & 0x01))
&& (wc & FWW_NW_PROTO || a->nw_proto == b->nw_proto)
- && !((a->tos_frag ^ b->tos_frag) & wildcards->tos_frag_mask)
+ && !((a->tos ^ b->tos) & wildcards->tos_mask)
+ && !((a->frag ^ b->frag) & wildcards->frag_mask)
&& (wc & FWW_ARP_SHA || eth_addr_equals(a->arp_sha, b->arp_sha))
&& (wc & FWW_ARP_THA || eth_addr_equals(a->arp_tha, b->arp_tha))
&& (wc & FWW_IPV6_LABEL || a->ipv6_label == b->ipv6_label)
flow->ipv6_dst = nh->ip6_dst;
tc_flow = get_unaligned_be32(&nh->ip6_flow);
- flow->tos_frag = (ntohl(tc_flow) >> 4) & IP_DSCP_MASK;
+ flow->tos = (ntohl(tc_flow) >> 4) & IP_DSCP_MASK;
flow->ipv6_label = tc_flow & htonl(IPV6_LABEL_MASK);
flow->nw_proto = IPPROTO_NONE;
}
/* We only process the first fragment. */
- flow->tos_frag &= ~FLOW_FRAG_MASK;
- flow->tos_frag |= FLOW_FRAG_ANY;
+ flow->frag = FLOW_FRAG_ANY;
if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
- flow->tos_frag |= FLOW_FRAG_LATER;
+ flow->frag |= FLOW_FRAG_LATER;
nexthdr = IPPROTO_FRAGMENT;
break;
}
flow->nw_dst = get_unaligned_be32(&nh->ip_dst);
flow->nw_proto = nh->ip_proto;
- flow->tos_frag = nh->ip_tos & IP_DSCP_MASK;
+ flow->tos = nh->ip_tos & IP_DSCP_MASK;
if (IP_IS_FRAGMENT(nh->ip_frag_off)) {
- flow->tos_frag |= FLOW_FRAG_ANY;
+ flow->frag = FLOW_FRAG_ANY;
if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
- flow->tos_frag |= FLOW_FRAG_LATER;
+ flow->frag |= FLOW_FRAG_LATER;
}
}
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
for (i = 0; i < FLOW_N_REGS; i++) {
flow->regs[i] &= wildcards->reg_masks[i];
if (wc & FWW_IPV6_LABEL) {
flow->ipv6_label = htonl(0);
}
- flow->tos_frag &= wildcards->tos_frag_mask;
+ flow->tos &= wildcards->tos_mask;
+ flow->frag &= wildcards->frag_mask;
if (wc & FWW_ARP_SHA) {
memset(flow->arp_sha, 0, sizeof flow->arp_sha);
}
void
flow_format(struct ds *ds, const struct flow *flow)
{
- int frag;
-
ds_put_format(ds, "priority%"PRIu32
":tunnel%#"PRIx64
":in_port%04"PRIx16,
if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
ds_put_format(ds, " label%#"PRIx32" proto%"PRIu8" tos%"PRIu8" ipv6",
ntohl(flow->ipv6_label), flow->nw_proto,
- flow->tos_frag & IP_DSCP_MASK);
+ flow->tos & IP_DSCP_MASK);
print_ipv6_addr(ds, &flow->ipv6_src);
ds_put_cstr(ds, "->");
print_ipv6_addr(ds, &flow->ipv6_dst);
" tos%"PRIu8
" ip"IP_FMT"->"IP_FMT,
flow->nw_proto,
- flow->tos_frag & IP_DSCP_MASK,
+ flow->tos & IP_DSCP_MASK,
IP_ARGS(&flow->nw_src),
IP_ARGS(&flow->nw_dst));
}
- frag = flow->tos_frag & FLOW_FRAG_MASK;
- if (frag) {
+ if (flow->frag) {
ds_put_format(ds, " frag(%s)",
- frag == FLOW_FRAG_ANY ? "first"
- : frag == (FLOW_FRAG_ANY | FLOW_FRAG_LATER) ? "later"
- : "<error>");
+ flow->frag == FLOW_FRAG_ANY ? "first"
+ : flow->frag == (FLOW_FRAG_ANY | FLOW_FRAG_LATER)
+ ? "later" : "<error>");
}
if (flow->tp_src || flow->tp_dst) {
ds_put_format(ds, " port%"PRIu16"->%"PRIu16,
void
flow_wildcards_init_catchall(struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
wc->wildcards = FWW_ALL;
wc->tun_id_mask = htonll(0);
wc->ipv6_dst_mask = in6addr_any;
memset(wc->reg_masks, 0, sizeof wc->reg_masks);
wc->vlan_tci_mask = htons(0);
- wc->tos_frag_mask = 0;
+ wc->tos_mask = 0;
+ wc->frag_mask = 0;
memset(wc->zeros, 0, sizeof wc->zeros);
}
void
flow_wildcards_init_exact(struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
wc->wildcards = 0;
wc->tun_id_mask = htonll(UINT64_MAX);
wc->ipv6_dst_mask = in6addr_exact;
memset(wc->reg_masks, 0xff, sizeof wc->reg_masks);
wc->vlan_tci_mask = htons(UINT16_MAX);
- wc->tos_frag_mask = UINT8_MAX;
+ wc->tos_mask = UINT8_MAX;
+ wc->frag_mask = UINT8_MAX;
memset(wc->zeros, 0, sizeof wc->zeros);
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
if (wc->wildcards
|| wc->tun_id_mask != htonll(UINT64_MAX)
|| wc->vlan_tci_mask != htons(UINT16_MAX)
|| !ipv6_mask_is_exact(&wc->ipv6_src_mask)
|| !ipv6_mask_is_exact(&wc->ipv6_dst_mask)
- || wc->tos_frag_mask != UINT8_MAX) {
+ || wc->tos_mask != UINT8_MAX
+ || wc->frag_mask != UINT8_MAX) {
return false;
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
if (wc->wildcards != FWW_ALL
|| wc->tun_id_mask != htonll(0)
|| wc->vlan_tci_mask != htons(0)
|| !ipv6_mask_is_any(&wc->ipv6_src_mask)
|| !ipv6_mask_is_any(&wc->ipv6_dst_mask)
- || wc->tos_frag_mask != 0) {
+ || wc->tos_mask != 0
+ || wc->frag_mask != 0) {
return false;
}
b->l3 = ip = ofpbuf_put_zeros(b, sizeof *ip);
ip->ip_ihl_ver = IP_IHL_VER(5, 4);
- ip->ip_tos = flow->tos_frag & IP_DSCP_MASK;
+ ip->ip_tos = flow->tos & IP_DSCP_MASK;
ip->ip_proto = flow->nw_proto;
ip->ip_src = flow->nw_src;
ip->ip_dst = flow->nw_dst;
- if (flow->tos_frag & FLOW_FRAG_ANY) {
+ if (flow->frag & FLOW_FRAG_ANY) {
ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
- if (flow->tos_frag & FLOW_FRAG_LATER) {
+ if (flow->frag & FLOW_FRAG_LATER) {
ip->ip_frag_off |= htons(100);
}
}
- if (!(flow->tos_frag & FLOW_FRAG_ANY)
- || !(flow->tos_frag & FLOW_FRAG_LATER)) {
+ if (!(flow->frag & FLOW_FRAG_ANY)
+ || !(flow->frag & FLOW_FRAG_LATER)) {
if (flow->nw_proto == IPPROTO_TCP) {
struct tcp_header *tcp;
/* This sequence number should be incremented whenever anything involving flows
* or the wildcarding of flows changes. This will cause build assertion
* failures in places which likely need to be updated. */
-#define FLOW_WC_SEQ 4
+#define FLOW_WC_SEQ 5
#define FLOW_N_REGS 5
BUILD_ASSERT_DECL(FLOW_N_REGS <= NXM_NX_MAX_REGS);
uint8_t dl_src[6]; /* Ethernet source address. */
uint8_t dl_dst[6]; /* Ethernet destination address. */
uint8_t nw_proto; /* IP protocol or low 8 bits of ARP opcode. */
- uint8_t tos_frag; /* IP ToS in top bits, FLOW_FRAG_* in low. */
+ uint8_t tos; /* IP ToS. */
uint8_t arp_sha[6]; /* ARP/ND source hardware address. */
uint8_t arp_tha[6]; /* ARP/ND target hardware address. */
+ uint8_t frag; /* FLOW_FRAG_* flags. */
+ uint8_t reserved[7]; /* Reserved for 64-bit packing. */
};
/* Assert that there are FLOW_SIG_SIZE bytes of significant data in "struct
* flow", followed by FLOW_PAD_SIZE bytes of padding. */
-#define FLOW_SIG_SIZE (108 + FLOW_N_REGS * 4)
-#define FLOW_PAD_SIZE 0
-BUILD_ASSERT_DECL(offsetof(struct flow, arp_tha) == FLOW_SIG_SIZE - 6);
-BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->arp_tha) == 6);
+#define FLOW_SIG_SIZE (109 + FLOW_N_REGS * 4)
+#define FLOW_PAD_SIZE 7
+BUILD_ASSERT_DECL(offsetof(struct flow, frag) == FLOW_SIG_SIZE - 1);
+BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->frag) == 1);
BUILD_ASSERT_DECL(sizeof(struct flow) == FLOW_SIG_SIZE + FLOW_PAD_SIZE);
/* Remember to update FLOW_WC_SEQ when changing 'struct flow'. */
-BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 128 && FLOW_WC_SEQ == 4);
+BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 129 && FLOW_WC_SEQ == 5);
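/* Sketch of the arithmetic behind the new constants, not part of the patch:
 * with FLOW_N_REGS == 5, FLOW_SIG_SIZE is 109 + 5 * 4 == 129, so 'frag' is the
 * last significant byte at offset 128, and the 7-byte 'reserved' array supplies
 * FLOW_PAD_SIZE so that sizeof(struct flow) == 136, a multiple of 8, assuming
 * the compiler adds no padding of its own. */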
void flow_extract(struct ofpbuf *, uint32_t priority, ovs_be64 tun_id,
uint16_t in_port, struct flow *);
#define FWW_ALL ((OVS_FORCE flow_wildcards_t) (((1 << 12)) - 1))
/* Remember to update FLOW_WC_SEQ when adding or removing FWW_*. */
-BUILD_ASSERT_DECL(FWW_ALL == ((1 << 12) - 1) && FLOW_WC_SEQ == 4);
+BUILD_ASSERT_DECL(FWW_ALL == ((1 << 12) - 1) && FLOW_WC_SEQ == 5);
/* Information on wildcards for a flow, as a supplement to "struct flow".
*
struct in6_addr ipv6_src_mask; /* 1-bit in each significant ipv6_src bit. */
struct in6_addr ipv6_dst_mask; /* 1-bit in each significant ipv6_dst bit. */

ovs_be16 vlan_tci_mask; /* 1-bit in each significant vlan_tci bit. */
- uint8_t tos_frag_mask; /* 1-bit in each significant tos_frag bit. */
- uint8_t zeros[5]; /* Padding field set to zero. */
+ uint8_t tos_mask; /* 1-bit in each significant tos bit. */
+ uint8_t frag_mask; /* 1-bit in each significant frag bit. */
+ uint8_t zeros[4]; /* Padding field set to zero. */
};
/* Remember to update FLOW_WC_SEQ when updating struct flow_wildcards. */
-BUILD_ASSERT_DECL(sizeof(struct flow_wildcards) == 80 && FLOW_WC_SEQ == 4);
+BUILD_ASSERT_DECL(sizeof(struct flow_wildcards) == 80 && FLOW_WC_SEQ == 5);
void flow_wildcards_init_catchall(struct flow_wildcards *);
void flow_wildcards_init_exact(struct flow_wildcards *);
return ipv6_mask_is_any(&wc->ipv6_dst_mask);
case MFF_IP_TOS:
- return !(wc->tos_frag_mask & IP_DSCP_MASK);
+ return !(wc->tos_mask & IP_DSCP_MASK);
case MFF_IP_FRAG:
- return !(wc->tos_frag_mask & FLOW_FRAG_MASK);
+ return !(wc->frag_mask & FLOW_FRAG_MASK);
case MFF_ARP_SPA:
return !wc->nw_src_mask;
break;
case MFF_IP_TOS:
- mask->u8 = wc->tos_frag_mask & IP_DSCP_MASK;
+ mask->u8 = wc->tos_mask & IP_DSCP_MASK;
break;
case MFF_IP_FRAG:
- mask->u8 = wc->tos_frag_mask & FLOW_FRAG_MASK;
+ mask->u8 = wc->frag_mask & FLOW_FRAG_MASK;
break;
case MFF_ARP_SPA:
break;
case MFF_IP_TOS:
- value->u8 = flow->tos_frag & IP_DSCP_MASK;
+ value->u8 = flow->tos & IP_DSCP_MASK;
break;
case MFF_IP_FRAG:
- value->u8 = flow->tos_frag & FLOW_FRAG_MASK;
+ value->u8 = flow->frag;
break;
case MFF_ARP_OP:
break;
case MFF_IP_TOS:
- rule->wc.tos_frag_mask |= IP_DSCP_MASK;
- rule->flow.tos_frag &= ~IP_DSCP_MASK;
+ rule->wc.tos_mask |= IP_DSCP_MASK;
+ rule->flow.tos &= ~IP_DSCP_MASK;
break;
case MFF_IP_FRAG:
- rule->wc.tos_frag_mask |= FLOW_FRAG_MASK;
- rule->flow.tos_frag &= ~FLOW_FRAG_MASK;
+ rule->wc.frag_mask |= FLOW_FRAG_MASK;
+ rule->flow.frag &= ~FLOW_FRAG_MASK;
break;
case MFF_ARP_OP:
}
static void
-nxm_put_tos_frag(struct ofpbuf *b, const struct cls_rule *cr)
+nxm_put_frag(struct ofpbuf *b, const struct cls_rule *cr)
{
- uint8_t tos_frag = cr->flow.tos_frag;
- uint8_t tos_frag_mask = cr->wc.tos_frag_mask;
+ uint8_t frag = cr->flow.frag;
+ uint8_t frag_mask = cr->wc.frag_mask;
- if (tos_frag_mask & IP_DSCP_MASK) {
- nxm_put_8(b, NXM_OF_IP_TOS, tos_frag & IP_DSCP_MASK);
- }
-
- switch (tos_frag_mask & FLOW_FRAG_MASK) {
+ switch (frag_mask) {
case 0:
break;
case FLOW_FRAG_MASK:
- /* Output it as exact-match even though only the low 2 bits matter. */
- nxm_put_8(b, NXM_NX_IP_FRAG, tos_frag & FLOW_FRAG_MASK);
+ nxm_put_8(b, NXM_NX_IP_FRAG, frag);
break;
default:
- nxm_put_8m(b, NXM_NX_IP_FRAG, tos_frag & FLOW_FRAG_MASK,
- tos_frag_mask & FLOW_FRAG_MASK);
+ nxm_put_8m(b, NXM_NX_IP_FRAG, frag, frag_mask & FLOW_FRAG_MASK);
break;
}
}
int match_len;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
/* Metadata. */
if (!(wc & FWW_IN_PORT)) {
/* L3. */
if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
/* IP. */
- nxm_put_tos_frag(b, cr);
nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);
+ nxm_put_frag(b, cr);
+
+ if (cr->wc.tos_mask & IP_DSCP_MASK) {
+ nxm_put_8(b, NXM_OF_IP_TOS, flow->tos & IP_DSCP_MASK);
+ }
if (!(wc & FWW_NW_PROTO)) {
nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
}
} else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IPV6)) {
/* IPv6. */
- nxm_put_tos_frag(b, cr);
nxm_put_ipv6(b, NXM_NX_IPV6_SRC, &flow->ipv6_src,
&cr->wc.ipv6_src_mask);
nxm_put_ipv6(b, NXM_NX_IPV6_DST, &flow->ipv6_dst,
&cr->wc.ipv6_dst_mask);
+ nxm_put_frag(b, cr);
+
if (!(wc & FWW_IPV6_LABEL)) {
nxm_put_32(b, NXM_NX_IPV6_LABEL, flow->ipv6_label);
}
+ if (cr->wc.tos_mask & IP_DSCP_MASK) {
+ nxm_put_8(b, NXM_OF_IP_TOS, flow->tos & IP_DSCP_MASK);
+ }
+
if (!(wc & FWW_NW_PROTO)) {
nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
switch (flow->nw_proto) {
return ntohs(flow->vlan_tci);
case NFI_NXM_OF_IP_TOS:
- return flow->tos_frag & IP_DSCP_MASK;
+ return flow->tos & IP_DSCP_MASK;
case NFI_NXM_NX_IP_FRAG:
- return flow->tos_frag & FLOW_FRAG_MASK;
+ return flow->frag;
case NFI_NXM_OF_IP_PROTO:
case NFI_NXM_OF_ARP_OP:
#endif
case NFI_NXM_OF_IP_TOS:
- flow->tos_frag &= ~IP_DSCP_MASK;
- flow->tos_frag |= new_value & IP_DSCP_MASK;
+ flow->tos &= ~IP_DSCP_MASK;
+ flow->tos |= new_value & IP_DSCP_MASK;
break;
case NFI_NXM_NX_IP_FRAG:
- flow->tos_frag &= ~FLOW_FRAG_MASK;
- flow->tos_frag |= new_value & FLOW_FRAG_MASK;
+ flow->frag = new_value;
break;
case NFI_NXM_OF_IP_SRC:
}
static uint8_t
-tos_frag_to_odp_frag(uint8_t tos_frag)
+ovs_to_odp_frag(uint8_t ovs_frag)
{
- return (tos_frag & FLOW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
- : tos_frag & FLOW_FRAG_ANY ? OVS_FRAG_TYPE_FIRST
+ return (ovs_frag & FLOW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
+ : ovs_frag & FLOW_FRAG_ANY ? OVS_FRAG_TYPE_FIRST
: OVS_FRAG_TYPE_NONE);
}
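/* Sketch, not part of the patch: given the FLOW_FRAG_* bits, the mapping
 * performed above is
 *
 *     ovs_to_odp_frag(0)                               == OVS_FRAG_TYPE_NONE
 *     ovs_to_odp_frag(FLOW_FRAG_ANY)                   == OVS_FRAG_TYPE_FIRST
 *     ovs_to_odp_frag(FLOW_FRAG_ANY | FLOW_FRAG_LATER) == OVS_FRAG_TYPE_LATER
 *
 * odp_to_ovs_frag() later in this file performs the inverse translation. */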
ipv4_key->ipv4_src = flow->nw_src;
ipv4_key->ipv4_dst = flow->nw_dst;
ipv4_key->ipv4_proto = flow->nw_proto;
- ipv4_key->ipv4_tos = flow->tos_frag & IP_DSCP_MASK;
- ipv4_key->ipv4_frag = tos_frag_to_odp_frag(flow->tos_frag);
+ ipv4_key->ipv4_tos = flow->tos & IP_DSCP_MASK;
+ ipv4_key->ipv4_frag = ovs_to_odp_frag(flow->frag);
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
struct ovs_key_ipv6 *ipv6_key;
memcpy(ipv6_key->ipv6_dst, &flow->ipv6_dst, sizeof ipv6_key->ipv6_dst);
ipv6_key->ipv6_label = flow->ipv6_label;
ipv6_key->ipv6_proto = flow->nw_proto;
- ipv6_key->ipv6_tos = flow->tos_frag & IP_DSCP_MASK;
- ipv6_key->ipv6_frag = tos_frag_to_odp_frag(flow->tos_frag);
+ ipv6_key->ipv6_tos = flow->tos & IP_DSCP_MASK;
+ ipv6_key->ipv6_frag = ovs_to_odp_frag(flow->frag);
} else if (flow->dl_type == htons(ETH_TYPE_ARP)) {
struct ovs_key_arp *arp_key;
if ((flow->dl_type == htons(ETH_TYPE_IP)
|| flow->dl_type == htons(ETH_TYPE_IPV6))
- && !(flow->tos_frag & FLOW_FRAG_LATER)) {
+ && !(flow->frag & FLOW_FRAG_LATER)) {
if (flow->nw_proto == IPPROTO_TCP) {
struct ovs_key_tcp *tcp_key;
}
static bool
-odp_to_tos_frag(uint8_t odp_tos, uint8_t odp_frag, struct flow *flow)
+odp_to_ovs_frag(uint8_t odp_frag, struct flow *flow)
{
- if (odp_tos & ~IP_DSCP_MASK || odp_frag > OVS_FRAG_TYPE_LATER) {
+ if (odp_frag > OVS_FRAG_TYPE_LATER) {
return false;
}
- flow->tos_frag = odp_tos;
if (odp_frag != OVS_FRAG_TYPE_NONE) {
- flow->tos_frag |= FLOW_FRAG_ANY;
+ flow->frag |= FLOW_FRAG_ANY;
if (odp_frag == OVS_FRAG_TYPE_LATER) {
- flow->tos_frag |= FLOW_FRAG_LATER;
+ flow->frag |= FLOW_FRAG_LATER;
}
}
return true;
flow->nw_src = ipv4_key->ipv4_src;
flow->nw_dst = ipv4_key->ipv4_dst;
flow->nw_proto = ipv4_key->ipv4_proto;
- if (!odp_to_tos_frag(ipv4_key->ipv4_tos, ipv4_key->ipv4_frag,
- flow)) {
+ flow->tos = ipv4_key->ipv4_tos;
+ if (!odp_to_ovs_frag(ipv4_key->ipv4_frag, flow)) {
return EINVAL;
}
break;
memcpy(&flow->ipv6_dst, ipv6_key->ipv6_dst, sizeof flow->ipv6_dst);
flow->ipv6_label = ipv6_key->ipv6_label;
flow->nw_proto = ipv6_key->ipv6_proto;
- if (!odp_to_tos_frag(ipv6_key->ipv6_tos, ipv6_key->ipv6_frag,
- flow)) {
+ flow->tos = ipv6_key->ipv6_tos;
+ if (!odp_to_ovs_frag(ipv6_key->ipv6_frag, flow)) {
return EINVAL;
}
break;
return 0;
case OVS_KEY_ATTR_IPV4:
- if (flow->tos_frag & FLOW_FRAG_LATER) {
+ if (flow->frag & FLOW_FRAG_LATER) {
return 0;
}
if (flow->nw_proto == IPPROTO_TCP
return 0;
case OVS_KEY_ATTR_IPV6:
- if (flow->tos_frag & FLOW_FRAG_LATER) {
+ if (flow->frag & FLOW_FRAG_LATER) {
return 0;
}
if (flow->nw_proto == IPPROTO_TCP
case OVS_KEY_ATTR_ICMPV6:
if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)
|| flow->tp_src == htons(ND_NEIGHBOR_ADVERT)
- || flow->tos_frag & FLOW_FRAG_LATER) {
+ || flow->frag & FLOW_FRAG_LATER) {
return EINVAL;
}
return 0;
case OVS_KEY_ATTR_UDP:
case OVS_KEY_ATTR_ICMP:
case OVS_KEY_ATTR_ND:
- if (flow->tos_frag & FLOW_FRAG_LATER) {
+ if (flow->frag & FLOW_FRAG_LATER) {
return EINVAL;
}
return 0;
* OVS_KEY_ATTR_ETHERNET 12 -- 4 16
* OVS_KEY_ATTR_8021Q 4 -- 4 8
* OVS_KEY_ATTR_ETHERTYPE 2 2 4 8
- * OVS_KEY_ATTR_IPV6 38 2 4 44
+ * OVS_KEY_ATTR_IPV6 39 1 4 44
* OVS_KEY_ATTR_ICMPV6 2 2 4 8
* OVS_KEY_ATTR_ND 28 -- 4 32
* -------------------------------------------------
void
ofputil_wildcard_from_openflow(uint32_t ofpfw, struct flow_wildcards *wc)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
/* Initialize most of rule->wc. */
flow_wildcards_init_catchall(wc);
| FWW_IPV6_LABEL);
if (!(ofpfw & OFPFW_NW_TOS)) {
- wc->tos_frag_mask |= IP_DSCP_MASK;
+ wc->tos_mask |= IP_DSCP_MASK;
}
wc->nw_src_mask = ofputil_wcbits_to_netmask(ofpfw >> OFPFW_NW_SRC_SHIFT);
rule->flow.tp_dst = match->tp_dst;
memcpy(rule->flow.dl_src, match->dl_src, ETH_ADDR_LEN);
memcpy(rule->flow.dl_dst, match->dl_dst, ETH_ADDR_LEN);
- rule->flow.tos_frag = match->nw_tos & IP_DSCP_MASK;
+ rule->flow.tos = match->nw_tos & IP_DSCP_MASK;
rule->flow.nw_proto = match->nw_proto;
/* Translate VLANs. */
ofpfw = (OVS_FORCE uint32_t) (wc->wildcards & WC_INVARIANTS);
ofpfw |= ofputil_netmask_to_wcbits(wc->nw_src_mask) << OFPFW_NW_SRC_SHIFT;
ofpfw |= ofputil_netmask_to_wcbits(wc->nw_dst_mask) << OFPFW_NW_DST_SHIFT;
- if (!(wc->tos_frag_mask & IP_DSCP_MASK)) {
+ if (!(wc->tos_mask & IP_DSCP_MASK)) {
ofpfw |= OFPFW_NW_TOS;
}
match->dl_type = ofputil_dl_type_to_openflow(rule->flow.dl_type);
match->nw_src = rule->flow.nw_src;
match->nw_dst = rule->flow.nw_dst;
- match->nw_tos = rule->flow.tos_frag & IP_DSCP_MASK;
+ match->nw_tos = rule->flow.tos & IP_DSCP_MASK;
match->nw_proto = rule->flow.nw_proto;
match->tp_src = rule->flow.tp_src;
match->tp_dst = rule->flow.tp_dst;
{
const struct flow_wildcards *wc = &rule->wc;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 5);
/* Only NXM supports separately wildcarding the Ethernet multicast bit. */
if (!(wc->wildcards & FWW_DL_DST) != !(wc->wildcards & FWW_ETH_MCAST)) {
}
/* Only NXM supports matching fragments. */
- if (wc->tos_frag_mask & FLOW_FRAG_MASK) {
+ if (wc->frag_mask) {
return NXFF_NXM;
}
MAY_NW_ADDR = 1 << 0, /* nw_src, nw_dst */
MAY_TP_ADDR = 1 << 1, /* tp_src, tp_dst */
MAY_NW_PROTO = 1 << 2, /* nw_proto */
- MAY_TOS_FRAG = 1 << 3, /* tos_frag */
+ MAY_IPVx = 1 << 3, /* tos, frag */
MAY_ARP_SHA = 1 << 4, /* arp_sha */
MAY_ARP_THA = 1 << 5, /* arp_tha */
MAY_IPV6_ADDR = 1 << 6, /* ipv6_src, ipv6_dst */
/* Figure out what fields may be matched. */
if (rule->flow.dl_type == htons(ETH_TYPE_IP)) {
- may_match = MAY_NW_PROTO | MAY_TOS_FRAG | MAY_NW_ADDR;
+ may_match = MAY_NW_PROTO | MAY_IPVx | MAY_NW_ADDR;
if (rule->flow.nw_proto == IPPROTO_TCP ||
rule->flow.nw_proto == IPPROTO_UDP ||
rule->flow.nw_proto == IPPROTO_ICMP) {
}
} else if (rule->flow.dl_type == htons(ETH_TYPE_IPV6)
&& flow_format == NXFF_NXM) {
- may_match = MAY_NW_PROTO | MAY_TOS_FRAG | MAY_IPV6_ADDR;
+ may_match = MAY_NW_PROTO | MAY_IPVx | MAY_IPV6_ADDR;
if (rule->flow.nw_proto == IPPROTO_TCP ||
rule->flow.nw_proto == IPPROTO_UDP) {
may_match |= MAY_TP_ADDR;
if (!(may_match & MAY_NW_PROTO)) {
wc.wildcards |= FWW_NW_PROTO;
}
- if (!(may_match & MAY_TOS_FRAG)) {
- wc.tos_frag_mask = 0;
+ if (!(may_match & MAY_IPVx)) {
+ wc.tos_mask = 0;
+ wc.frag_mask = 0;
}
if (!(may_match & MAY_ARP_SHA)) {
wc.wildcards |= FWW_ARP_SHA;
}
nf_rec->tcp_flags = nf_flow->tcp_flags;
nf_rec->ip_proto = expired->flow.nw_proto;
- nf_rec->ip_tos = expired->flow.tos_frag & IP_DSCP_MASK;
+ nf_rec->ip_tos = expired->flow.tos & IP_DSCP_MASK;
/* NetFlow messages are limited to 30 records. */
if (ntohs(nf_hdr->count) >= 30) {
}
cls = &ofproto->up.tables[table_id];
- if (flow->tos_frag & FLOW_FRAG_ANY
+ if (flow->frag & FLOW_FRAG_ANY
&& ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
/* For OFPC_NORMAL frag_handling, we must pretend that transport ports
* are unavailable. */
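/* A sketch, not necessarily the code that follows in the function: one way
 * to pretend the ports are unavailable is to perform the lookup on a copy of
 * the flow with the transport ports zeroed out, e.g.
 *
 *     struct flow frag_flow = *flow;      (illustrative name)
 *     frag_flow.tp_src = htons(0);
 *     frag_flow.tp_dst = htons(0);
 *     ... classifier_lookup(cls, &frag_flow) ...
 */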
commit_set_nw_action(const struct flow *flow, struct flow *base,
struct ofpbuf *odp_actions)
{
- int frag = base->tos_frag & FLOW_FRAG_MASK;
struct ovs_key_ipv4 ipv4_key;
if (base->dl_type != htons(ETH_TYPE_IP) ||
if (base->nw_src == flow->nw_src &&
base->nw_dst == flow->nw_dst &&
- base->tos_frag == flow->tos_frag) {
+ base->tos == flow->tos &&
+ base->frag == flow->frag) {
return;
}
ipv4_key.ipv4_src = base->nw_src = flow->nw_src;
ipv4_key.ipv4_dst = base->nw_dst = flow->nw_dst;
ipv4_key.ipv4_proto = base->nw_proto;
- ipv4_key.ipv4_tos = flow->tos_frag & IP_DSCP_MASK;
- ipv4_key.ipv4_frag = (frag == 0 ? OVS_FRAG_TYPE_NONE
- : frag == FLOW_FRAG_ANY ? OVS_FRAG_TYPE_FIRST
+ ipv4_key.ipv4_tos = flow->tos & IP_DSCP_MASK;
+ ipv4_key.ipv4_frag = (base->frag == 0 ? OVS_FRAG_TYPE_NONE
+ : base->frag == FLOW_FRAG_ANY ? OVS_FRAG_TYPE_FIRST
: OVS_FRAG_TYPE_LATER);
commit_action__(odp_actions, OVS_ACTION_ATTR_SET,
break;
case OFPUTIL_OFPAT_SET_NW_TOS:
- ctx->flow.tos_frag &= ~IP_DSCP_MASK;
- ctx->flow.tos_frag |= ia->nw_tos.nw_tos & IP_DSCP_MASK;
+ ctx->flow.tos &= ~IP_DSCP_MASK;
+ ctx->flow.tos |= ia->nw_tos.nw_tos & IP_DSCP_MASK;
break;
case OFPUTIL_OFPAT_SET_TP_SRC:
ctx->table_id = 0;
ctx->exit = false;
- if (ctx->flow.tos_frag & FLOW_FRAG_ANY) {
+ if (ctx->flow.frag & FLOW_FRAG_ANY) {
switch (ctx->ofproto->up.frag_handling) {
case OFPC_FRAG_NORMAL:
/* We must pretend that transport ports are unavailable. */
CLS_FIELD(FWW_DL_SRC, dl_src, DL_SRC) \
CLS_FIELD(FWW_DL_DST | FWW_ETH_MCAST, dl_dst, DL_DST) \
CLS_FIELD(FWW_NW_PROTO, nw_proto, NW_PROTO) \
- CLS_FIELD(0, tos_frag, TOS_FRAG)
+ CLS_FIELD(0, tos, TOS)
/* Field indexes.
*
& wild->wc.vlan_tci_mask);
} else if (f_idx == CLS_F_IDX_TUN_ID) {
eq = !((fixed->tun_id ^ wild->flow.tun_id) & wild->wc.tun_id_mask);
- } else if (f_idx == CLS_F_IDX_TOS_FRAG) {
- eq = !((fixed->tos_frag ^ wild->flow.tos_frag)
- & wild->wc.tos_frag_mask);
+    } else if (f_idx == CLS_F_IDX_TOS) {
+        eq = !((fixed->tos ^ wild->flow.tos) & wild->wc.tos_mask);
} else {
NOT_REACHED();
}
static uint8_t dl_dst_values[][6] = { { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 },
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
static uint8_t nw_proto_values[] = { IPPROTO_TCP, IPPROTO_ICMP };
-static uint8_t tos_frag_values[] = { 48, 0 };
+static uint8_t tos_values[] = { 48, 0 };
static void *values[CLS_N_FIELDS][2];
values[CLS_F_IDX_NW_PROTO][0] = &nw_proto_values[0];
values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1];
- values[CLS_F_IDX_TOS_FRAG][0] = &tos_frag_values[0];
- values[CLS_F_IDX_TOS_FRAG][1] = &tos_frag_values[1];
+ values[CLS_F_IDX_TOS][0] = &tos_values[0];
+ values[CLS_F_IDX_TOS][1] = &tos_values[1];
values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0];
values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1];
#define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values)
#define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values)
#define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values)
-#define N_TOS_FRAG_VALUES ARRAY_SIZE(tos_frag_values)
+#define N_TOS_VALUES ARRAY_SIZE(tos_values)
#define N_FLOW_VALUES (N_NW_SRC_VALUES * \
N_NW_DST_VALUES * \
N_DL_SRC_VALUES * \
N_DL_DST_VALUES * \
N_NW_PROTO_VALUES * \
- N_TOS_FRAG_VALUES)
+ N_TOS_VALUES)
static unsigned int
get_value(unsigned int *x, unsigned n_values)
memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)],
ETH_ADDR_LEN);
flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
- flow.tos_frag = tos_frag_values[get_value(&x, N_TOS_FRAG_VALUES)];
+ flow.tos = tos_values[get_value(&x, N_TOS_VALUES)];
cr0 = classifier_lookup(cls, &flow);
cr1 = tcls_lookup(tcls, &flow);
rule->cls_rule.wc.vlan_tci_mask = htons(UINT16_MAX);
} else if (f_idx == CLS_F_IDX_TUN_ID) {
rule->cls_rule.wc.tun_id_mask = htonll(UINT64_MAX);
- } else if (f_idx == CLS_F_IDX_TOS_FRAG) {
- rule->cls_rule.wc.tos_frag_mask = UINT8_MAX;
+ } else if (f_idx == CLS_F_IDX_TOS) {
+ rule->cls_rule.wc.tos_mask = UINT8_MAX;
} else {
NOT_REACHED();
}