X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=lib%2Fnx-match.c;h=6c8ee13414bdf72fd4b284cf8387c19f2f3573e7;hb=7fa710e43fdf2dbf7fe41877cf73e04ccc4a7166;hp=e2330fd581bce0ddf64e663bc0f939de6b079ec6;hpb=09246b99d1601e2ba7ff85bb26f9b0235632a76d;p=openvswitch diff --git a/lib/nx-match.c b/lib/nx-match.c index e2330fd5..6c8ee134 100644 --- a/lib/nx-match.c +++ b/lib/nx-match.c @@ -55,7 +55,7 @@ struct nxm_field { struct hmap_node hmap_node; enum nxm_field_index index; /* NFI_* value. */ uint32_t header; /* NXM_* value. */ - uint32_t wildcard; /* Wildcard bit, if exactly one. */ + flow_wildcards_t wildcard; /* FWW_* bit, if exactly one. */ ovs_be16 dl_type; /* dl_type prerequisite, if nonzero. */ uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */ const char *name; /* "NXM_*" string. */ @@ -72,6 +72,16 @@ static struct nxm_field nxm_fields[N_NXM_FIELDS] = { /* Hash table of 'nxm_fields'. */ static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields); +/* Possible masks for NXM_OF_ETH_DST_W. */ +static const uint8_t eth_all_0s[ETH_ADDR_LEN] + = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; +static const uint8_t eth_all_1s[ETH_ADDR_LEN] + = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; +static const uint8_t eth_mcast_1[ETH_ADDR_LEN] + = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; +static const uint8_t eth_mcast_0[ETH_ADDR_LEN] + = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff}; + static void nxm_init(void) { @@ -119,17 +129,33 @@ nxm_field_bytes(uint32_t header) unsigned int length = NXM_LENGTH(header); return NXM_HASMASK(header) ? length / 2 : length; } + +/* Returns the width of the data for a field with the given 'header', in + * bits. */ +static int +nxm_field_bits(uint32_t header) +{ + return nxm_field_bytes(header) * 8; +} /* nx_pull_match() and helpers. */ static int -parse_tci(struct cls_rule *rule, ovs_be16 tci, ovs_be16 mask) +parse_nx_reg(const struct nxm_field *f, + struct flow *flow, struct flow_wildcards *wc, + const void *value, const void *maskp) { - enum { OFPFW_DL_TCI = OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP }; - if ((rule->wc.wildcards & OFPFW_DL_TCI) != OFPFW_DL_TCI) { + int idx = NXM_NX_REG_IDX(f->header); + if (wc->reg_masks[idx]) { return NXM_DUP_TYPE; } else { - return cls_rule_set_dl_tci_masked(rule, tci, mask) ? 0 : NXM_INVALID; + flow_wildcards_set_reg_mask(wc, idx, + (NXM_HASMASK(f->header) + ? ntohl(get_unaligned_u32(maskp)) + : UINT32_MAX)); + flow->regs[idx] = ntohl(get_unaligned_u32(value)); + flow->regs[idx] &= wc->reg_masks[idx]; + return 0; } } @@ -151,8 +177,34 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, /* Ethernet header. 
*/ case NFI_NXM_OF_ETH_DST: - memcpy(flow->dl_dst, value, ETH_ADDR_LEN); - return 0; + if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST)) + != (FWW_DL_DST | FWW_ETH_MCAST)) { + return NXM_DUP_TYPE; + } else { + wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST); + memcpy(flow->dl_dst, value, ETH_ADDR_LEN); + return 0; + } + case NFI_NXM_OF_ETH_DST_W: + if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST)) + != (FWW_DL_DST | FWW_ETH_MCAST)) { + return NXM_DUP_TYPE; + } else if (eth_addr_equals(mask, eth_mcast_1)) { + wc->wildcards &= ~FWW_ETH_MCAST; + flow->dl_dst[0] = *(uint8_t *) value & 0x01; + } else if (eth_addr_equals(mask, eth_mcast_0)) { + wc->wildcards &= ~FWW_DL_DST; + memcpy(flow->dl_dst, value, ETH_ADDR_LEN); + flow->dl_dst[0] &= 0xfe; + } else if (eth_addr_equals(mask, eth_all_0s)) { + return 0; + } else if (eth_addr_equals(mask, eth_all_1s)) { + wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST); + memcpy(flow->dl_dst, value, ETH_ADDR_LEN); + return 0; + } else { + return NXM_BAD_MASK; + } case NFI_NXM_OF_ETH_SRC: memcpy(flow->dl_src, value, ETH_ADDR_LEN); return 0; @@ -162,11 +214,20 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, /* 802.1Q header. */ case NFI_NXM_OF_VLAN_TCI: - return parse_tci(rule, get_unaligned_u16(value), htons(UINT16_MAX)); - + if (wc->vlan_tci_mask) { + return NXM_DUP_TYPE; + } else { + cls_rule_set_dl_tci(rule, get_unaligned_u16(value)); + return 0; + } case NFI_NXM_OF_VLAN_TCI_W: - return parse_tci(rule, get_unaligned_u16(value), - get_unaligned_u16(mask)); + if (wc->vlan_tci_mask) { + return NXM_DUP_TYPE; + } else { + cls_rule_set_dl_tci_masked(rule, get_unaligned_u16(value), + get_unaligned_u16(mask)); + return 0; + } /* IP header. */ case NFI_NXM_OF_IP_TOS: @@ -260,6 +321,26 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, flow->tun_id = htonl(ntohll(get_unaligned_u64(value))); return 0; + /* Registers. 
*/ + case NFI_NXM_NX_REG0: + case NFI_NXM_NX_REG0_W: +#if FLOW_N_REGS >= 2 + case NFI_NXM_NX_REG1: + case NFI_NXM_NX_REG1_W: +#endif +#if FLOW_N_REGS >= 3 + case NFI_NXM_NX_REG2: + case NFI_NXM_NX_REG2_W: +#endif +#if FLOW_N_REGS >= 4 + case NFI_NXM_NX_REG3: + case NFI_NXM_NX_REG3_W: +#endif +#if FLOW_N_REGS > 4 +#error +#endif + return parse_nx_reg(f, flow, wc, value, mask); + case N_NXM_FIELDS: NOT_REACHED(); } @@ -394,6 +475,23 @@ nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask) ofpbuf_put(b, &mask, sizeof mask); } +static void +nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask) +{ + switch (mask) { + case 0: + break; + + case CONSTANT_HTONS(UINT16_MAX): + nxm_put_16(b, header, value); + break; + + default: + nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask); + break; + } +} + static void nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value) { @@ -416,7 +514,7 @@ nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask) case 0: break; - case UINT32_MAX: + case CONSTANT_HTONL(UINT32_MAX): nxm_put_32(b, header, value); break; @@ -433,7 +531,6 @@ nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value) ofpbuf_put(b, &value, sizeof value); } - static void nxm_put_eth(struct ofpbuf *b, uint32_t header, const uint8_t value[ETH_ADDR_LEN]) @@ -442,17 +539,40 @@ nxm_put_eth(struct ofpbuf *b, uint32_t header, ofpbuf_put(b, value, ETH_ADDR_LEN); } +static void +nxm_put_eth_dst(struct ofpbuf *b, + uint32_t wc, const uint8_t value[ETH_ADDR_LEN]) +{ + switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) { + case FWW_DL_DST | FWW_ETH_MCAST: + break; + case FWW_DL_DST: + nxm_put_header(b, NXM_OF_ETH_DST_W); + ofpbuf_put(b, value, ETH_ADDR_LEN); + ofpbuf_put(b, eth_mcast_1, ETH_ADDR_LEN); + break; + case FWW_ETH_MCAST: + nxm_put_header(b, NXM_OF_ETH_DST_W); + ofpbuf_put(b, value, ETH_ADDR_LEN); + ofpbuf_put(b, eth_mcast_0, ETH_ADDR_LEN); + break; + case 0: + nxm_put_eth(b, NXM_OF_ETH_DST, value); + break; + } +} + int nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) { - const uint32_t wc = cr->wc.wildcards; + const flow_wildcards_t wc = cr->wc.wildcards; const struct flow *flow = &cr->flow; const size_t start_len = b->size; - ovs_be16 vid, pcp; int match_len; + int i; /* Metadata. */ - if (!(wc & OFPFW_IN_PORT)) { + if (!(wc & FWW_IN_PORT)) { uint16_t in_port = flow->in_port; if (in_port == ODPP_LOCAL) { in_port = OFPP_LOCAL; @@ -461,88 +581,63 @@ nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) } /* Ethernet. */ - if (!(wc & OFPFW_DL_DST)) { - nxm_put_eth(b, NXM_OF_ETH_DST, flow->dl_dst); - } - if (!(wc & OFPFW_DL_SRC)) { + nxm_put_eth_dst(b, wc, flow->dl_dst); + if (!(wc & FWW_DL_SRC)) { nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src); } - if (!(wc & OFPFW_DL_TYPE)) { + if (!(wc & FWW_DL_TYPE)) { nxm_put_16(b, NXM_OF_ETH_TYPE, flow->dl_type); } /* 802.1Q. 
*/ - vid = flow->dl_vlan & htons(VLAN_VID_MASK); - pcp = htons((flow->dl_vlan_pcp << VLAN_PCP_SHIFT) & VLAN_PCP_MASK); - switch (wc & (OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP)) { - case OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP: - break; - case OFPFW_DL_VLAN: - nxm_put_16w(b, NXM_OF_VLAN_TCI_W, pcp | htons(VLAN_CFI), - htons(VLAN_PCP_MASK | VLAN_CFI)); - break; - case OFPFW_DL_VLAN_PCP: - if (flow->dl_vlan == htons(OFP_VLAN_NONE)) { - nxm_put_16(b, NXM_OF_VLAN_TCI, 0); - } else { - nxm_put_16w(b, NXM_OF_VLAN_TCI_W, vid | htons(VLAN_CFI), - htons(VLAN_VID_MASK | VLAN_CFI)); - } - break; - case 0: - if (flow->dl_vlan == htons(OFP_VLAN_NONE)) { - nxm_put_16(b, NXM_OF_VLAN_TCI, 0); - } else { - nxm_put_16(b, NXM_OF_VLAN_TCI, vid | pcp | htons(VLAN_CFI)); - } - break; - } + nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask); - if (!(wc & OFPFW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) { + /* L3. */ + if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) { /* IP. */ - if (!(wc & OFPFW_NW_TOS)) { + if (!(wc & FWW_NW_TOS)) { nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc); } nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask); nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask); - if (!(wc & OFPFW_NW_PROTO)) { + if (!(wc & FWW_NW_PROTO)) { nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto); switch (flow->nw_proto) { /* TCP. */ case IP_TYPE_TCP: - if (!(wc & OFPFW_TP_SRC)) { + if (!(wc & FWW_TP_SRC)) { nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src); } - if (!(wc & OFPFW_TP_DST)) { + if (!(wc & FWW_TP_DST)) { nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst); } break; /* UDP. */ case IP_TYPE_UDP: - if (!(wc & OFPFW_TP_SRC)) { + if (!(wc & FWW_TP_SRC)) { nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src); } - if (!(wc & OFPFW_TP_DST)) { + if (!(wc & FWW_TP_DST)) { nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst); } break; /* ICMP. */ case IP_TYPE_ICMP: - if (!(wc & OFPFW_TP_SRC)) { + if (!(wc & FWW_TP_SRC)) { nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src)); } - if (!(wc & OFPFW_TP_DST)) { + if (!(wc & FWW_TP_DST)) { nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst)); } break; } } - } else if (!(wc & OFPFW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) { + } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) { /* ARP. */ - if (!(wc & OFPFW_NW_PROTO)) { + if (!(wc & FWW_NW_PROTO)) { nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto)); } nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask); @@ -550,10 +645,16 @@ nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) } /* Tunnel ID. */ - if (!(wc & NXFW_TUN_ID)) { + if (!(wc & FWW_TUN_ID)) { nxm_put_64(b, NXM_NX_TUN_ID, htonll(ntohl(flow->tun_id))); } + /* Registers. */ + for (i = 0; i < FLOW_N_REGS; i++) { + nxm_put_32m(b, NXM_NX_REG(i), + htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i])); + } + match_len = b->size - start_len; ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len); return match_len; @@ -637,17 +738,15 @@ static const char * parse_hex_bytes(struct ofpbuf *b, const char *s, unsigned int n) { while (n--) { - int low, high; uint8_t byte; + bool ok; s += strspn(s, " "); - low = hexit_value(*s); - high = low < 0 ? 
low : hexit_value(s[1]); - if (low < 0 || high < 0) { + byte = hexits_value(s, 2, &ok); + if (!ok) { ovs_fatal(0, "%.2s: hex digits expected", s); } - byte = 16 * low + high; ofpbuf_put(b, &byte, 1); s += 2; } @@ -707,3 +806,207 @@ nx_match_from_string(const char *s, struct ofpbuf *b) ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len); return match_len; } + +/* nxm_check_reg_move(), nxm_check_reg_load(). */ + +static bool +field_ok(const struct nxm_field *f, const struct flow *flow, int size) +{ + return (f && !NXM_HASMASK(f->header) + && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header)); +} + +int +nxm_check_reg_move(const struct nx_action_reg_move *action, + const struct flow *flow) +{ + const struct nxm_field *src; + const struct nxm_field *dst; + + if (action->n_bits == htons(0)) { + return BAD_ARGUMENT; + } + + src = nxm_field_lookup(ntohl(action->src)); + if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) { + return BAD_ARGUMENT; + } + + dst = nxm_field_lookup(ntohl(action->dst)); + if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) { + return BAD_ARGUMENT; + } + + if (!NXM_IS_NX_REG(dst->header) + && dst->header != NXM_OF_VLAN_TCI + && dst->header != NXM_NX_TUN_ID) { + return BAD_ARGUMENT; + } + + return 0; +} + +int +nxm_check_reg_load(const struct nx_action_reg_load *action, + const struct flow *flow) +{ + const struct nxm_field *dst; + int ofs, n_bits; + + ofs = ntohs(action->ofs_nbits) >> 6; + n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1; + dst = nxm_field_lookup(ntohl(action->dst)); + if (!field_ok(dst, flow, ofs + n_bits)) { + return BAD_ARGUMENT; + } + + /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in + * action->value. */ + if (n_bits < 64 && ntohll(action->value) >> n_bits) { + return BAD_ARGUMENT; + } + + if (!NXM_IS_NX_REG(dst->header)) { + return BAD_ARGUMENT; + } + + return 0; +} + +/* nxm_execute_reg_move(), nxm_execute_reg_load(). */ + +static uint64_t +nxm_read_field(const struct nxm_field *src, const struct flow *flow) +{ + switch (src->index) { + case NFI_NXM_OF_IN_PORT: + return flow->in_port == ODPP_LOCAL ? 
OFPP_LOCAL : flow->in_port; + + case NFI_NXM_OF_ETH_DST: + return eth_addr_to_uint64(flow->dl_dst); + + case NFI_NXM_OF_ETH_SRC: + return eth_addr_to_uint64(flow->dl_src); + + case NFI_NXM_OF_ETH_TYPE: + return ntohs(flow->dl_type); + + case NFI_NXM_OF_VLAN_TCI: + return ntohs(flow->vlan_tci); + + case NFI_NXM_OF_IP_TOS: + return flow->nw_tos; + + case NFI_NXM_OF_IP_PROTO: + case NFI_NXM_OF_ARP_OP: + return flow->nw_proto; + + case NFI_NXM_OF_IP_SRC: + case NFI_NXM_OF_ARP_SPA: + return ntohl(flow->nw_src); + + case NFI_NXM_OF_IP_DST: + case NFI_NXM_OF_ARP_TPA: + return ntohl(flow->nw_dst); + + case NFI_NXM_OF_TCP_SRC: + case NFI_NXM_OF_UDP_SRC: + return ntohs(flow->tp_src); + + case NFI_NXM_OF_TCP_DST: + case NFI_NXM_OF_UDP_DST: + return ntohs(flow->tp_dst); + + case NFI_NXM_OF_ICMP_TYPE: + return ntohs(flow->tp_src) & 0xff; + + case NFI_NXM_OF_ICMP_CODE: + return ntohs(flow->tp_dst) & 0xff; + + case NFI_NXM_NX_TUN_ID: + return ntohl(flow->tun_id); + +#define NXM_READ_REGISTER(IDX) \ + case NFI_NXM_NX_REG##IDX: \ + return flow->regs[IDX]; \ + case NFI_NXM_NX_REG##IDX##_W: \ + NOT_REACHED(); + + NXM_READ_REGISTER(0); +#if FLOW_N_REGS >= 2 + NXM_READ_REGISTER(1); +#endif +#if FLOW_N_REGS >= 3 + NXM_READ_REGISTER(2); +#endif +#if FLOW_N_REGS >= 4 + NXM_READ_REGISTER(3); +#endif +#if FLOW_N_REGS > 4 +#error +#endif + + case NFI_NXM_OF_ETH_DST_W: + case NFI_NXM_OF_VLAN_TCI_W: + case NFI_NXM_OF_IP_SRC_W: + case NFI_NXM_OF_IP_DST_W: + case NFI_NXM_OF_ARP_SPA_W: + case NFI_NXM_OF_ARP_TPA_W: + case N_NXM_FIELDS: + NOT_REACHED(); + } + + NOT_REACHED(); +} + +void +nxm_execute_reg_move(const struct nx_action_reg_move *action, + struct flow *flow) +{ + /* Preparation. */ + int n_bits = ntohs(action->n_bits); + uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1; + + /* Get the interesting bits of the source field. */ + const struct nxm_field *src = nxm_field_lookup(ntohl(action->src)); + int src_ofs = ntohs(action->src_ofs); + uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs); + + /* Get the remaining bits of the destination field. */ + const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst)); + int dst_ofs = ntohs(action->dst_ofs); + uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs); + + /* Get the final value. */ + uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs); + + /* Store the result. */ + if (NXM_IS_NX_REG(dst->header)) { + flow->regs[NXM_NX_REG_IDX(dst->header)] = new_data; + } else if (dst->header == NXM_OF_VLAN_TCI) { + flow->vlan_tci = htons(new_data); + } else if (dst->header == NXM_NX_TUN_ID) { + flow->tun_id = htonl(new_data); + } else { + NOT_REACHED(); + } +} + +void +nxm_execute_reg_load(const struct nx_action_reg_load *action, + struct flow *flow) +{ + /* Preparation. */ + int n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1; + uint32_t mask = n_bits == 32 ? UINT32_MAX : (UINT32_C(1) << n_bits) - 1; + uint32_t *reg = &flow->regs[NXM_NX_REG_IDX(ntohl(action->dst))]; + + /* Get source data. */ + uint32_t src_data = ntohll(action->value); + + /* Get remaining bits of the destination field. */ + int dst_ofs = ntohs(action->ofs_nbits) >> 6; + uint32_t dst_data = *reg & ~(mask << dst_ofs); + + *reg = dst_data | (src_data << dst_ofs); +}
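
Note on the register-load encoding introduced above: NXA_REG_LOAD packs the destination bit offset into the upper 10 bits of 'ofs_nbits' and (n_bits - 1) into the low 6 bits, which is why nxm_check_reg_load() and nxm_execute_reg_load() recover them with ">> 6" and "& 0x3f". The following standalone sketch is not part of the patch and its helper names (pack_ofs_nbits, reg_load_demo) are invented for illustration; it only shows that packing and the same mask-and-shift update applied to a 32-bit register:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack an offset and bit count the way ofs_nbits is encoded (host order):
     * offset in the upper 10 bits, n_bits - 1 in the low 6 bits. */
    static uint16_t
    pack_ofs_nbits(int ofs, int n_bits)
    {
        return (ofs << 6) | (n_bits - 1);
    }

    /* Load 'value' into the 'n_bits' bits of '*reg' that start at the packed
     * offset, leaving the other bits alone.  This mirrors the arithmetic in
     * nxm_execute_reg_load(); the real code omits the '& mask' on 'value'
     * because nxm_check_reg_load() has already rejected oversized values. */
    static void
    reg_load_demo(uint32_t *reg, uint16_t ofs_nbits, uint32_t value)
    {
        int n_bits = (ofs_nbits & 0x3f) + 1;
        int dst_ofs = ofs_nbits >> 6;
        uint32_t mask = n_bits == 32 ? UINT32_MAX : (UINT32_C(1) << n_bits) - 1;

        *reg = (*reg & ~(mask << dst_ofs)) | ((value & mask) << dst_ofs);
    }

    int
    main(void)
    {
        uint32_t reg = 0xffff0000;

        /* Load the 8-bit value 0x5a at bit offset 4: only bits 4..11 change. */
        reg_load_demo(&reg, pack_ofs_nbits(4, 8), 0x5a);
        assert(reg == 0xffff05a0);
        printf("reg = %#x\n", (unsigned) reg);
        return 0;
    }

The same mask arithmetic underlies nxm_execute_reg_move(), only with a 64-bit mask and separate source and destination offsets read back through nxm_read_field().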