X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=lib%2Fnx-match.c;h=2654dde07fc325d019e82c77172a4036291d6326;hb=df2c07f4338faac04f4969f243fe4e8083b309ac;hp=e2330fd581bce0ddf64e663bc0f939de6b079ec6;hpb=09246b99d1601e2ba7ff85bb26f9b0235632a76d;p=openvswitch diff --git a/lib/nx-match.c b/lib/nx-match.c index e2330fd5..2654dde0 100644 --- a/lib/nx-match.c +++ b/lib/nx-match.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Nicira Networks. + * Copyright (c) 2010, 2011 Nicira Networks. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +18,8 @@ #include "nx-match.h" +#include + #include "classifier.h" #include "dynamic-string.h" #include "ofp-util.h" @@ -46,26 +48,30 @@ enum { /* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from * zero. */ enum nxm_field_index { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) NFI_NXM_##HEADER, +#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \ + NFI_NXM_##HEADER, #include "nx-match.def" N_NXM_FIELDS }; struct nxm_field { struct hmap_node hmap_node; - enum nxm_field_index index; /* NFI_* value. */ - uint32_t header; /* NXM_* value. */ - uint32_t wildcard; /* Wildcard bit, if exactly one. */ - ovs_be16 dl_type; /* dl_type prerequisite, if nonzero. */ - uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */ - const char *name; /* "NXM_*" string. */ + enum nxm_field_index index; /* NFI_* value. */ + uint32_t header; /* NXM_* value. */ + flow_wildcards_t wildcard; /* FWW_* bit, if exactly one. */ + ovs_be16 dl_type[N_NXM_DL_TYPES]; /* dl_type prerequisites. */ + uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */ + const char *name; /* "NXM_*" string. */ + bool writable; /* Writable with NXAST_REG_{MOVE,LOAD}? */ }; + /* All the known fields. */ static struct nxm_field nxm_fields[N_NXM_FIELDS] = { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) \ +#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \ { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \ - CONSTANT_HTONS(DL_TYPE), NW_PROTO, "NXM_" #HEADER }, + DL_CONVERT DL_TYPES, NW_PROTO, "NXM_" #HEADER, WRITABLE }, +#define DL_CONVERT(T1, T2) { CONSTANT_HTONS(T1), CONSTANT_HTONS(T2) } #include "nx-match.def" }; @@ -87,7 +93,7 @@ nxm_init(void) /* Verify that the header values are unique (duplicate "case" values * cause a compile error). */ switch (0) { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) \ +#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \ case NXM_##HEADER: break; #include "nx-match.def" } @@ -113,23 +119,39 @@ nxm_field_lookup(uint32_t header) /* Returns the width of the data for a field with the given 'header', in * bytes. */ -static int +int nxm_field_bytes(uint32_t header) { unsigned int length = NXM_LENGTH(header); return NXM_HASMASK(header) ? length / 2 : length; } + +/* Returns the width of the data for a field with the given 'header', in + * bits. */ +int +nxm_field_bits(uint32_t header) +{ + return nxm_field_bytes(header) * 8; +} /* nx_pull_match() and helpers. 
*/ static int -parse_tci(struct cls_rule *rule, ovs_be16 tci, ovs_be16 mask) +parse_nx_reg(const struct nxm_field *f, + struct flow *flow, struct flow_wildcards *wc, + const void *value, const void *maskp) { - enum { OFPFW_DL_TCI = OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP }; - if ((rule->wc.wildcards & OFPFW_DL_TCI) != OFPFW_DL_TCI) { + int idx = NXM_NX_REG_IDX(f->header); + if (wc->reg_masks[idx]) { return NXM_DUP_TYPE; } else { - return cls_rule_set_dl_tci_masked(rule, tci, mask) ? 0 : NXM_INVALID; + flow_wildcards_set_reg_mask(wc, idx, + (NXM_HASMASK(f->header) + ? ntohl(get_unaligned_be32(maskp)) + : UINT32_MAX)); + flow->regs[idx] = ntohl(get_unaligned_be32(value)); + flow->regs[idx] &= wc->reg_masks[idx]; + return 0; } } @@ -140,33 +162,57 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, struct flow_wildcards *wc = &rule->wc; struct flow *flow = &rule->flow; + BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1); + switch (f->index) { /* Metadata. */ case NFI_NXM_OF_IN_PORT: - flow->in_port = ntohs(get_unaligned_u16(value)); - if (flow->in_port == OFPP_LOCAL) { - flow->in_port = ODPP_LOCAL; - } + flow->in_port = ntohs(get_unaligned_be16(value)); return 0; /* Ethernet header. */ case NFI_NXM_OF_ETH_DST: - memcpy(flow->dl_dst, value, ETH_ADDR_LEN); - return 0; + if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST)) + != (FWW_DL_DST | FWW_ETH_MCAST)) { + return NXM_DUP_TYPE; + } else { + wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST); + memcpy(flow->dl_dst, value, ETH_ADDR_LEN); + return 0; + } + case NFI_NXM_OF_ETH_DST_W: + if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST)) + != (FWW_DL_DST | FWW_ETH_MCAST)) { + return NXM_DUP_TYPE; + } else if (flow_wildcards_is_dl_dst_mask_valid(mask)) { + cls_rule_set_dl_dst_masked(rule, value, mask); + return 0; + } else { + return NXM_BAD_MASK; + } case NFI_NXM_OF_ETH_SRC: memcpy(flow->dl_src, value, ETH_ADDR_LEN); return 0; case NFI_NXM_OF_ETH_TYPE: - flow->dl_type = get_unaligned_u16(value); + flow->dl_type = ofputil_dl_type_from_openflow(get_unaligned_be16(value)); return 0; /* 802.1Q header. */ case NFI_NXM_OF_VLAN_TCI: - return parse_tci(rule, get_unaligned_u16(value), htons(UINT16_MAX)); - + if (wc->vlan_tci_mask) { + return NXM_DUP_TYPE; + } else { + cls_rule_set_dl_tci(rule, get_unaligned_be16(value)); + return 0; + } case NFI_NXM_OF_VLAN_TCI_W: - return parse_tci(rule, get_unaligned_u16(value), - get_unaligned_u16(mask)); + if (wc->vlan_tci_mask) { + return NXM_DUP_TYPE; + } else { + cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value), + get_unaligned_be16(mask)); + return 0; + } /* IP header. 
*/ case NFI_NXM_OF_IP_TOS: @@ -186,7 +232,7 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, if (wc->nw_src_mask) { return NXM_DUP_TYPE; } else { - cls_rule_set_nw_src(rule, get_unaligned_u32(value)); + cls_rule_set_nw_src(rule, get_unaligned_be32(value)); return 0; } case NFI_NXM_OF_IP_SRC_W: @@ -194,8 +240,8 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, if (wc->nw_src_mask) { return NXM_DUP_TYPE; } else { - ovs_be32 ip = get_unaligned_u32(value); - ovs_be32 netmask = get_unaligned_u32(mask); + ovs_be32 ip = get_unaligned_be32(value); + ovs_be32 netmask = get_unaligned_be32(mask); if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) { return NXM_BAD_MASK; } @@ -206,7 +252,7 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, if (wc->nw_dst_mask) { return NXM_DUP_TYPE; } else { - cls_rule_set_nw_dst(rule, get_unaligned_u32(value)); + cls_rule_set_nw_dst(rule, get_unaligned_be32(value)); return 0; } case NFI_NXM_OF_IP_DST_W: @@ -214,28 +260,72 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, if (wc->nw_dst_mask) { return NXM_DUP_TYPE; } else { - ovs_be32 ip = get_unaligned_u32(value); - ovs_be32 netmask = get_unaligned_u32(mask); + ovs_be32 ip = get_unaligned_be32(value); + ovs_be32 netmask = get_unaligned_be32(mask); if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) { return NXM_BAD_MASK; } return 0; } + /* IPv6 addresses. */ + case NFI_NXM_NX_IPV6_SRC: + if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) { + return NXM_DUP_TYPE; + } else { + struct in6_addr ipv6; + memcpy(&ipv6, value, sizeof ipv6); + cls_rule_set_ipv6_src(rule, &ipv6); + return 0; + } + case NFI_NXM_NX_IPV6_SRC_W: + if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) { + return NXM_DUP_TYPE; + } else { + struct in6_addr ipv6, netmask; + memcpy(&ipv6, value, sizeof ipv6); + memcpy(&netmask, mask, sizeof netmask); + if (!cls_rule_set_ipv6_src_masked(rule, &ipv6, &netmask)) { + return NXM_BAD_MASK; + } + return 0; + } + case NFI_NXM_NX_IPV6_DST: + if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) { + return NXM_DUP_TYPE; + } else { + struct in6_addr ipv6; + memcpy(&ipv6, value, sizeof ipv6); + cls_rule_set_ipv6_dst(rule, &ipv6); + return 0; + } + case NFI_NXM_NX_IPV6_DST_W: + if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) { + return NXM_DUP_TYPE; + } else { + struct in6_addr ipv6, netmask; + memcpy(&ipv6, value, sizeof ipv6); + memcpy(&netmask, mask, sizeof netmask); + if (!cls_rule_set_ipv6_dst_masked(rule, &ipv6, &netmask)) { + return NXM_BAD_MASK; + } + return 0; + } + /* TCP header. */ case NFI_NXM_OF_TCP_SRC: - flow->tp_src = get_unaligned_u16(value); + flow->tp_src = get_unaligned_be16(value); return 0; case NFI_NXM_OF_TCP_DST: - flow->tp_dst = get_unaligned_u16(value); + flow->tp_dst = get_unaligned_be16(value); return 0; /* UDP header. */ case NFI_NXM_OF_UDP_SRC: - flow->tp_src = get_unaligned_u16(value); + flow->tp_src = get_unaligned_be16(value); return 0; case NFI_NXM_OF_UDP_DST: - flow->tp_dst = get_unaligned_u16(value); + flow->tp_dst = get_unaligned_be16(value); return 0; /* ICMP header. */ @@ -246,19 +336,91 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, flow->tp_dst = htons(*(uint8_t *) value); return 0; + /* ICMPv6 header. */ + case NFI_NXM_NX_ICMPV6_TYPE: + flow->tp_src = htons(*(uint8_t *) value); + return 0; + case NFI_NXM_NX_ICMPV6_CODE: + flow->tp_dst = htons(*(uint8_t *) value); + return 0; + + /* IPv6 Neighbor Discovery. */ + case NFI_NXM_NX_ND_TARGET: + /* We've already verified that it's an ICMPv6 message. 
*/ + if ((flow->tp_src != htons(ND_NEIGHBOR_SOLICIT)) + && (flow->tp_src != htons(ND_NEIGHBOR_ADVERT))) { + return NXM_BAD_PREREQ; + } + memcpy(&flow->nd_target, value, sizeof flow->nd_target); + return 0; + case NFI_NXM_NX_ND_SLL: + /* We've already verified that it's an ICMPv6 message. */ + if (flow->tp_src != htons(ND_NEIGHBOR_SOLICIT)) { + return NXM_BAD_PREREQ; + } + memcpy(flow->arp_sha, value, ETH_ADDR_LEN); + return 0; + case NFI_NXM_NX_ND_TLL: + /* We've already verified that it's an ICMPv6 message. */ + if (flow->tp_src != htons(ND_NEIGHBOR_ADVERT)) { + return NXM_BAD_PREREQ; + } + memcpy(flow->arp_tha, value, ETH_ADDR_LEN); + return 0; + /* ARP header. */ case NFI_NXM_OF_ARP_OP: - if (ntohs(get_unaligned_u16(value)) > 255) { + if (ntohs(get_unaligned_be16(value)) > 255) { return NXM_BAD_VALUE; } else { - flow->nw_proto = ntohs(get_unaligned_u16(value)); + flow->nw_proto = ntohs(get_unaligned_be16(value)); return 0; } + case NFI_NXM_NX_ARP_SHA: + memcpy(flow->arp_sha, value, ETH_ADDR_LEN); + return 0; + case NFI_NXM_NX_ARP_THA: + memcpy(flow->arp_tha, value, ETH_ADDR_LEN); + return 0; + /* Tunnel ID. */ case NFI_NXM_NX_TUN_ID: - flow->tun_id = htonl(ntohll(get_unaligned_u64(value))); - return 0; + if (wc->tun_id_mask) { + return NXM_DUP_TYPE; + } else { + cls_rule_set_tun_id(rule, get_unaligned_be64(value)); + return 0; + } + case NFI_NXM_NX_TUN_ID_W: + if (wc->tun_id_mask) { + return NXM_DUP_TYPE; + } else { + ovs_be64 tun_id = get_unaligned_be64(value); + ovs_be64 tun_mask = get_unaligned_be64(mask); + cls_rule_set_tun_id_masked(rule, tun_id, tun_mask); + return 0; + } + + /* Registers. */ + case NFI_NXM_NX_REG0: + case NFI_NXM_NX_REG0_W: +#if FLOW_N_REGS >= 2 + case NFI_NXM_NX_REG1: + case NFI_NXM_NX_REG1_W: +#endif +#if FLOW_N_REGS >= 3 + case NFI_NXM_NX_REG2: + case NFI_NXM_NX_REG2_W: +#endif +#if FLOW_N_REGS >= 4 + case NFI_NXM_NX_REG3: + case NFI_NXM_NX_REG3_W: +#endif +#if FLOW_N_REGS > 4 +#error +#endif + return parse_nx_reg(f, flow, wc, value, mask); case N_NXM_FIELDS: NOT_REACHED(); @@ -269,9 +431,19 @@ parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, static bool nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow) { - return (!field->dl_type - || (field->dl_type == flow->dl_type - && (!field->nw_proto || field->nw_proto == flow->nw_proto))); + if (field->nw_proto && field->nw_proto != flow->nw_proto) { + return false; + } + + if (!field->dl_type[0]) { + return true; + } else if (field->dl_type[0] == flow->dl_type) { + return true; + } else if (field->dl_type[1] && field->dl_type[1] == flow->dl_type) { + return true; + } + + return false; } static uint32_t @@ -314,7 +486,7 @@ nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority, p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8)); if (!p) { - VLOG_DBG_RL(&rl, "nx_match length %zu, rounded up to a " + VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a " "multiple of 8, is longer than space in message (max " "length %zu)", match_len, b->size); return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN); @@ -394,6 +566,23 @@ nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask) ofpbuf_put(b, &mask, sizeof mask); } +static void +nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask) +{ + switch (mask) { + case 0: + break; + + case CONSTANT_HTONS(UINT16_MAX): + nxm_put_16(b, header, value); + break; + + default: + nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask); + break; + } +} + static void nxm_put_32(struct ofpbuf *b, 
uint32_t header, ovs_be32 value) { @@ -416,7 +605,7 @@ nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask) case 0: break; - case UINT32_MAX: + case CONSTANT_HTONL(UINT32_MAX): nxm_put_32(b, header, value); break; @@ -433,6 +622,30 @@ nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value) ofpbuf_put(b, &value, sizeof value); } +static void +nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask) +{ + nxm_put_header(b, header); + ofpbuf_put(b, &value, sizeof value); + ofpbuf_put(b, &mask, sizeof mask); +} + +static void +nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask) +{ + switch (mask) { + case 0: + break; + + case CONSTANT_HTONLL(UINT64_MAX): + nxm_put_64(b, header, value); + break; + + default: + nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask); + break; + } +} static void nxm_put_eth(struct ofpbuf *b, uint32_t header, @@ -442,116 +655,206 @@ nxm_put_eth(struct ofpbuf *b, uint32_t header, ofpbuf_put(b, value, ETH_ADDR_LEN); } +static void +nxm_put_eth_dst(struct ofpbuf *b, + flow_wildcards_t wc, const uint8_t value[ETH_ADDR_LEN]) +{ + switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) { + case FWW_DL_DST | FWW_ETH_MCAST: + break; + default: + nxm_put_header(b, NXM_OF_ETH_DST_W); + ofpbuf_put(b, value, ETH_ADDR_LEN); + ofpbuf_put(b, flow_wildcards_to_dl_dst_mask(wc), ETH_ADDR_LEN); + break; + case 0: + nxm_put_eth(b, NXM_OF_ETH_DST, value); + break; + } +} + +static void +nxm_put_ipv6(struct ofpbuf *b, uint32_t header, + const struct in6_addr *value, const struct in6_addr *mask) +{ + if (ipv6_mask_is_any(mask)) { + return; + } else if (ipv6_mask_is_exact(mask)) { + nxm_put_header(b, header); + ofpbuf_put(b, value, sizeof *value); + } else { + nxm_put_header(b, NXM_MAKE_WILD_HEADER(header)); + ofpbuf_put(b, value, sizeof *value); + ofpbuf_put(b, mask, sizeof *mask); + } +} + +/* Appends to 'b' the nx_match format that expresses 'cr' (except for + * 'cr->priority', because priority is not part of nx_match), plus enough + * zero bytes to pad the nx_match out to a multiple of 8. + * + * This function can cause 'b''s data to be reallocated. + * + * Returns the number of bytes appended to 'b', excluding padding. + * + * If 'cr' is a catch-all rule that matches every packet, then this function + * appends nothing to 'b' and returns 0. */ int nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) { - const uint32_t wc = cr->wc.wildcards; + const flow_wildcards_t wc = cr->wc.wildcards; const struct flow *flow = &cr->flow; const size_t start_len = b->size; - ovs_be16 vid, pcp; int match_len; + int i; + + BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1); /* Metadata. */ - if (!(wc & OFPFW_IN_PORT)) { + if (!(wc & FWW_IN_PORT)) { uint16_t in_port = flow->in_port; - if (in_port == ODPP_LOCAL) { - in_port = OFPP_LOCAL; - } nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port)); } /* Ethernet. */ - if (!(wc & OFPFW_DL_DST)) { - nxm_put_eth(b, NXM_OF_ETH_DST, flow->dl_dst); - } - if (!(wc & OFPFW_DL_SRC)) { + nxm_put_eth_dst(b, wc, flow->dl_dst); + if (!(wc & FWW_DL_SRC)) { nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src); } - if (!(wc & OFPFW_DL_TYPE)) { - nxm_put_16(b, NXM_OF_ETH_TYPE, flow->dl_type); + if (!(wc & FWW_DL_TYPE)) { + nxm_put_16(b, NXM_OF_ETH_TYPE, + ofputil_dl_type_to_openflow(flow->dl_type)); } /* 802.1Q. 
*/ - vid = flow->dl_vlan & htons(VLAN_VID_MASK); - pcp = htons((flow->dl_vlan_pcp << VLAN_PCP_SHIFT) & VLAN_PCP_MASK); - switch (wc & (OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP)) { - case OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP: - break; - case OFPFW_DL_VLAN: - nxm_put_16w(b, NXM_OF_VLAN_TCI_W, pcp | htons(VLAN_CFI), - htons(VLAN_PCP_MASK | VLAN_CFI)); - break; - case OFPFW_DL_VLAN_PCP: - if (flow->dl_vlan == htons(OFP_VLAN_NONE)) { - nxm_put_16(b, NXM_OF_VLAN_TCI, 0); - } else { - nxm_put_16w(b, NXM_OF_VLAN_TCI_W, vid | htons(VLAN_CFI), - htons(VLAN_VID_MASK | VLAN_CFI)); - } - break; - case 0: - if (flow->dl_vlan == htons(OFP_VLAN_NONE)) { - nxm_put_16(b, NXM_OF_VLAN_TCI, 0); - } else { - nxm_put_16(b, NXM_OF_VLAN_TCI, vid | pcp | htons(VLAN_CFI)); - } - break; - } + nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask); - if (!(wc & OFPFW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) { + /* L3. */ + if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) { /* IP. */ - if (!(wc & OFPFW_NW_TOS)) { + if (!(wc & FWW_NW_TOS)) { nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc); } nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask); nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask); - if (!(wc & OFPFW_NW_PROTO)) { + if (!(wc & FWW_NW_PROTO)) { nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto); switch (flow->nw_proto) { /* TCP. */ - case IP_TYPE_TCP: - if (!(wc & OFPFW_TP_SRC)) { + case IPPROTO_TCP: + if (!(wc & FWW_TP_SRC)) { nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src); } - if (!(wc & OFPFW_TP_DST)) { + if (!(wc & FWW_TP_DST)) { nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst); } break; /* UDP. */ - case IP_TYPE_UDP: - if (!(wc & OFPFW_TP_SRC)) { + case IPPROTO_UDP: + if (!(wc & FWW_TP_SRC)) { nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src); } - if (!(wc & OFPFW_TP_DST)) { + if (!(wc & FWW_TP_DST)) { nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst); } break; /* ICMP. */ - case IP_TYPE_ICMP: - if (!(wc & OFPFW_TP_SRC)) { + case IPPROTO_ICMP: + if (!(wc & FWW_TP_SRC)) { nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src)); } - if (!(wc & OFPFW_TP_DST)) { + if (!(wc & FWW_TP_DST)) { nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst)); } break; } } - } else if (!(wc & OFPFW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) { + } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IPV6)) { + /* IPv6. */ + + if (!(wc & FWW_NW_TOS)) { + nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc); + } + nxm_put_ipv6(b, NXM_NX_IPV6_SRC, &flow->ipv6_src, + &cr->wc.ipv6_src_mask); + nxm_put_ipv6(b, NXM_NX_IPV6_DST, &flow->ipv6_dst, + &cr->wc.ipv6_dst_mask); + + if (!(wc & FWW_NW_PROTO)) { + nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto); + switch (flow->nw_proto) { + /* TCP. */ + case IPPROTO_TCP: + if (!(wc & FWW_TP_SRC)) { + nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src); + } + if (!(wc & FWW_TP_DST)) { + nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst); + } + break; + + /* UDP. */ + case IPPROTO_UDP: + if (!(wc & FWW_TP_SRC)) { + nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src); + } + if (!(wc & FWW_TP_DST)) { + nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst); + } + break; + + /* ICMPv6. 
*/ + case IPPROTO_ICMPV6: + if (!(wc & FWW_TP_SRC)) { + nxm_put_8(b, NXM_NX_ICMPV6_TYPE, ntohs(flow->tp_src)); + + if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) || + flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) { + if (!(wc & FWW_ND_TARGET)) { + nxm_put_ipv6(b, NXM_NX_ND_TARGET, &flow->nd_target, + &in6addr_exact); + } + if (!(wc & FWW_ARP_SHA) + && flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) { + nxm_put_eth(b, NXM_NX_ND_SLL, flow->arp_sha); + } + if (!(wc & FWW_ARP_THA) + && flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) { + nxm_put_eth(b, NXM_NX_ND_TLL, flow->arp_tha); + } + } + } + if (!(wc & FWW_TP_DST)) { + nxm_put_8(b, NXM_NX_ICMPV6_CODE, ntohs(flow->tp_dst)); + } + break; + } + } + } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) { /* ARP. */ - if (!(wc & OFPFW_NW_PROTO)) { + if (!(wc & FWW_NW_PROTO)) { nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto)); } nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask); nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask); + if (!(wc & FWW_ARP_SHA)) { + nxm_put_eth(b, NXM_NX_ARP_SHA, flow->arp_sha); + } + if (!(wc & FWW_ARP_THA)) { + nxm_put_eth(b, NXM_NX_ARP_THA, flow->arp_tha); + } } /* Tunnel ID. */ - if (!(wc & NXFW_TUN_ID)) { - nxm_put_64(b, NXM_NX_TUN_ID, htonll(ntohl(flow->tun_id))); + nxm_put_64m(b, NXM_NX_TUN_ID, flow->tun_id, cr->wc.tun_id_mask); + + /* Registers. */ + for (i = 0; i < FLOW_N_REGS; i++) { + nxm_put_32m(b, NXM_NX_REG(i), + htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i])); } match_len = b->size - start_len; @@ -561,6 +864,8 @@ nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) /* nx_match_to_string() and helpers. */ +static void format_nxm_field_name(struct ds *, uint32_t header); + char * nx_match_to_string(const uint8_t *p, unsigned int match_len) { @@ -577,20 +882,13 @@ nx_match_to_string(const uint8_t *p, unsigned int match_len) unsigned int value_len = nxm_field_bytes(header); const uint8_t *value = p + 4; const uint8_t *mask = value + value_len; - const struct nxm_field *f; unsigned int i; if (s.length) { ds_put_cstr(&s, ", "); } - f = nxm_field_lookup(header); - if (f) { - ds_put_cstr(&s, f->name); - } else { - ds_put_format(&s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header)); - } - + format_nxm_field_name(&s, header); ds_put_char(&s, '('); for (i = 0; i < value_len; i++) { @@ -619,39 +917,39 @@ nx_match_to_string(const uint8_t *p, unsigned int match_len) return ds_steal_cstr(&s); } -static const struct nxm_field * -lookup_nxm_field(const char *name, int name_len) +static void +format_nxm_field_name(struct ds *s, uint32_t header) +{ + const struct nxm_field *f = nxm_field_lookup(header); + if (f) { + ds_put_cstr(s, f->name); + } else { + ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header)); + } +} + +static uint32_t +parse_nxm_field_name(const char *name, int name_len) { const struct nxm_field *f; + /* Check whether it's a field name. */ for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) { if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') { - return f; + return f->header; } } - return NULL; -} - -static const char * -parse_hex_bytes(struct ofpbuf *b, const char *s, unsigned int n) -{ - while (n--) { - int low, high; - uint8_t byte; - - s += strspn(s, " "); - low = hexit_value(*s); - high = low < 0 ? low : hexit_value(s[1]); - if (low < 0 || high < 0) { - ovs_fatal(0, "%.2s: hex digits expected", s); + /* Check whether it's a 32-bit field header value as hex. 
+ * (This isn't ordinarily useful except for testing error behavior.) */ + if (name_len == 8) { + uint32_t header = hexits_value(name, name_len, NULL); + if (header != UINT_MAX) { + return header; } - - byte = 16 * low + high; - ofpbuf_put(b, &byte, 1); - s += 2; } - return s; + + return 0; } /* nx_match_from_string(). */ @@ -670,35 +968,45 @@ nx_match_from_string(const char *s, struct ofpbuf *b) } for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) { - const struct nxm_field *f; + const char *name; + uint32_t header; int name_len; + size_t n; + name = s; name_len = strcspn(s, "("); if (s[name_len] != '(') { ovs_fatal(0, "%s: missing ( at end of nx_match", full_s); } - f = lookup_nxm_field(s, name_len); - if (!f) { + header = parse_nxm_field_name(name, name_len); + if (!header) { ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s); } s += name_len + 1; - nxm_put_header(b, f->header); - s = parse_hex_bytes(b, s, nxm_field_bytes(f->header)); - if (NXM_HASMASK(f->header)) { + nxm_put_header(b, header); + s = ofpbuf_put_hex(b, s, &n); + if (n != nxm_field_bytes(header)) { + ovs_fatal(0, "%.2s: hex digits expected", s); + } + if (NXM_HASMASK(header)) { s += strspn(s, " "); if (*s != '/') { - ovs_fatal(0, "%s: missing / in masked field %s", - full_s, f->name); + ovs_fatal(0, "%s: missing / in masked field %.*s", + full_s, name_len, name); + } + s = ofpbuf_put_hex(b, s + 1, &n); + if (n != nxm_field_bytes(header)) { + ovs_fatal(0, "%.2s: hex digits expected", s); } - s = parse_hex_bytes(b, s + 1, nxm_field_bytes(f->header)); } s += strspn(s, " "); if (*s != ')') { - ovs_fatal(0, "%s: missing ) following field %s", full_s, f->name); + ovs_fatal(0, "%s: missing ) following field %.*s", + full_s, name_len, name); } s++; } @@ -707,3 +1015,512 @@ nx_match_from_string(const char *s, struct ofpbuf *b) ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len); return match_len; } + +const char * +nxm_parse_field_bits(const char *s, uint32_t *headerp, int *ofsp, int *n_bitsp) +{ + const char *full_s = s; + const char *name; + uint32_t header; + int start, end; + int name_len; + int width; + + name = s; + name_len = strcspn(s, "["); + if (s[name_len] != '[') { + ovs_fatal(0, "%s: missing [ looking for field name", full_s); + } + + header = parse_nxm_field_name(name, name_len); + if (!header) { + ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s); + } + width = nxm_field_bits(header); + + s += name_len; + if (sscanf(s, "[%d..%d]", &start, &end) == 2) { + /* Nothing to do. 
*/ + } else if (sscanf(s, "[%d]", &start) == 1) { + end = start; + } else if (!strncmp(s, "[]", 2)) { + start = 0; + end = width - 1; + } else { + ovs_fatal(0, "%s: syntax error expecting [] or [] or " + "[..]", full_s); + } + s = strchr(s, ']') + 1; + + if (start > end) { + ovs_fatal(0, "%s: starting bit %d is after ending bit %d", + full_s, start, end); + } else if (start >= width) { + ovs_fatal(0, "%s: starting bit %d is not valid because field is only " + "%d bits wide", full_s, start, width); + } else if (end >= width){ + ovs_fatal(0, "%s: ending bit %d is not valid because field is only " + "%d bits wide", full_s, end, width); + } + + *headerp = header; + *ofsp = start; + *n_bitsp = end - start + 1; + + return s; +} + +void +nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s) +{ + const char *full_s = s; + uint32_t src, dst; + int src_ofs, dst_ofs; + int src_n_bits, dst_n_bits; + + s = nxm_parse_field_bits(s, &src, &src_ofs, &src_n_bits); + if (strncmp(s, "->", 2)) { + ovs_fatal(0, "%s: missing `->' following source", full_s); + } + s += 2; + s = nxm_parse_field_bits(s, &dst, &dst_ofs, &dst_n_bits); + if (*s != '\0') { + ovs_fatal(0, "%s: trailing garbage following destination", full_s); + } + + if (src_n_bits != dst_n_bits) { + ovs_fatal(0, "%s: source field is %d bits wide but destination is " + "%d bits wide", full_s, src_n_bits, dst_n_bits); + } + + move->type = htons(OFPAT_VENDOR); + move->len = htons(sizeof *move); + move->vendor = htonl(NX_VENDOR_ID); + move->subtype = htons(NXAST_REG_MOVE); + move->n_bits = htons(src_n_bits); + move->src_ofs = htons(src_ofs); + move->dst_ofs = htons(dst_ofs); + move->src = htonl(src); + move->dst = htonl(dst); +} + +void +nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s) +{ + const char *full_s = s; + uint32_t dst; + int ofs, n_bits; + uint64_t value; + + value = strtoull(s, (char **) &s, 0); + if (strncmp(s, "->", 2)) { + ovs_fatal(0, "%s: missing `->' following value", full_s); + } + s += 2; + s = nxm_parse_field_bits(s, &dst, &ofs, &n_bits); + if (*s != '\0') { + ovs_fatal(0, "%s: trailing garbage following destination", full_s); + } + + if (n_bits < 64 && (value >> n_bits) != 0) { + ovs_fatal(0, "%s: value %"PRIu64" does not fit into %d bits", + full_s, value, n_bits); + } + + load->type = htons(OFPAT_VENDOR); + load->len = htons(sizeof *load); + load->vendor = htonl(NX_VENDOR_ID); + load->subtype = htons(NXAST_REG_LOAD); + load->ofs_nbits = nxm_encode_ofs_nbits(ofs, n_bits); + load->dst = htonl(dst); + load->value = htonll(value); +} + +/* nxm_format_reg_move(), nxm_format_reg_load(). 
*/ + +void +nxm_format_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits) +{ + format_nxm_field_name(s, header); + if (ofs == 0 && n_bits == nxm_field_bits(header)) { + ds_put_cstr(s, "[]"); + } else if (n_bits == 1) { + ds_put_format(s, "[%d]", ofs); + } else { + ds_put_format(s, "[%d..%d]", ofs, ofs + n_bits - 1); + } +} + +void +nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s) +{ + int n_bits = ntohs(move->n_bits); + int src_ofs = ntohs(move->src_ofs); + int dst_ofs = ntohs(move->dst_ofs); + uint32_t src = ntohl(move->src); + uint32_t dst = ntohl(move->dst); + + ds_put_format(s, "move:"); + nxm_format_field_bits(s, src, src_ofs, n_bits); + ds_put_cstr(s, "->"); + nxm_format_field_bits(s, dst, dst_ofs, n_bits); +} + +void +nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s) +{ + int ofs = nxm_decode_ofs(load->ofs_nbits); + int n_bits = nxm_decode_n_bits(load->ofs_nbits); + uint32_t dst = ntohl(load->dst); + uint64_t value = ntohll(load->value); + + ds_put_format(s, "load:%#"PRIx64"->", value); + nxm_format_field_bits(s, dst, ofs, n_bits); +} + +/* nxm_check_reg_move(), nxm_check_reg_load(). */ + +static bool +field_ok(const struct nxm_field *f, const struct flow *flow, int size) +{ + return (f && !NXM_HASMASK(f->header) + && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header)); +} + +int +nxm_check_reg_move(const struct nx_action_reg_move *action, + const struct flow *flow) +{ + int src_ofs, dst_ofs, n_bits; + int error; + + n_bits = ntohs(action->n_bits); + src_ofs = ntohs(action->src_ofs); + dst_ofs = ntohs(action->dst_ofs); + + error = nxm_src_check(action->src, src_ofs, n_bits, flow); + if (error) { + return error; + } + + return nxm_dst_check(action->dst, dst_ofs, n_bits, flow); +} + +/* Given a flow, checks that the source field represented by 'src_header' + * in the range ['ofs', 'ofs' + 'n_bits') is valid. */ +int +nxm_src_check(ovs_be32 src_header, unsigned int ofs, unsigned int n_bits, + const struct flow *flow) +{ + const struct nxm_field *src = nxm_field_lookup(ntohl(src_header)); + + if (!n_bits) { + VLOG_WARN_RL(&rl, "zero bit source field"); + } else if (!field_ok(src, flow, ofs + n_bits)) { + VLOG_WARN_RL(&rl, "invalid source field"); + } else { + return 0; + } + + return BAD_ARGUMENT; +} + +/* Given a flow, checks that the destination field represented by 'dst_header' + * in the range ['ofs', 'ofs' + 'n_bits') is valid. */ +int +nxm_dst_check(ovs_be32 dst_header, unsigned int ofs, unsigned int n_bits, + const struct flow *flow) +{ + const struct nxm_field *dst = nxm_field_lookup(ntohl(dst_header)); + + if (!n_bits) { + VLOG_WARN_RL(&rl, "zero bit destination field"); + } else if (!field_ok(dst, flow, ofs + n_bits)) { + VLOG_WARN_RL(&rl, "invalid destination field"); + } else if (!dst->writable) { + VLOG_WARN_RL(&rl, "destination field is not writable"); + } else { + return 0; + } + + return BAD_ARGUMENT; +} + +int +nxm_check_reg_load(const struct nx_action_reg_load *action, + const struct flow *flow) +{ + unsigned int ofs = nxm_decode_ofs(action->ofs_nbits); + unsigned int n_bits = nxm_decode_n_bits(action->ofs_nbits); + int error; + + error = nxm_dst_check(action->dst, ofs, n_bits, flow); + if (error) { + return error; + } + + /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in + * action->value. */ + if (n_bits < 64 && ntohll(action->value) >> n_bits) { + return BAD_ARGUMENT; + } + + return 0; +} + +/* nxm_execute_reg_move(), nxm_execute_reg_load(). 
*/ + +static uint64_t +nxm_read_field(const struct nxm_field *src, const struct flow *flow) +{ + switch (src->index) { + case NFI_NXM_OF_IN_PORT: + return flow->in_port; + + case NFI_NXM_OF_ETH_DST: + return eth_addr_to_uint64(flow->dl_dst); + + case NFI_NXM_OF_ETH_SRC: + return eth_addr_to_uint64(flow->dl_src); + + case NFI_NXM_OF_ETH_TYPE: + return ntohs(ofputil_dl_type_to_openflow(flow->dl_type)); + + case NFI_NXM_OF_VLAN_TCI: + return ntohs(flow->vlan_tci); + + case NFI_NXM_OF_IP_TOS: + return flow->nw_tos; + + case NFI_NXM_OF_IP_PROTO: + case NFI_NXM_OF_ARP_OP: + return flow->nw_proto; + + case NFI_NXM_OF_IP_SRC: + case NFI_NXM_OF_ARP_SPA: + return ntohl(flow->nw_src); + + case NFI_NXM_OF_IP_DST: + case NFI_NXM_OF_ARP_TPA: + return ntohl(flow->nw_dst); + + case NFI_NXM_OF_TCP_SRC: + case NFI_NXM_OF_UDP_SRC: + return ntohs(flow->tp_src); + + case NFI_NXM_OF_TCP_DST: + case NFI_NXM_OF_UDP_DST: + return ntohs(flow->tp_dst); + + case NFI_NXM_OF_ICMP_TYPE: + case NFI_NXM_NX_ICMPV6_TYPE: + return ntohs(flow->tp_src) & 0xff; + + case NFI_NXM_OF_ICMP_CODE: + case NFI_NXM_NX_ICMPV6_CODE: + return ntohs(flow->tp_dst) & 0xff; + + case NFI_NXM_NX_TUN_ID: + return ntohll(flow->tun_id); + +#define NXM_READ_REGISTER(IDX) \ + case NFI_NXM_NX_REG##IDX: \ + return flow->regs[IDX]; \ + case NFI_NXM_NX_REG##IDX##_W: \ + NOT_REACHED(); + + NXM_READ_REGISTER(0); +#if FLOW_N_REGS >= 2 + NXM_READ_REGISTER(1); +#endif +#if FLOW_N_REGS >= 3 + NXM_READ_REGISTER(2); +#endif +#if FLOW_N_REGS >= 4 + NXM_READ_REGISTER(3); +#endif +#if FLOW_N_REGS > 4 +#error +#endif + + case NFI_NXM_NX_ARP_SHA: + case NFI_NXM_NX_ND_SLL: + return eth_addr_to_uint64(flow->arp_sha); + + case NFI_NXM_NX_ARP_THA: + case NFI_NXM_NX_ND_TLL: + return eth_addr_to_uint64(flow->arp_tha); + + case NFI_NXM_NX_TUN_ID_W: + case NFI_NXM_OF_ETH_DST_W: + case NFI_NXM_OF_VLAN_TCI_W: + case NFI_NXM_OF_IP_SRC_W: + case NFI_NXM_OF_IP_DST_W: + case NFI_NXM_OF_ARP_SPA_W: + case NFI_NXM_OF_ARP_TPA_W: + case NFI_NXM_NX_IPV6_SRC: + case NFI_NXM_NX_IPV6_SRC_W: + case NFI_NXM_NX_IPV6_DST: + case NFI_NXM_NX_IPV6_DST_W: + case NFI_NXM_NX_ND_TARGET: + case N_NXM_FIELDS: + NOT_REACHED(); + } + + NOT_REACHED(); +} + +/* Returns the value of the NXM field corresponding to 'header' at 'ofs_nbits' + * in 'flow'. */ +uint64_t +nxm_read_field_bits(ovs_be32 header, ovs_be16 ofs_nbits, + const struct flow *flow) +{ + int n_bits = nxm_decode_n_bits(ofs_nbits); + int ofs = nxm_decode_ofs(ofs_nbits); + uint64_t mask, data; + + mask = n_bits == 64 ? 
UINT64_MAX : (UINT64_C(1) << n_bits) - 1; + data = nxm_read_field(nxm_field_lookup(ntohl(header)), flow); + data = (data >> ofs) & mask; + + return data; +} + +static void +nxm_write_field(const struct nxm_field *dst, struct flow *flow, + uint64_t new_value) +{ + switch (dst->index) { + case NFI_NXM_OF_ETH_DST: + eth_addr_from_uint64(new_value, flow->dl_dst); + break; + + case NFI_NXM_OF_ETH_SRC: + eth_addr_from_uint64(new_value, flow->dl_src); + break; + + case NFI_NXM_OF_VLAN_TCI: + flow->vlan_tci = htons(new_value); + break; + + case NFI_NXM_NX_TUN_ID: + flow->tun_id = htonll(new_value); + break; + +#define NXM_WRITE_REGISTER(IDX) \ + case NFI_NXM_NX_REG##IDX: \ + flow->regs[IDX] = new_value; \ + break; \ + case NFI_NXM_NX_REG##IDX##_W: \ + NOT_REACHED(); + + NXM_WRITE_REGISTER(0); +#if FLOW_N_REGS >= 2 + NXM_WRITE_REGISTER(1); +#endif +#if FLOW_N_REGS >= 3 + NXM_WRITE_REGISTER(2); +#endif +#if FLOW_N_REGS >= 4 + NXM_WRITE_REGISTER(3); +#endif +#if FLOW_N_REGS > 4 +#error +#endif + + case NFI_NXM_OF_IP_TOS: + flow->nw_tos = new_value & IP_DSCP_MASK; + break; + + case NFI_NXM_OF_IP_SRC: + flow->nw_src = htonl(new_value); + break; + + case NFI_NXM_OF_IP_DST: + flow->nw_dst = htonl(new_value); + break; + + case NFI_NXM_OF_TCP_SRC: + case NFI_NXM_OF_UDP_SRC: + flow->tp_src = htons(new_value); + break; + + case NFI_NXM_OF_TCP_DST: + case NFI_NXM_OF_UDP_DST: + flow->tp_dst = htons(new_value); + break; + + case NFI_NXM_OF_IN_PORT: + case NFI_NXM_OF_ETH_TYPE: + case NFI_NXM_OF_IP_PROTO: + case NFI_NXM_OF_ARP_OP: + case NFI_NXM_OF_ARP_SPA: + case NFI_NXM_OF_ARP_TPA: + case NFI_NXM_OF_ICMP_TYPE: + case NFI_NXM_OF_ICMP_CODE: + case NFI_NXM_NX_TUN_ID_W: + case NFI_NXM_OF_ETH_DST_W: + case NFI_NXM_OF_VLAN_TCI_W: + case NFI_NXM_OF_IP_SRC_W: + case NFI_NXM_OF_IP_DST_W: + case NFI_NXM_OF_ARP_SPA_W: + case NFI_NXM_OF_ARP_TPA_W: + case NFI_NXM_NX_ARP_SHA: + case NFI_NXM_NX_ARP_THA: + case NFI_NXM_NX_IPV6_SRC: + case NFI_NXM_NX_IPV6_SRC_W: + case NFI_NXM_NX_IPV6_DST: + case NFI_NXM_NX_IPV6_DST_W: + case NFI_NXM_NX_ICMPV6_TYPE: + case NFI_NXM_NX_ICMPV6_CODE: + case NFI_NXM_NX_ND_TARGET: + case NFI_NXM_NX_ND_SLL: + case NFI_NXM_NX_ND_TLL: + case N_NXM_FIELDS: + NOT_REACHED(); + } +} + +void +nxm_execute_reg_move(const struct nx_action_reg_move *action, + struct flow *flow) +{ + ovs_be16 src_ofs_nbits, dst_ofs_nbits; + uint64_t src_data; + int n_bits; + + n_bits = ntohs(action->n_bits); + src_ofs_nbits = nxm_encode_ofs_nbits(ntohs(action->src_ofs), n_bits); + dst_ofs_nbits = nxm_encode_ofs_nbits(ntohs(action->dst_ofs), n_bits); + + src_data = nxm_read_field_bits(action->src, src_ofs_nbits, flow); + nxm_reg_load(action->dst, dst_ofs_nbits, src_data, flow); +} + +void +nxm_execute_reg_load(const struct nx_action_reg_load *action, + struct flow *flow) +{ + nxm_reg_load(action->dst, action->ofs_nbits, ntohll(action->value), flow); +} + +/* Calculates ofs and n_bits from the given 'ofs_nbits' parameter, and copies + * 'src_data'[0:n_bits] to 'dst_header'[ofs:ofs+n_bits] in the given 'flow'. */ +void +nxm_reg_load(ovs_be32 dst_header, ovs_be16 ofs_nbits, uint64_t src_data, + struct flow *flow) +{ + int n_bits = nxm_decode_n_bits(ofs_nbits); + int dst_ofs = nxm_decode_ofs(ofs_nbits); + uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1; + + /* Get remaining bits of the destination field. */ + const struct nxm_field *dst = nxm_field_lookup(ntohl(dst_header)); + uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs); + + /* Get the final value. 
*/ + uint64_t new_data = dst_data | (src_data << dst_ofs); + + nxm_write_field(dst, flow, new_data); +}
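
A note on the field-width helpers made public by this patch: nxm_field_bytes() and nxm_field_bits() derive a field's payload width from the NXM header itself, because a masked entry's on-wire length covers value plus mask. The sketch below assumes the usual NXM header layout (vendor:16, field:7, has-mask:1, length:8) from the Nicira extension header; the macro names are hypothetical stand-ins, not the real NXM_* macros.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the NXM header bit layout assumed by nxm_field_bytes(): the low
 * 8 bits give the total payload length, bit 8 is the has-mask flag, and a
 * masked payload is half value, half mask. */
#define SKETCH_NXM_HEADER(VENDOR, FIELD, HASMASK, LENGTH) \
    (((uint32_t) (VENDOR) << 16) | ((FIELD) << 9) | ((HASMASK) << 8) | (LENGTH))
#define SKETCH_NXM_HASMASK(H) (((H) >> 8) & 1)
#define SKETCH_NXM_LENGTH(H)  ((H) & 0xff)

static int
sketch_field_bytes(uint32_t header)
{
    unsigned int length = SKETCH_NXM_LENGTH(header);
    return SKETCH_NXM_HASMASK(header) ? length / 2 : length;
}

int
main(void)
{
    /* A masked 32-bit field (e.g. an IPv4 address match with a netmask)
     * carries a 4-byte value plus a 4-byte mask, so its header length is 8
     * but the field itself is 4 bytes wide.  The field number here is
     * illustrative only. */
    uint32_t masked_ipv4 = SKETCH_NXM_HEADER(0x0000, 7, 1, 8);
    printf("%d bytes, %d bits\n", sketch_field_bytes(masked_ipv4),
           sketch_field_bytes(masked_ipv4) * 8);   /* prints "4 bytes, 32 bits" */
    return 0;
}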
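The new reg_move/reg_load code leans on the NXAST_REG_LOAD 'ofs_nbits' field, which packs a bit offset and a width into one 16-bit value via nxm_encode_ofs_nbits()/nxm_decode_ofs()/nxm_decode_n_bits() (defined alongside the Nicira action structs, not in this file). A minimal standalone sketch of that packing, assuming the (ofs << 6) | (n_bits - 1) layout implied by the decoders used above:

#include <arpa/inet.h>   /* htons, ntohs */
#include <stdint.h>
#include <stdio.h>

/* Sketch of the ofs_nbits packing assumed by nxm_parse_reg_load() and
 * nxm_read_field_bits(): the bit offset lives in the upper 10 bits and
 * (n_bits - 1) in the low 6 bits, so widths of 1..64 are representable. */
static uint16_t
sketch_encode_ofs_nbits(int ofs, int n_bits)
{
    return htons((ofs << 6) | (n_bits - 1));
}

static int
sketch_decode_ofs(uint16_t ofs_nbits)
{
    return ntohs(ofs_nbits) >> 6;
}

static int
sketch_decode_n_bits(uint16_t ofs_nbits)
{
    return (ntohs(ofs_nbits) & 0x3f) + 1;
}

int
main(void)
{
    /* An action like "load:0x5->NXM_NX_REG0[4..7]" uses ofs=4, n_bits=4. */
    uint16_t x = sketch_encode_ofs_nbits(4, 4);
    printf("ofs=%d n_bits=%d\n",
           sketch_decode_ofs(x), sketch_decode_n_bits(x));   /* ofs=4 n_bits=4 */
    return 0;
}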
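nxm_reg_load() above splices src_data into an arbitrary bit range of the destination field: it builds an n_bits-wide mask, clears dst[ofs..ofs+n_bits-1], and ORs the shifted source bits in. The same arithmetic on a plain uint64_t, as a self-contained hypothetical helper (not part of nx-match.c):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Copy value[0:n_bits] into dst[ofs:ofs+n_bits), mirroring the mask-and-merge
 * done by nxm_reg_load().  'n_bits' must be in 1..64 and ofs + n_bits <= 64. */
static uint64_t
splice_bits(uint64_t dst, uint64_t value, int ofs, int n_bits)
{
    uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
    return (dst & ~(mask << ofs)) | ((value & mask) << ofs);
}

int
main(void)
{
    /* Load 0x5 into bits [4..7] of a register currently holding 0xffff. */
    printf("%#"PRIx64"\n", splice_bits(0xffff, 0x5, 4, 4));   /* prints 0xff5f */
    return 0;
}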
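Finally, the textual forms accepted by the new parsers: nxm_parse_field_bits() takes NAME[], NAME[bit], or NAME[start..end], and nxm_parse_reg_move()/nxm_parse_reg_load() wrap that with "->" and require matching source/destination widths. A hypothetical driver, assuming it is built inside the Open vSwitch tree so these headers and functions are available:

#include <config.h>
#include "nx-match.h"
#include "openflow/nicira-ext.h"

int
main(void)
{
    struct nx_action_reg_move move;
    struct nx_action_reg_load load;

    /* Copy the low 32 bits of the Ethernet source address into register 0;
     * both sides of the move are 32 bits wide, as required. */
    nxm_parse_reg_move(&move, "NXM_OF_ETH_SRC[0..31]->NXM_NX_REG0[]");

    /* Set bits 0..5 of register 0 to 1.  On malformed input these parsers
     * call ovs_fatal(), so there is no error code to check here. */
    nxm_parse_reg_load(&load, "0x1->NXM_NX_REG0[0..5]");

    return 0;
}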