NXAST_SET_TUNNEL, /* struct nx_action_set_tunnel */
NXAST_DROP_SPOOFED_ARP, /* struct nx_action_drop_spoofed_arp */
NXAST_SET_QUEUE, /* struct nx_action_set_queue */
- NXAST_POP_QUEUE /* struct nx_action_pop_queue */
+ NXAST_POP_QUEUE, /* struct nx_action_pop_queue */
+ NXAST_REG_MOVE, /* struct nx_action_reg_move */
+ NXAST_REG_LOAD, /* struct nx_action_reg_load */
};
/* Header for Nicira-defined actions. */
};
OFP_ASSERT(sizeof(struct nx_action_pop_queue) == 16);
+/* Action structure for NXAST_REG_MOVE.
+ *
+ * Copies src[src_ofs:src_ofs+n_bits] to dst[dst_ofs:dst_ofs+n_bits], where
+ * a[b:c] denotes the bits within 'a' numbered 'b' through 'c' (not including
+ * bit 'c'). Bit numbering starts at 0 for the least-significant bit, 1 for
+ * the next most significant bit, and so on.
+ *
+ * 'src' and 'dst' are nxm_header values with nxm_hasmask=0. The following
+ * nxm_header values are potentially acceptable as 'src':
+ *
+ * - NXM_OF_IN_PORT
+ * - NXM_OF_ETH_DST
+ * - NXM_OF_ETH_SRC
+ * - NXM_OF_ETH_TYPE
+ * - NXM_OF_VLAN_TCI
+ * - NXM_OF_IP_TOS
+ * - NXM_OF_IP_PROTO
+ * - NXM_OF_IP_SRC
+ * - NXM_OF_IP_DST
+ * - NXM_OF_TCP_SRC
+ * - NXM_OF_TCP_DST
+ * - NXM_OF_UDP_SRC
+ * - NXM_OF_UDP_DST
+ * - NXM_OF_ICMP_TYPE
+ * - NXM_OF_ICMP_CODE
+ * - NXM_OF_ARP_OP
+ * - NXM_OF_ARP_SPA
+ * - NXM_OF_ARP_TPA
+ * - NXM_NX_TUN_ID
+ * - NXM_NX_REG(idx) for idx in the switch's accepted range.
+ *
+ * The following nxm_header values are potentially acceptable as 'dst':
+ *
+ * - NXM_NX_REG(idx) for idx in the switch's accepted range.
+ *
+ * - NXM_OF_VLAN_TCI. Modifying this field's value has side effects on the
+ * packet's 802.1Q header. Setting a value with CFI=0 removes the 802.1Q
+ * header (if any), ignoring the other bits. Setting a value with CFI=1
+ * adds or modifies the 802.1Q header appropriately, setting the TCI field
+ * to the field's new value (with the CFI bit masked out).
+ *
+ * - NXM_NX_TUN_ID. Modifying this value modifies the tunnel ID used for the
+ * packet's next tunnel encapsulation.
+ *
+ * A given nxm_header value may be used as 'src' or 'dst' only on a flow whose
+ * nx_match satisfies its prerequisites. For example, NXM_OF_IP_TOS may be
+ * used only if the flow's nx_match includes an nxm_entry that specifies
+ * nxm_type=NXM_OF_ETH_TYPE, nxm_hasmask=0, and nxm_value=0x0800.
+ *
+ * The switch will reject actions for which src_ofs+n_bits is greater than the
+ * width of 'src' or dst_ofs+n_bits is greater than the width of 'dst' with
+ * error type OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT.
+ */
+struct nx_action_reg_move {
+ ovs_be16 type; /* OFPAT_VENDOR. */
+ ovs_be16 len; /* Length is 24. */
+ ovs_be32 vendor; /* NX_VENDOR_ID. */
+ ovs_be16 subtype; /* NXAST_REG_MOVE. */
+ ovs_be16 n_bits; /* Number of bits. */
+ ovs_be16 src_ofs; /* Starting bit offset in source. */
+ ovs_be16 dst_ofs; /* Starting bit offset in destination. */
+ ovs_be32 src; /* Source field's nxm_header. */
+ ovs_be32 dst; /* Destination field's nxm_header. */
+};
+OFP_ASSERT(sizeof(struct nx_action_reg_move) == 24);
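
/* Example (an illustrative sketch, not part of the patch; the function name
 * is made up and <string.h> is assumed): building a reg_move that copies the
 * 16-bit NXM_OF_IN_PORT field into bits 0..15 of register 0. */
static void
example_build_reg_move(struct nx_action_reg_move *move)
{
    memset(move, 0, sizeof *move);
    move->type = htons(OFPAT_VENDOR);
    move->len = htons(sizeof *move);        /* 24 bytes. */
    move->vendor = htonl(NX_VENDOR_ID);
    move->subtype = htons(NXAST_REG_MOVE);
    move->n_bits = htons(16);               /* Copy 16 bits... */
    move->src_ofs = htons(0);               /* ...from in_port[0:16]... */
    move->dst_ofs = htons(0);               /* ...into reg0[0:16]. */
    move->src = htonl(NXM_OF_IN_PORT);
    move->dst = htonl(NXM_NX_REG(0));
}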
+
+/* Action structure for NXAST_REG_LOAD.
+ *
+ * Copies value[0:n_bits] to dst[ofs:ofs+n_bits], where a[b:c] denotes the bits
+ * within 'a' numbered 'b' through 'c' (not including bit 'c'). Bit numbering
+ * starts at 0 for the least-significant bit, 1 for the next most significant
+ * bit, and so on.
+ *
+ * 'dst' must be one of the following:
+ *
+ * - NXM_NX_REG(idx) for idx in the switch's accepted range.
+ *
+ * The 'ofs' and 'n_bits' fields are combined into a single 'ofs_nbits' field
+ * to avoid enlarging the structure by another 8 bytes. To allow 'n_bits' to
+ * take a value between 1 and 64 (inclusive) while taking up only 6 bits, it is
+ * also stored as one less than its true value:
+ *
+ *  15                           6 5                0
+ * +------------------------------+------------------+
+ * |              ofs             |    n_bits - 1    |
+ * +------------------------------+------------------+
+ *
+ * The switch will reject actions for which ofs+n_bits is greater than the
+ * width of 'dst', or in which any bits in 'value' with value 2**n_bits or
+ * greater are set to 1, with error type OFPET_BAD_ACTION, code
+ * OFPBAC_BAD_ARGUMENT.
+ */
+struct nx_action_reg_load {
+ ovs_be16 type; /* OFPAT_VENDOR. */
+ ovs_be16 len; /* Length is 24. */
+ ovs_be32 vendor; /* NX_VENDOR_ID. */
+ ovs_be16 subtype; /* NXAST_REG_LOAD. */
+ ovs_be16 ofs_nbits; /* (ofs << 6) | (n_bits - 1). */
+ ovs_be32 dst; /* Destination register. */
+ ovs_be64 value; /* Immediate value. */
+};
+OFP_ASSERT(sizeof(struct nx_action_reg_load) == 24);
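
/* Example (an illustrative sketch, not part of the patch; the function name
 * is made up): loading the 4-bit value 0x9 into bits 6..9 of register 0.
 * ofs = 6 and n_bits = 4 encode as (6 << 6) | (4 - 1) == 0x183. */
static void
example_build_reg_load(struct nx_action_reg_load *load)
{
    memset(load, 0, sizeof *load);
    load->type = htons(OFPAT_VENDOR);
    load->len = htons(sizeof *load);        /* 24 bytes. */
    load->vendor = htonl(NX_VENDOR_ID);
    load->subtype = htons(NXAST_REG_LOAD);
    load->ofs_nbits = htons((6 << 6) | (4 - 1));
    load->dst = htonl(NXM_NX_REG(0));
    load->value = htonll(0x9);              /* Must be less than 2**4. */
}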
+
/* Wildcard for tunnel ID. */
#define NXFW_TUN_ID (1 << 25)
/* ## Nicira match extensions. ## */
/* ## ------------------------ ## */
+/* Metadata registers.
+ *
+ * Registers initially have value 0. Actions allow register values to be
+ * manipulated.
+ *
+ * Prereqs: None.
+ *
+ * Format: Array of 32-bit integer registers. Space is reserved for up to
+ * NXM_NX_MAX_REGS registers, but switches may implement fewer.
+ *
+ * Masking: Arbitrary masks. */
+#define NXM_NX_MAX_REGS 16
+#define NXM_NX_REG(IDX) NXM_HEADER (0x0001, IDX, 4)
+#define NXM_NX_REG_W(IDX) NXM_HEADER_W(0x0001, IDX, 4)
+#define NXM_NX_REG_IDX(HEADER) NXM_FIELD(HEADER)
+#define NXM_IS_NX_REG(HEADER) (!((((HEADER) ^ NXM_NX_REG(0))) & 0xffffe0ff))
+#define NXM_NX_REG0 NXM_HEADER (0x0001, 0, 4)
+#define NXM_NX_REG0_W NXM_HEADER_W(0x0001, 0, 4)
+#define NXM_NX_REG1 NXM_HEADER (0x0001, 1, 4)
+#define NXM_NX_REG1_W NXM_HEADER_W(0x0001, 1, 4)
+#define NXM_NX_REG2 NXM_HEADER (0x0001, 2, 4)
+#define NXM_NX_REG2_W NXM_HEADER_W(0x0001, 2, 4)
+#define NXM_NX_REG3 NXM_HEADER (0x0001, 3, 4)
+#define NXM_NX_REG3_W NXM_HEADER_W(0x0001, 3, 4)
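
/* Illustrative compile-time spot-checks (not part of the patch), assuming
 * the usual NXM_HEADER bit layout: vendor in bits 31:16, field in bits 15:9,
 * nxm_hasmask in bit 8, payload length in bits 7:0.  Note that
 * NXM_IS_NX_REG() matches only the exact-match (nxm_hasmask=0) headers,
 * since the masked variants also differ in the length byte. */
BUILD_ASSERT_DECL(NXM_NX_REG0 == 0x00010004);
BUILD_ASSERT_DECL(NXM_NX_REG0_W == 0x00010108);
BUILD_ASSERT_DECL(NXM_NX_REG_IDX(NXM_NX_REG1) == 1);
BUILD_ASSERT_DECL(NXM_IS_NX_REG(NXM_NX_REG1) && !NXM_IS_NX_REG(NXM_NX_REG1_W));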
+
/* Tunnel ID.
*
* For a packet received via GRE tunnel including a (32-bit) key, the key is
const struct flow_wildcards *wildcards)
{
const uint32_t wc = wildcards->wildcards;
+ int i;
- BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 37);
+ BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 37 + FLOW_N_REGS * 4);
+
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
+ return false;
+ }
+ }
return ((wc & NXFW_TUN_ID || a->tun_id == b->tun_id)
&& !((a->nw_src ^ b->nw_src) & wildcards->nw_src_mask)
zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
const uint32_t wc = wildcards->wildcards;
+ int i;
- BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 37);
+ BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 37 + FLOW_N_REGS * 4);
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ flow->regs[i] &= wildcards->reg_masks[i];
+ }
if (wc & NXFW_TUN_ID) {
flow->tun_id = 0;
}
flow_to_match(const struct flow *flow, uint32_t wildcards,
int flow_format, struct ofp_match *match)
{
- if (flow_format != NXFF_TUN_ID_FROM_COOKIE) {
- wildcards &= OFPFW_ALL;
- }
+ wildcards &= (flow_format == NXFF_TUN_ID_FROM_COOKIE ? OVSFW_ALL
+ : OFPFW_ALL);
match->wildcards = htonl(wildcards);
match->in_port = htons(flow->in_port == ODPP_LOCAL ? OFPP_LOCAL
ovs_be64 cookie, struct flow *flow,
struct flow_wildcards *wc)
{
- flow_wildcards_init(wc, ntohl(match->wildcards));
- if (flow_format == NXFF_TUN_ID_FROM_COOKIE
- && !(wc->wildcards & NXFW_TUN_ID)) {
- flow->tun_id = htonl(ntohll(cookie) >> 32);
+ uint32_t wildcards = ntohl(match->wildcards) & OVSFW_ALL;
+
+ flow->tun_id = 0;
+ if (flow_format != NXFF_TUN_ID_FROM_COOKIE) {
+ wildcards |= NXFW_TUN_ID;
} else {
- wc->wildcards |= NXFW_TUN_ID;
- flow->tun_id = 0;
+ if (!(wildcards & NXFW_TUN_ID)) {
+ flow->tun_id = htonl(ntohll(cookie) >> 32);
+ }
}
+ flow_wildcards_init(wc, wildcards);
flow->nw_src = match->nw_src;
flow->nw_dst = match->nw_dst;
}
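
/* Worked example (illustrative): with flow_format == NXFF_TUN_ID_FROM_COOKIE
 * and the NXFW_TUN_ID bit clear in 'wildcards', a cookie of
 * htonll(UINT64_C(0x1234567800000000)) yields
 * flow->tun_id == htonl(0x12345678).  In every other case the code above
 * leaves tun_id 0, with NXFW_TUN_ID set in the wildcards. */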
/* Initializes 'wc' from 'wildcards', which may be any combination of the
- * OFPFW_* and OVSFW_* wildcard bits. */
+ * OFPFW_* and OVSFW_* wildcard bits.
+ *
+ * All registers (NXM_NX_REG*) are always completely wildcarded, because
+ * 'wildcards' doesn't have enough bits to give the details on which
+ * particular bits should be wildcarded (if any). The caller may use
+ * flow_wildcards_set_reg_mask() to update the register wildcard masks. */
void
flow_wildcards_init(struct flow_wildcards *wc, uint32_t wildcards)
{
- wc->wildcards = flow_wildcards_normalize(wildcards);
+ wc->wildcards = flow_wildcards_normalize(wildcards) | FWW_REGS;
wc->nw_src_mask = flow_nw_bits_to_mask(wc->wildcards, OFPFW_NW_SRC_SHIFT);
wc->nw_dst_mask = flow_nw_bits_to_mask(wc->wildcards, OFPFW_NW_DST_SHIFT);
+ memset(wc->reg_masks, 0, sizeof wc->reg_masks);
}
/* Initializes 'wc' as an exact-match set of wildcards; that is, 'wc' does not
void
flow_wildcards_init_exact(struct flow_wildcards *wc)
{
- flow_wildcards_init(wc, 0);
+ wc->wildcards = 0;
+ wc->nw_src_mask = htonl(UINT32_MAX);
+ wc->nw_dst_mask = htonl(UINT32_MAX);
+ memset(wc->reg_masks, 0xff, sizeof wc->reg_masks);
}
static inline uint32_t
{
uint32_t wb1 = src1->wildcards;
uint32_t wb2 = src2->wildcards;
+ int i;
dst->wildcards = (wb1 | wb2) & ~(OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK);
dst->wildcards |= combine_nw_bits(wb1, wb2, OFPFW_NW_SRC_SHIFT);
dst->wildcards |= combine_nw_bits(wb1, wb2, OFPFW_NW_DST_SHIFT);
dst->nw_src_mask = src1->nw_src_mask & src2->nw_src_mask;
dst->nw_dst_mask = src1->nw_dst_mask & src2->nw_dst_mask;
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ dst->reg_masks[i] = src1->reg_masks[i] & src2->reg_masks[i];
+ }
}
/* Returns a hash of the wildcards in 'wc'. */
{
/* There is no need to include nw_src_mask or nw_dst_mask because they do
* not add any information (they can be computed from wc->wildcards). */
- return hash_int(wc->wildcards, 0);
+ BUILD_ASSERT_DECL(sizeof wc->wildcards == 4);
+ BUILD_ASSERT_DECL(sizeof wc->reg_masks == 4 * FLOW_N_REGS);
+ BUILD_ASSERT_DECL(offsetof(struct flow_wildcards, wildcards) == 0);
+ BUILD_ASSERT_DECL(offsetof(struct flow_wildcards, reg_masks) == 4);
+ return hash_words((const uint32_t *) wc, 1 + FLOW_N_REGS, 0);
}
/* Returns true if 'a' and 'b' represent the same wildcards, false if they are
flow_wildcards_equal(const struct flow_wildcards *a,
const struct flow_wildcards *b)
{
- return a->wildcards == b->wildcards;
+ int i;
+
+ if (a->wildcards != b->wildcards) {
+ return false;
+ }
+
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ if (a->reg_masks[i] != b->reg_masks[i]) {
+ return false;
+ }
+ }
+
+ return true;
}
/* Returns true if at least one bit or field is wildcarded in 'a' but not in
flow_wildcards_has_extra(const struct flow_wildcards *a,
const struct flow_wildcards *b)
{
+ int i;
+
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) {
+ return true;
+ }
+ }
+
#define OFPFW_NW_MASK (OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK)
return ((a->wildcards & ~(b->wildcards | OFPFW_NW_MASK))
|| (a->nw_src_mask & b->nw_src_mask) != b->nw_src_mask
{
return set_nw_mask(wc, mask, &wc->nw_dst_mask, OFPFW_NW_DST_SHIFT);
}
+
+/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
+ * (A 0-bit indicates a wildcard bit.) */
+void
+flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
+{
+ if (mask != wc->reg_masks[idx]) {
+ wc->reg_masks[idx] = mask;
+ if (mask != UINT32_MAX) {
+ wc->wildcards |= FWW_REGS;
+ } else {
+ int i;
+
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ if (wc->reg_masks[i] != UINT32_MAX) {
+ wc->wildcards |= FWW_REGS;
+ return;
+ }
+ }
+ wc->wildcards &= ~FWW_REGS;
+ }
+ }
+}
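
/* Usage sketch (illustrative, not part of the patch; assumes <assert.h> and
 * a made-up function name).  Wildcarding any register bit sets FWW_REGS;
 * restoring exact-match on every register clears it again, per invariant #5
 * in flow.h. */
static void
example_reg_mask_invariant(struct flow_wildcards *wc)
{
    flow_wildcards_init_exact(wc);      /* Every reg_masks[i] is UINT32_MAX. */
    flow_wildcards_set_reg_mask(wc, 1, 0xffffff00); /* Wildcard low byte. */
    assert(wc->wildcards & FWW_REGS);
    flow_wildcards_set_reg_mask(wc, 1, UINT32_MAX); /* Exact-match again. */
    assert(!(wc->wildcards & FWW_REGS));
}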
struct ofp_match;
struct ofpbuf;
+#define FLOW_N_REGS 3
+BUILD_ASSERT_DECL(FLOW_N_REGS <= NXM_NX_MAX_REGS);
+
struct flow {
+ uint32_t regs[FLOW_N_REGS]; /* Registers. */
ovs_be32 tun_id; /* Encapsulating tunnel ID. */
ovs_be32 nw_src; /* IP source address. */
ovs_be32 nw_dst; /* IP destination address. */
/* Assert that there are FLOW_SIG_SIZE bytes of significant data in "struct
* flow", followed by FLOW_PAD_SIZE bytes of padding. */
-#define FLOW_SIG_SIZE 37
+#define FLOW_SIG_SIZE (37 + FLOW_N_REGS * 4)
#define FLOW_PAD_SIZE 3
BUILD_ASSERT_DECL(offsetof(struct flow, nw_tos) == FLOW_SIG_SIZE - 1);
BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->nw_tos) == 1);
return hash_bytes(flow, FLOW_SIG_SIZE, basis);
}
+/* Set to 1 in the 'wildcards' member of struct flow_wildcards if any bits in
+ * any of the reg_masks are wildcarded. This maintains the invariant that
+ * 'wildcards' is nonzero if and only if any bits are wildcarded.
+ *
+ * This is used only internally to Open vSwitch--it never appears in the wire
+ * protocol. */
+#define FWW_REGS (1u << 31)
+BUILD_ASSERT_DECL(!(FWW_REGS & OVSFW_ALL)); /* Avoid collisions. */
+
/* Information on wildcards for a flow, as a supplement to "struct flow".
*
* The flow_wildcards_*() functions below both depend on and maintain the
* 1. 'wildcards' is nonzero if and only if at least one bit or field is
* wildcarded.
*
- * 2. Bits in 'wildcards' not included in OVSFW_ALL are set to 0. (This is a
- * corollary to invariant #1.)
+ * 2. Bits in 'wildcards' not included in OVSFW_ALL or FWW_REGS are set to 0.
+ * (This is a corollary to invariant #1.)
*
* 3. The fields in 'wildcards' masked by OFPFW_NW_SRC_MASK and
* OFPFW_NW_DST_MASK have values between 0 and 32, inclusive.
*
* 4. The fields masked by OFPFW_NW_SRC_MASK and OFPFW_NW_DST_MASK correspond
* correctly to the masks in 'nw_src_mask' and 'nw_dst_mask', respectively.
+ *
+ * 5. FWW_REGS is set to 1 in 'wildcards' if and only if at least one bit in
+ * 'reg_masks[]' is nonzero. (This allows wildcarded 'reg_masks[]' to
+ * satisfy invariant #1.)
+ *
+ * 6. If FWW_REGS is set to 0 in 'wildcards', then the values of all of the
+ * other members can be correctly predicted based on 'wildcards' alone.
*/
struct flow_wildcards {
uint32_t wildcards; /* enum ofp_flow_wildcards. */
+ uint32_t reg_masks[FLOW_N_REGS]; /* 1-bit in each significant regs bit. */
ovs_be32 nw_src_mask; /* 1-bit in each significant nw_src bit. */
ovs_be32 nw_dst_mask; /* 1-bit in each significant nw_dst bit. */
};
bool flow_wildcards_set_nw_src_mask(struct flow_wildcards *, ovs_be32);
bool flow_wildcards_set_nw_dst_mask(struct flow_wildcards *, ovs_be32);
+void flow_wildcards_set_reg_mask(struct flow_wildcards *,
+ int idx, uint32_t mask);
void flow_wildcards_combine(struct flow_wildcards *dst,
const struct flow_wildcards *src1,
unsigned int length = NXM_LENGTH(header);
return NXM_HASMASK(header) ? length / 2 : length;
}
+
+/* Returns the width of the data for a field with the given 'header', in
+ * bits. */
+static int
+nxm_field_bits(uint32_t header)
+{
+ return nxm_field_bytes(header) * 8;
+}
\f
/* nx_pull_match() and helpers. */
}
}
+static int
+parse_nx_reg(const struct nxm_field *f,
+ struct flow *flow, struct flow_wildcards *wc,
+ const void *value, const void *maskp)
+{
+ int idx = NXM_NX_REG_IDX(f->header);
+ if (wc->reg_masks[idx]) {
+ return NXM_DUP_TYPE;
+ } else {
+ flow_wildcards_set_reg_mask(wc, idx,
+ (NXM_HASMASK(f->header)
+ ? ntohl(get_unaligned_u32(maskp))
+ : UINT32_MAX));
+ flow->regs[idx] = ntohl(get_unaligned_u32(value));
+ flow->regs[idx] &= wc->reg_masks[idx];
+ return 0;
+ }
+}
+
static int
parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f,
const void *value, const void *mask)
flow->tun_id = htonl(ntohll(get_unaligned_u64(value)));
return 0;
+ /* Registers. */
+ case NFI_NXM_NX_REG0:
+ case NFI_NXM_NX_REG0_W:
+#if FLOW_N_REGS >= 2
+ case NFI_NXM_NX_REG1:
+ case NFI_NXM_NX_REG1_W:
+#endif
+#if FLOW_N_REGS >= 3
+ case NFI_NXM_NX_REG2:
+ case NFI_NXM_NX_REG2_W:
+#endif
+#if FLOW_N_REGS >= 4
+ case NFI_NXM_NX_REG3:
+ case NFI_NXM_NX_REG3_W:
+#endif
+#if FLOW_N_REGS > 4
+#error
+#endif
+ return parse_nx_reg(f, flow, wc, value, mask);
+
case N_NXM_FIELDS:
NOT_REACHED();
}
const size_t start_len = b->size;
ovs_be16 vid, pcp;
int match_len;
+ int i;
/* Metadata. */
if (!(wc & OFPFW_IN_PORT)) {
nxm_put_64(b, NXM_NX_TUN_ID, htonll(ntohl(flow->tun_id)));
}
+ /* Registers. */
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ nxm_put_32m(b, NXM_NX_REG(i),
+ htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
+ }
+
match_len = b->size - start_len;
ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
return match_len;
ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
return match_len;
}
+\f
+/* nxm_check_reg_move(), nxm_check_reg_load(). */
+
+static bool
+field_ok(const struct nxm_field *f, const struct flow *flow, int size)
+{
+ return (f && !NXM_HASMASK(f->header)
+ && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header));
+}
+
+int
+nxm_check_reg_move(const struct nx_action_reg_move *action,
+ const struct flow *flow)
+{
+ const struct nxm_field *src;
+ const struct nxm_field *dst;
+
+ if (action->n_bits == htons(0)) {
+ return BAD_ARGUMENT;
+ }
+
+ src = nxm_field_lookup(ntohl(action->src));
+ if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) {
+ return BAD_ARGUMENT;
+ }
+
+ dst = nxm_field_lookup(ntohl(action->dst));
+ if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) {
+ return BAD_ARGUMENT;
+ }
+
+ if (!NXM_IS_NX_REG(dst->header)
+ && dst->header != NXM_OF_VLAN_TCI
+ && dst->header != NXM_NX_TUN_ID) {
+ return BAD_ARGUMENT;
+ }
+
+ return 0;
+}
+
+int
+nxm_check_reg_load(const struct nx_action_reg_load *action,
+ const struct flow *flow)
+{
+ const struct nxm_field *dst;
+ int ofs, n_bits;
+
+ ofs = ntohs(action->ofs_nbits) >> 6;
+ n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1;
+ dst = nxm_field_lookup(ntohl(action->dst));
+ if (!field_ok(dst, flow, ofs + n_bits)) {
+ return BAD_ARGUMENT;
+ }
+
+ /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
+ * action->value. */
+ if (n_bits < 64 && ntohll(action->value) >> n_bits) {
+ return BAD_ARGUMENT;
+ }
+
+ if (!NXM_IS_NX_REG(dst->header)) {
+ return BAD_ARGUMENT;
+ }
+
+ return 0;
+}
+\f
+/* nxm_execute_reg_move(), nxm_execute_reg_load(). */
+
+static uint64_t
+nxm_read_field(const struct nxm_field *src, const struct flow *flow)
+{
+ switch (src->index) {
+ case NFI_NXM_OF_IN_PORT:
+ return flow->in_port == ODPP_LOCAL ? OFPP_LOCAL : flow->in_port;
+
+ case NFI_NXM_OF_ETH_DST:
+ return eth_addr_to_uint64(flow->dl_dst);
+
+ case NFI_NXM_OF_ETH_SRC:
+ return eth_addr_to_uint64(flow->dl_src);
+
+ case NFI_NXM_OF_ETH_TYPE:
+ return ntohs(flow->dl_type);
+
+ case NFI_NXM_OF_VLAN_TCI:
+ if (flow->dl_vlan == htons(OFP_VLAN_NONE)) {
+ return 0;
+ } else {
+ return (ntohs(flow->dl_vlan & htons(VLAN_VID_MASK))
+ | ((flow->dl_vlan_pcp << VLAN_PCP_SHIFT) & VLAN_PCP_MASK)
+ | VLAN_CFI);
+ }
+
+ case NFI_NXM_OF_IP_TOS:
+ return flow->nw_tos;
+
+ case NFI_NXM_OF_IP_PROTO:
+ case NFI_NXM_OF_ARP_OP:
+ return flow->nw_proto;
+
+ case NFI_NXM_OF_IP_SRC:
+ case NFI_NXM_OF_ARP_SPA:
+ return ntohl(flow->nw_src);
+
+ case NFI_NXM_OF_IP_DST:
+ case NFI_NXM_OF_ARP_TPA:
+ return ntohl(flow->nw_dst);
+
+ case NFI_NXM_OF_TCP_SRC:
+ case NFI_NXM_OF_UDP_SRC:
+ return ntohs(flow->tp_src);
+
+ case NFI_NXM_OF_TCP_DST:
+ case NFI_NXM_OF_UDP_DST:
+ return ntohs(flow->tp_dst);
+
+ case NFI_NXM_OF_ICMP_TYPE:
+ return ntohs(flow->tp_src) & 0xff;
+
+ case NFI_NXM_OF_ICMP_CODE:
+ return ntohs(flow->tp_dst) & 0xff;
+
+ case NFI_NXM_NX_TUN_ID:
+ return ntohl(flow->tun_id);
+
+#define NXM_READ_REGISTER(IDX) \
+ case NFI_NXM_NX_REG##IDX: \
+ return flow->regs[IDX]; \
+ case NFI_NXM_NX_REG##IDX##_W: \
+ NOT_REACHED();
+
+ NXM_READ_REGISTER(0);
+#if FLOW_N_REGS >= 2
+ NXM_READ_REGISTER(1);
+#endif
+#if FLOW_N_REGS >= 3
+ NXM_READ_REGISTER(2);
+#endif
+#if FLOW_N_REGS >= 4
+ NXM_READ_REGISTER(3);
+#endif
+#if FLOW_N_REGS > 4
+#error
+#endif
+
+ case NFI_NXM_OF_VLAN_TCI_W:
+ case NFI_NXM_OF_IP_SRC_W:
+ case NFI_NXM_OF_IP_DST_W:
+ case NFI_NXM_OF_ARP_SPA_W:
+ case NFI_NXM_OF_ARP_TPA_W:
+ case N_NXM_FIELDS:
+ NOT_REACHED();
+ }
+
+ NOT_REACHED();
+}
+
+void
+nxm_execute_reg_move(const struct nx_action_reg_move *action,
+ struct flow *flow)
+{
+ /* Preparation. */
+ int n_bits = ntohs(action->n_bits);
+ uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
+
+ /* Get the interesting bits of the source field. */
+ const struct nxm_field *src = nxm_field_lookup(ntohl(action->src));
+ int src_ofs = ntohs(action->src_ofs);
+ uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs);
+
+ /* Get the remaining bits of the destination field. */
+ const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
+ int dst_ofs = ntohs(action->dst_ofs);
+ uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);
+
+ /* Get the final value. */
+ uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs);
+
+ /* Store the result. */
+ if (NXM_IS_NX_REG(dst->header)) {
+ flow->regs[NXM_NX_REG_IDX(dst->header)] = new_data;
+ } else if (dst->header == NXM_OF_VLAN_TCI) {
+ ovs_be16 vlan_tci = htons(new_data & VLAN_CFI ? new_data : 0);
+ flow->dl_vlan = htons(vlan_tci_to_vid(vlan_tci));
+ flow->dl_vlan_pcp = vlan_tci_to_pcp(vlan_tci);
+ } else if (dst->header == NXM_NX_TUN_ID) {
+ flow->tun_id = htonl(new_data);
+ } else {
+ NOT_REACHED();
+ }
+}
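
/* Worked trace (illustrative) for n_bits = 4, src_ofs = 0, dst_ofs = 6, a
 * source field that reads as 0x9, and a destination register holding
 * 0xffffffff:
 *
 *   mask     = (UINT64_C(1) << 4) - 1          = 0xf
 *   src_data = 0x9 & (0xf << 0)                = 0x9
 *   dst_data = 0xffffffff & ~(0xf << 6)        = 0xfffffc3f
 *   new_data = 0xfffffc3f | ((0x9 >> 0) << 6)  = 0xfffffe7f
 *
 * so bits 6..9 of the register now hold 0x9 and every other bit survives. */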
+
+void
+nxm_execute_reg_load(const struct nx_action_reg_load *action,
+ struct flow *flow)
+{
+ /* Preparation. */
+ int n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1;
+ uint32_t mask = n_bits == 32 ? UINT32_MAX : (UINT32_C(1) << n_bits) - 1;
+ uint32_t *reg = &flow->regs[NXM_NX_REG_IDX(ntohl(action->dst))];
+
+ /* Get source data. */
+ uint32_t src_data = ntohll(action->value);
+
+ /* Get remaining bits of the destination field. */
+ int dst_ofs = ntohs(action->ofs_nbits) >> 6;
+ uint32_t dst_data = *reg & ~(mask << dst_ofs);
+
+ *reg = dst_data | (src_data << dst_ofs);
+}
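
/* Worked trace (illustrative) for the earlier encoding ofs = 6, n_bits = 4,
 * value = 0x9 (ofs_nbits = 0x183), against a register that starts at 0:
 *
 *   n_bits   = (0x183 & 0x3f) + 1      = 4
 *   mask     = (UINT32_C(1) << 4) - 1  = 0xf
 *   dst_ofs  = 0x183 >> 6              = 6
 *   dst_data = 0 & ~(0xf << 6)         = 0
 *   *reg     = 0 | (0x9 << 6)          = 0x00000240
 */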
DEFINE_FIELD_M(OF_ARP_TPA, 0, ETH_TYPE_ARP, 0)
DEFINE_FIELD (NX_TUN_ID, NXFW_TUN_ID, 0, 0)
+DEFINE_FIELD_M(NX_REG0, 0, 0, 0)
+#if FLOW_N_REGS >= 2
+DEFINE_FIELD_M(NX_REG1, 0, 0, 0)
+#endif
+#if FLOW_N_REGS >= 3
+DEFINE_FIELD_M(NX_REG2, 0, 0, 0)
+#endif
+#if FLOW_N_REGS >= 4
+DEFINE_FIELD_M(NX_REG3, 0, 0, 0)
+#endif
+#if FLOW_N_REGS > 4
+#error
+#endif
+
#undef DEFINE_FIELD
#include <stdint.h>
struct cls_rule;
+struct flow;
struct ofpbuf;
+struct nx_action_reg_load;
+struct nx_action_reg_move;
/* Nicira Extended Match (NXM) flexible flow match helper functions.
*
char *nx_match_to_string(const uint8_t *, unsigned int match_len);
int nx_match_from_string(const char *, struct ofpbuf *);
+int nxm_check_reg_move(const struct nx_action_reg_move *, const struct flow *);
+int nxm_check_reg_load(const struct nx_action_reg_load *, const struct flow *);
+
+void nxm_execute_reg_move(const struct nx_action_reg_move *, struct flow *);
+void nxm_execute_reg_load(const struct nx_action_reg_load *, struct flow *);
+
/* Upper bound on the length of an nx_match. The longest nx_match (assuming
* we implement 4 registers) would be:
*
void
odp_flow_key_to_flow(const struct odp_flow_key *key, struct flow *flow)
{
+ memset(flow->regs, 0, sizeof flow->regs);
flow->tun_id = key->tun_id;
flow->nw_src = key->nw_src;
flow->nw_dst = key->nw_dst;
#include <inttypes.h>
#include <stdlib.h>
#include "byte-order.h"
+#include "nx-match.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "packets.h"
}
static int
-check_nicira_action(const union ofp_action *a, unsigned int len)
+check_nicira_action(const union ofp_action *a, unsigned int len,
+ const struct flow *flow)
{
const struct nx_action_header *nah;
+ int error;
if (len < 16) {
VLOG_DBG_RL(&bad_ofmsg_rl,
case NXAST_SET_QUEUE:
case NXAST_POP_QUEUE:
return check_action_exact_len(a, len, 16);
+ case NXAST_REG_MOVE:
+ error = check_action_exact_len(a, len,
+ sizeof(struct nx_action_reg_move));
+ if (error) {
+ return error;
+ }
+ return nxm_check_reg_move((const struct nx_action_reg_move *) a, flow);
+ case NXAST_REG_LOAD:
+ error = check_action_exact_len(a, len,
+ sizeof(struct nx_action_reg_load));
+ if (error) {
+ return error;
+ }
+ return nxm_check_reg_load((const struct nx_action_reg_load *) a, flow);
default:
return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_BAD_VENDOR_TYPE);
}
}
static int
-check_action(const union ofp_action *a, unsigned int len, int max_ports)
+check_action(const union ofp_action *a, unsigned int len,
+ const struct flow *flow, int max_ports)
{
int error;
case OFPAT_VENDOR:
return (a->vendor.vendor == htonl(NX_VENDOR_ID)
- ? check_nicira_action(a, len)
+ ? check_nicira_action(a, len, flow)
: ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_BAD_VENDOR));
case OFPAT_ENQUEUE:
int
validate_actions(const union ofp_action *actions, size_t n_actions,
- const struct flow *flow OVS_UNUSED, int max_ports)
+ const struct flow *flow, int max_ports)
{
size_t i;
return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_BAD_LEN);
}
- error = check_action(a, len, max_ports);
+ error = check_action(a, len, flow, max_ports);
if (error) {
return error;
}
}
}
+static void
+xlate_reg_move_action(struct action_xlate_ctx *ctx,
+ const struct nx_action_reg_move *narm)
+{
+ ovs_be16 old_vlan = ctx->flow.dl_vlan;
+ uint8_t old_pcp = ctx->flow.dl_vlan_pcp;
+
+ nxm_execute_reg_move(narm, &ctx->flow);
+
+ if (ctx->flow.dl_vlan != old_vlan || ctx->flow.dl_vlan_pcp != old_pcp) {
+ xlate_set_dl_tci(ctx);
+ }
+}
+
static void
xlate_nicira_action(struct action_xlate_ctx *ctx,
const struct nx_action_header *nah)
odp_actions_add(ctx->out, ODPAT_POP_PRIORITY);
break;
+ case NXAST_REG_MOVE:
+ xlate_reg_move_action(ctx, (const struct nx_action_reg_move *) nah);
+ break;
+
+ case NXAST_REG_LOAD:
+ nxm_execute_reg_load((const struct nx_action_reg_load *) nah,
+ &ctx->flow);
+ break;
+
/* If you add a new action here that modifies flow data, don't forget to
* update the flow key in ctx->flow at the same time. */
# Tunnel ID.
NXM_NX_TUN_ID(00000000abcdef01)
+
+# Register 0.
+NXM_NX_REG0(acebdf56)
+NXM_NX_REG0_W(a0e0d050/f0f0f0f0)
])
AT_CHECK([ovs-ofctl parse-nx-match < nx-match.txt], [0], [stdout])
AT_CHECK([cat stdout], [0], [dnl
# Tunnel ID.
NXM_NX_TUN_ID(00000000abcdef01)
+
+# Register 0.
+NXM_NX_REG0(acebdf56)
+NXM_NX_REG0_W(a0e0d050/f0f0f0f0)
])
AT_CLEANUP