/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
21 #include "classifier.h"
22 #include "dynamic-string.h"
25 #include "openflow/nicira-ext.h"
27 #include "unaligned.h"
30 VLOG_DEFINE_THIS_MODULE(nx_match);
32 /* Rate limit for nx_match parse errors. These always indicate a bug in the
33 * peer and so there's not much point in showing a lot of them. */
34 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
37 NXM_INVALID = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_INVALID),
38 NXM_BAD_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_TYPE),
39 NXM_BAD_VALUE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_VALUE),
40 NXM_BAD_MASK = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_MASK),
41 NXM_BAD_PREREQ = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_PREREQ),
42 NXM_DUP_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_DUP_TYPE),
43 BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT)
46 /* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from
48 enum nxm_field_index {
49 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
51 #include "nx-match.def"
56 struct hmap_node hmap_node;
57 enum nxm_field_index index; /* NFI_* value. */
58 uint32_t header; /* NXM_* value. */
59 flow_wildcards_t wildcard; /* FWW_* bit, if exactly one. */
60 ovs_be16 dl_type; /* dl_type prerequisite, if nonzero. */
61 uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */
62 const char *name; /* "NXM_*" string. */
63 bool writable; /* Writable with NXAST_REG_{MOVE,LOAD}? */
66 /* All the known fields. */
67 static struct nxm_field nxm_fields[N_NXM_FIELDS] = {
68 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
69 { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \
70 CONSTANT_HTONS(DL_TYPE), NW_PROTO, "NXM_" #HEADER, WRITABLE },
71 #include "nx-match.def"
74 /* Hash table of 'nxm_fields'. */
75 static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields);
77 /* Possible masks for NXM_OF_ETH_DST_W. */
78 static const uint8_t eth_all_0s[ETH_ADDR_LEN]
79 = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
80 static const uint8_t eth_all_1s[ETH_ADDR_LEN]
81 = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
82 static const uint8_t eth_mcast_1[ETH_ADDR_LEN]
83 = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
84 static const uint8_t eth_mcast_0[ETH_ADDR_LEN]
85 = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff};
90 if (hmap_is_empty(&all_nxm_fields)) {
93 for (i = 0; i < N_NXM_FIELDS; i++) {
94 struct nxm_field *f = &nxm_fields[i];
95 hmap_insert(&all_nxm_fields, &f->hmap_node,
96 hash_int(f->header, 0));
99 /* Verify that the header values are unique (duplicate "case" values
100 * cause a compile error). */
102 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
103 case NXM_##HEADER: break;
104 #include "nx-match.def"
109 static const struct nxm_field *
110 nxm_field_lookup(uint32_t header)
116 HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0),
118 if (f->header == header) {
/* Returns the width of the data for a field with the given 'header', in
 * bytes.  A masked entry's NXM length covers value plus mask, so halve it. */
static int
nxm_field_bytes(uint32_t header)
{
    unsigned int length = NXM_LENGTH(header);
    return NXM_HASMASK(header) ? length / 2 : length;
}
/* Returns the width of the data for a field with the given 'header', in
 * bits. */
static int
nxm_field_bits(uint32_t header)
{
    return nxm_field_bytes(header) * 8;
}
143 /* nx_pull_match() and helpers. */
146 parse_nx_reg(const struct nxm_field *f,
147 struct flow *flow, struct flow_wildcards *wc,
148 const void *value, const void *maskp)
150 int idx = NXM_NX_REG_IDX(f->header);
151 if (wc->reg_masks[idx]) {
154 flow_wildcards_set_reg_mask(wc, idx,
155 (NXM_HASMASK(f->header)
156 ? ntohl(get_unaligned_be32(maskp))
158 flow->regs[idx] = ntohl(get_unaligned_be32(value));
159 flow->regs[idx] &= wc->reg_masks[idx];
165 parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f,
166 const void *value, const void *mask)
168 struct flow_wildcards *wc = &rule->wc;
169 struct flow *flow = &rule->flow;
173 case NFI_NXM_OF_IN_PORT:
174 flow->in_port = ntohs(get_unaligned_be16(value));
175 if (flow->in_port == OFPP_LOCAL) {
176 flow->in_port = ODPP_LOCAL;
180 /* Ethernet header. */
181 case NFI_NXM_OF_ETH_DST:
182 if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
183 != (FWW_DL_DST | FWW_ETH_MCAST)) {
186 wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
187 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
190 case NFI_NXM_OF_ETH_DST_W:
191 if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
192 != (FWW_DL_DST | FWW_ETH_MCAST)) {
194 } else if (eth_addr_equals(mask, eth_mcast_1)) {
195 wc->wildcards &= ~FWW_ETH_MCAST;
196 flow->dl_dst[0] = *(uint8_t *) value & 0x01;
197 } else if (eth_addr_equals(mask, eth_mcast_0)) {
198 wc->wildcards &= ~FWW_DL_DST;
199 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
200 flow->dl_dst[0] &= 0xfe;
201 } else if (eth_addr_equals(mask, eth_all_0s)) {
203 } else if (eth_addr_equals(mask, eth_all_1s)) {
204 wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
205 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
210 case NFI_NXM_OF_ETH_SRC:
211 memcpy(flow->dl_src, value, ETH_ADDR_LEN);
213 case NFI_NXM_OF_ETH_TYPE:
214 flow->dl_type = ofputil_dl_type_from_openflow(get_unaligned_be16(value));
218 case NFI_NXM_OF_VLAN_TCI:
219 if (wc->vlan_tci_mask) {
222 cls_rule_set_dl_tci(rule, get_unaligned_be16(value));
225 case NFI_NXM_OF_VLAN_TCI_W:
226 if (wc->vlan_tci_mask) {
229 cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value),
230 get_unaligned_be16(mask));
235 case NFI_NXM_OF_IP_TOS:
236 if (*(uint8_t *) value & 0x03) {
237 return NXM_BAD_VALUE;
239 flow->nw_tos = *(uint8_t *) value;
242 case NFI_NXM_OF_IP_PROTO:
243 flow->nw_proto = *(uint8_t *) value;
246 /* IP addresses in IP and ARP headers. */
247 case NFI_NXM_OF_IP_SRC:
248 case NFI_NXM_OF_ARP_SPA:
249 if (wc->nw_src_mask) {
252 cls_rule_set_nw_src(rule, get_unaligned_be32(value));
255 case NFI_NXM_OF_IP_SRC_W:
256 case NFI_NXM_OF_ARP_SPA_W:
257 if (wc->nw_src_mask) {
260 ovs_be32 ip = get_unaligned_be32(value);
261 ovs_be32 netmask = get_unaligned_be32(mask);
262 if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) {
267 case NFI_NXM_OF_IP_DST:
268 case NFI_NXM_OF_ARP_TPA:
269 if (wc->nw_dst_mask) {
272 cls_rule_set_nw_dst(rule, get_unaligned_be32(value));
275 case NFI_NXM_OF_IP_DST_W:
276 case NFI_NXM_OF_ARP_TPA_W:
277 if (wc->nw_dst_mask) {
280 ovs_be32 ip = get_unaligned_be32(value);
281 ovs_be32 netmask = get_unaligned_be32(mask);
282 if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) {
289 case NFI_NXM_OF_TCP_SRC:
290 flow->tp_src = get_unaligned_be16(value);
292 case NFI_NXM_OF_TCP_DST:
293 flow->tp_dst = get_unaligned_be16(value);
297 case NFI_NXM_OF_UDP_SRC:
298 flow->tp_src = get_unaligned_be16(value);
300 case NFI_NXM_OF_UDP_DST:
301 flow->tp_dst = get_unaligned_be16(value);
305 case NFI_NXM_OF_ICMP_TYPE:
306 flow->tp_src = htons(*(uint8_t *) value);
308 case NFI_NXM_OF_ICMP_CODE:
309 flow->tp_dst = htons(*(uint8_t *) value);
313 case NFI_NXM_OF_ARP_OP:
314 if (ntohs(get_unaligned_be16(value)) > 255) {
315 return NXM_BAD_VALUE;
317 flow->nw_proto = ntohs(get_unaligned_be16(value));
321 case NFI_NXM_NX_ARP_SHA:
322 memcpy(flow->arp_sha, value, ETH_ADDR_LEN);
324 case NFI_NXM_NX_ARP_THA:
325 memcpy(flow->arp_tha, value, ETH_ADDR_LEN);
329 case NFI_NXM_NX_TUN_ID:
330 if (wc->tun_id_mask) {
333 cls_rule_set_tun_id(rule, get_unaligned_be64(value));
336 case NFI_NXM_NX_TUN_ID_W:
337 if (wc->tun_id_mask) {
340 ovs_be64 tun_id = get_unaligned_be64(value);
341 ovs_be64 tun_mask = get_unaligned_be64(mask);
342 cls_rule_set_tun_id_masked(rule, tun_id, tun_mask);
347 case NFI_NXM_NX_REG0:
348 case NFI_NXM_NX_REG0_W:
350 case NFI_NXM_NX_REG1:
351 case NFI_NXM_NX_REG1_W:
354 case NFI_NXM_NX_REG2:
355 case NFI_NXM_NX_REG2_W:
358 case NFI_NXM_NX_REG3:
359 case NFI_NXM_NX_REG3_W:
364 return parse_nx_reg(f, flow, wc, value, mask);
373 nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow)
375 return (!field->dl_type
376 || (field->dl_type == flow->dl_type
377 && (!field->nw_proto || field->nw_proto == flow->nw_proto)));
381 nx_entry_ok(const void *p, unsigned int match_len)
383 unsigned int payload_len;
389 VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
393 memcpy(&header_be, p, 4);
394 header = ntohl(header_be);
396 payload_len = NXM_LENGTH(header);
398 VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
402 if (match_len < payload_len + 4) {
403 VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
404 "%u bytes left in nx_match", payload_len + 4, match_len);
412 nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority,
413 struct cls_rule *rule)
418 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
420 VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
421 "multiple of 8, is longer than space in message (max "
422 "length %zu)", match_len, b->size);
423 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
426 cls_rule_init_catchall(rule, priority);
427 while ((header = nx_entry_ok(p, match_len)) != 0) {
428 unsigned length = NXM_LENGTH(header);
429 const struct nxm_field *f;
432 f = nxm_field_lookup(header);
434 error = NXM_BAD_TYPE;
435 } else if (!nxm_prereqs_ok(f, &rule->flow)) {
436 error = NXM_BAD_PREREQ;
437 } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) {
438 error = NXM_DUP_TYPE;
440 /* 'hasmask' and 'length' are known to be correct at this point
441 * because they are included in 'header' and nxm_field_lookup()
442 * checked them already. */
443 rule->wc.wildcards &= ~f->wildcard;
444 error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2);
447 VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", "
448 "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" "
450 NXM_VENDOR(header), NXM_FIELD(header),
451 NXM_HASMASK(header), NXM_TYPE(header),
458 match_len -= 4 + length;
461 return match_len ? NXM_INVALID : 0;
464 /* nx_put_match() and helpers.
466 * 'put' functions whose names end in 'w' add a wildcarded field.
467 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
468 * Other 'put' functions add exact-match fields.
472 nxm_put_header(struct ofpbuf *b, uint32_t header)
474 ovs_be32 n_header = htonl(header);
475 ofpbuf_put(b, &n_header, sizeof n_header);
/* Appends an exact-match 1-byte nxm_entry to 'b'. */
static void
nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}
486 nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
488 nxm_put_header(b, header);
489 ofpbuf_put(b, &value, sizeof value);
493 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
495 nxm_put_header(b, header);
496 ofpbuf_put(b, &value, sizeof value);
497 ofpbuf_put(b, &mask, sizeof mask);
501 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
507 case CONSTANT_HTONS(UINT16_MAX):
508 nxm_put_16(b, header, value);
512 nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
518 nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
520 nxm_put_header(b, header);
521 ofpbuf_put(b, &value, sizeof value);
525 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
527 nxm_put_header(b, header);
528 ofpbuf_put(b, &value, sizeof value);
529 ofpbuf_put(b, &mask, sizeof mask);
533 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
539 case CONSTANT_HTONL(UINT32_MAX):
540 nxm_put_32(b, header, value);
544 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
550 nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
552 nxm_put_header(b, header);
553 ofpbuf_put(b, &value, sizeof value);
557 nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
559 nxm_put_header(b, header);
560 ofpbuf_put(b, &value, sizeof value);
561 ofpbuf_put(b, &mask, sizeof mask);
565 nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
571 case CONSTANT_HTONLL(UINT64_MAX):
572 nxm_put_64(b, header, value);
576 nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
582 nxm_put_eth(struct ofpbuf *b, uint32_t header,
583 const uint8_t value[ETH_ADDR_LEN])
585 nxm_put_header(b, header);
586 ofpbuf_put(b, value, ETH_ADDR_LEN);
590 nxm_put_eth_dst(struct ofpbuf *b,
591 uint32_t wc, const uint8_t value[ETH_ADDR_LEN])
593 switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) {
594 case FWW_DL_DST | FWW_ETH_MCAST:
597 nxm_put_header(b, NXM_OF_ETH_DST_W);
598 ofpbuf_put(b, value, ETH_ADDR_LEN);
599 ofpbuf_put(b, eth_mcast_1, ETH_ADDR_LEN);
602 nxm_put_header(b, NXM_OF_ETH_DST_W);
603 ofpbuf_put(b, value, ETH_ADDR_LEN);
604 ofpbuf_put(b, eth_mcast_0, ETH_ADDR_LEN);
607 nxm_put_eth(b, NXM_OF_ETH_DST, value);
612 /* Appends to 'b' the nx_match format that expresses 'cr' (except for
613 * 'cr->priority', because priority is not part of nx_match), plus enough
614 * zero bytes to pad the nx_match out to a multiple of 8.
616 * This function can cause 'b''s data to be reallocated.
618 * Returns the number of bytes appended to 'b', excluding padding.
620 * If 'cr' is a catch-all rule that matches every packet, then this function
621 * appends nothing to 'b' and returns 0. */
623 nx_put_match(struct ofpbuf *b, const struct cls_rule *cr)
625 const flow_wildcards_t wc = cr->wc.wildcards;
626 const struct flow *flow = &cr->flow;
627 const size_t start_len = b->size;
632 if (!(wc & FWW_IN_PORT)) {
633 uint16_t in_port = flow->in_port;
634 if (in_port == ODPP_LOCAL) {
635 in_port = OFPP_LOCAL;
637 nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
641 nxm_put_eth_dst(b, wc, flow->dl_dst);
642 if (!(wc & FWW_DL_SRC)) {
643 nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src);
645 if (!(wc & FWW_DL_TYPE)) {
646 nxm_put_16(b, NXM_OF_ETH_TYPE,
647 ofputil_dl_type_to_openflow(flow->dl_type));
651 nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask);
654 if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
656 if (!(wc & FWW_NW_TOS)) {
657 nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
659 nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
660 nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);
662 if (!(wc & FWW_NW_PROTO)) {
663 nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
664 switch (flow->nw_proto) {
667 if (!(wc & FWW_TP_SRC)) {
668 nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
670 if (!(wc & FWW_TP_DST)) {
671 nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
677 if (!(wc & FWW_TP_SRC)) {
678 nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
680 if (!(wc & FWW_TP_DST)) {
681 nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
687 if (!(wc & FWW_TP_SRC)) {
688 nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src));
690 if (!(wc & FWW_TP_DST)) {
691 nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst));
696 } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
698 if (!(wc & FWW_NW_PROTO)) {
699 nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto));
701 nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask);
702 nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask);
703 if (!(wc & FWW_ARP_SHA)) {
704 nxm_put_eth(b, NXM_NX_ARP_SHA, flow->arp_sha);
706 if (!(wc & FWW_ARP_THA)) {
707 nxm_put_eth(b, NXM_NX_ARP_THA, flow->arp_tha);
712 nxm_put_64m(b, NXM_NX_TUN_ID, flow->tun_id, cr->wc.tun_id_mask);
715 for (i = 0; i < FLOW_N_REGS; i++) {
716 nxm_put_32m(b, NXM_NX_REG(i),
717 htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
720 match_len = b->size - start_len;
721 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
725 /* nx_match_to_string() and helpers. */
727 static void format_nxm_field_name(struct ds *, uint32_t header);
730 nx_match_to_string(const uint8_t *p, unsigned int match_len)
736 return xstrdup("<any>");
740 while ((header = nx_entry_ok(p, match_len)) != 0) {
741 unsigned int length = NXM_LENGTH(header);
742 unsigned int value_len = nxm_field_bytes(header);
743 const uint8_t *value = p + 4;
744 const uint8_t *mask = value + value_len;
748 ds_put_cstr(&s, ", ");
751 format_nxm_field_name(&s, header);
752 ds_put_char(&s, '(');
754 for (i = 0; i < value_len; i++) {
755 ds_put_format(&s, "%02x", value[i]);
757 if (NXM_HASMASK(header)) {
758 ds_put_char(&s, '/');
759 for (i = 0; i < value_len; i++) {
760 ds_put_format(&s, "%02x", mask[i]);
763 ds_put_char(&s, ')');
766 match_len -= 4 + length;
771 ds_put_cstr(&s, ", ");
774 ds_put_format(&s, "<%u invalid bytes>", match_len);
777 return ds_steal_cstr(&s);
781 format_nxm_field_name(struct ds *s, uint32_t header)
783 const struct nxm_field *f = nxm_field_lookup(header);
785 ds_put_cstr(s, f->name);
787 ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
792 parse_nxm_field_name(const char *name, int name_len)
794 const struct nxm_field *f;
796 /* Check whether it's a field name. */
797 for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) {
798 if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') {
803 /* Check whether it's a 32-bit field header value as hex.
804 * (This isn't ordinarily useful except for testing error behavior.) */
806 uint32_t header = hexits_value(name, name_len, NULL);
807 if (header != UINT_MAX) {
815 /* nx_match_from_string(). */
818 nx_match_from_string(const char *s, struct ofpbuf *b)
820 const char *full_s = s;
821 const size_t start_len = b->size;
824 if (!strcmp(s, "<any>")) {
825 /* Ensure that 'b->data' isn't actually null. */
826 ofpbuf_prealloc_tailroom(b, 1);
830 for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
837 name_len = strcspn(s, "(");
838 if (s[name_len] != '(') {
839 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
842 header = parse_nxm_field_name(name, name_len);
844 ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
849 nxm_put_header(b, header);
850 s = ofpbuf_put_hex(b, s, &n);
851 if (n != nxm_field_bytes(header)) {
852 ovs_fatal(0, "%.2s: hex digits expected", s);
854 if (NXM_HASMASK(header)) {
857 ovs_fatal(0, "%s: missing / in masked field %.*s",
858 full_s, name_len, name);
860 s = ofpbuf_put_hex(b, s + 1, &n);
861 if (n != nxm_field_bytes(header)) {
862 ovs_fatal(0, "%.2s: hex digits expected", s);
868 ovs_fatal(0, "%s: missing ) following field %.*s",
869 full_s, name_len, name);
874 match_len = b->size - start_len;
875 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
/* Parses a field-with-bit-range specification such as "NAME[]", "NAME[3]",
 * or "NAME[2..5]" from the start of 's'.  Stores the field header in
 * '*headerp', the starting bit offset in '*ofsp', and the number of bits in
 * '*n_bitsp'.  Returns a pointer just past the closing ']'.  Calls
 * ovs_fatal() on any syntax or range error. */
static const char *
nxm_parse_field_bits(const char *s, uint32_t *headerp, int *ofsp, int *n_bitsp)
{
    const char *full_s = s;
    const char *name;
    uint32_t header;
    int start, end;
    int name_len;
    int width;

    name = s;
    name_len = strcspn(s, "[");
    if (s[name_len] != '[') {
        ovs_fatal(0, "%s: missing [ looking for field name", full_s);
    }

    header = parse_nxm_field_name(name, name_len);
    if (!header) {
        ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
    }
    width = nxm_field_bits(header);

    s += name_len;
    if (sscanf(s, "[%d..%d]", &start, &end) == 2) {
        /* Nothing to do. */
    } else if (sscanf(s, "[%d]", &start) == 1) {
        end = start;            /* Single-bit range. */
    } else if (!strncmp(s, "[]", 2)) {
        start = 0;              /* Whole field. */
        end = width - 1;
    } else {
        ovs_fatal(0, "%s: syntax error expecting [] or [<bit>] or "
                  "[<start>..<end>]", full_s);
    }
    s = strchr(s, ']') + 1;

    if (start > end) {
        ovs_fatal(0, "%s: starting bit %d is after ending bit %d",
                  full_s, start, end);
    } else if (start >= width) {
        ovs_fatal(0, "%s: starting bit %d is not valid because field is only "
                  "%d bits wide", full_s, start, width);
    } else if (end >= width){
        ovs_fatal(0, "%s: ending bit %d is not valid because field is only "
                  "%d bits wide", full_s, end, width);
    }

    *headerp = header;
    *ofsp = start;
    *n_bitsp = end - start + 1;

    return s;
}
934 nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s)
936 const char *full_s = s;
938 int src_ofs, dst_ofs;
939 int src_n_bits, dst_n_bits;
941 s = nxm_parse_field_bits(s, &src, &src_ofs, &src_n_bits);
942 if (strncmp(s, "->", 2)) {
943 ovs_fatal(0, "%s: missing `->' following source", full_s);
946 s = nxm_parse_field_bits(s, &dst, &dst_ofs, &dst_n_bits);
948 ovs_fatal(0, "%s: trailing garbage following destination", full_s);
951 if (src_n_bits != dst_n_bits) {
952 ovs_fatal(0, "%s: source field is %d bits wide but destination is "
953 "%d bits wide", full_s, src_n_bits, dst_n_bits);
956 move->type = htons(OFPAT_VENDOR);
957 move->len = htons(sizeof *move);
958 move->vendor = htonl(NX_VENDOR_ID);
959 move->subtype = htons(NXAST_REG_MOVE);
960 move->n_bits = htons(src_n_bits);
961 move->src_ofs = htons(src_ofs);
962 move->dst_ofs = htons(dst_ofs);
963 move->src = htonl(src);
964 move->dst = htonl(dst);
968 nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s)
970 const char *full_s = s;
975 value = strtoull(s, (char **) &s, 0);
976 if (strncmp(s, "->", 2)) {
977 ovs_fatal(0, "%s: missing `->' following value", full_s);
980 s = nxm_parse_field_bits(s, &dst, &ofs, &n_bits);
982 ovs_fatal(0, "%s: trailing garbage following destination", full_s);
985 if (n_bits < 64 && (value >> n_bits) != 0) {
986 ovs_fatal(0, "%s: value %"PRIu64" does not fit into %d bits",
987 full_s, value, n_bits);
990 load->type = htons(OFPAT_VENDOR);
991 load->len = htons(sizeof *load);
992 load->vendor = htonl(NX_VENDOR_ID);
993 load->subtype = htons(NXAST_REG_LOAD);
994 load->ofs_nbits = nxm_encode_ofs_nbits(ofs, n_bits);
995 load->dst = htonl(dst);
996 load->value = htonll(value);
/* nxm_format_reg_move(), nxm_format_reg_load(). */

/* Appends to 's' the field name followed by "[]", "[ofs]", or "[ofs..end]"
 * depending on whether the range covers the whole field, one bit, or
 * several bits. */
static void
nxm_format_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits)
{
    format_nxm_field_name(s, header);
    if (ofs == 0 && n_bits == nxm_field_bits(header)) {
        ds_put_cstr(s, "[]");
    } else if (n_bits == 1) {
        ds_put_format(s, "[%d]", ofs);
    } else {
        ds_put_format(s, "[%d..%d]", ofs, ofs + n_bits - 1);
    }
}
1015 nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s)
1017 int n_bits = ntohs(move->n_bits);
1018 int src_ofs = ntohs(move->src_ofs);
1019 int dst_ofs = ntohs(move->dst_ofs);
1020 uint32_t src = ntohl(move->src);
1021 uint32_t dst = ntohl(move->dst);
1023 ds_put_format(s, "move:");
1024 nxm_format_field_bits(s, src, src_ofs, n_bits);
1025 ds_put_cstr(s, "->");
1026 nxm_format_field_bits(s, dst, dst_ofs, n_bits);
1030 nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s)
1032 int ofs = nxm_decode_ofs(load->ofs_nbits);
1033 int n_bits = nxm_decode_n_bits(load->ofs_nbits);
1034 uint32_t dst = ntohl(load->dst);
1035 uint64_t value = ntohll(load->value);
1037 ds_put_format(s, "load:%#"PRIx64"->", value);
1038 nxm_format_field_bits(s, dst, ofs, n_bits);
1041 /* nxm_check_reg_move(), nxm_check_reg_load(). */
1044 field_ok(const struct nxm_field *f, const struct flow *flow, int size)
1046 return (f && !NXM_HASMASK(f->header)
1047 && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header));
1051 nxm_check_reg_move(const struct nx_action_reg_move *action,
1052 const struct flow *flow)
1054 const struct nxm_field *src;
1055 const struct nxm_field *dst;
1057 if (action->n_bits == htons(0)) {
1058 return BAD_ARGUMENT;
1061 src = nxm_field_lookup(ntohl(action->src));
1062 if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) {
1063 return BAD_ARGUMENT;
1066 dst = nxm_field_lookup(ntohl(action->dst));
1067 if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) {
1068 return BAD_ARGUMENT;
1071 if (!dst->writable) {
1072 return BAD_ARGUMENT;
1079 nxm_check_reg_load(const struct nx_action_reg_load *action,
1080 const struct flow *flow)
1082 const struct nxm_field *dst;
1085 ofs = nxm_decode_ofs(action->ofs_nbits);
1086 n_bits = nxm_decode_n_bits(action->ofs_nbits);
1087 dst = nxm_field_lookup(ntohl(action->dst));
1088 if (!field_ok(dst, flow, ofs + n_bits)) {
1089 return BAD_ARGUMENT;
1092 /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
1094 if (n_bits < 64 && ntohll(action->value) >> n_bits) {
1095 return BAD_ARGUMENT;
1098 if (!dst->writable) {
1099 return BAD_ARGUMENT;
1105 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1108 nxm_read_field(const struct nxm_field *src, const struct flow *flow)
1110 switch (src->index) {
1111 case NFI_NXM_OF_IN_PORT:
1112 return flow->in_port == ODPP_LOCAL ? OFPP_LOCAL : flow->in_port;
1114 case NFI_NXM_OF_ETH_DST:
1115 return eth_addr_to_uint64(flow->dl_dst);
1117 case NFI_NXM_OF_ETH_SRC:
1118 return eth_addr_to_uint64(flow->dl_src);
1120 case NFI_NXM_OF_ETH_TYPE:
1121 return ntohs(ofputil_dl_type_to_openflow(flow->dl_type));
1123 case NFI_NXM_OF_VLAN_TCI:
1124 return ntohs(flow->vlan_tci);
1126 case NFI_NXM_OF_IP_TOS:
1127 return flow->nw_tos;
1129 case NFI_NXM_OF_IP_PROTO:
1130 case NFI_NXM_OF_ARP_OP:
1131 return flow->nw_proto;
1133 case NFI_NXM_OF_IP_SRC:
1134 case NFI_NXM_OF_ARP_SPA:
1135 return ntohl(flow->nw_src);
1137 case NFI_NXM_OF_IP_DST:
1138 case NFI_NXM_OF_ARP_TPA:
1139 return ntohl(flow->nw_dst);
1141 case NFI_NXM_OF_TCP_SRC:
1142 case NFI_NXM_OF_UDP_SRC:
1143 return ntohs(flow->tp_src);
1145 case NFI_NXM_OF_TCP_DST:
1146 case NFI_NXM_OF_UDP_DST:
1147 return ntohs(flow->tp_dst);
1149 case NFI_NXM_OF_ICMP_TYPE:
1150 return ntohs(flow->tp_src) & 0xff;
1152 case NFI_NXM_OF_ICMP_CODE:
1153 return ntohs(flow->tp_dst) & 0xff;
1155 case NFI_NXM_NX_TUN_ID:
1156 return ntohll(flow->tun_id);
1158 #define NXM_READ_REGISTER(IDX) \
1159 case NFI_NXM_NX_REG##IDX: \
1160 return flow->regs[IDX]; \
1161 case NFI_NXM_NX_REG##IDX##_W: \
1164 NXM_READ_REGISTER(0);
1165 #if FLOW_N_REGS >= 2
1166 NXM_READ_REGISTER(1);
1168 #if FLOW_N_REGS >= 3
1169 NXM_READ_REGISTER(2);
1171 #if FLOW_N_REGS >= 4
1172 NXM_READ_REGISTER(3);
1178 case NFI_NXM_NX_ARP_SHA:
1179 return eth_addr_to_uint64(flow->arp_sha);
1181 case NFI_NXM_NX_ARP_THA:
1182 return eth_addr_to_uint64(flow->arp_tha);
1184 case NFI_NXM_NX_TUN_ID_W:
1185 case NFI_NXM_OF_ETH_DST_W:
1186 case NFI_NXM_OF_VLAN_TCI_W:
1187 case NFI_NXM_OF_IP_SRC_W:
1188 case NFI_NXM_OF_IP_DST_W:
1189 case NFI_NXM_OF_ARP_SPA_W:
1190 case NFI_NXM_OF_ARP_TPA_W:
1199 nxm_write_field(const struct nxm_field *dst, struct flow *flow,
1202 switch (dst->index) {
1203 case NFI_NXM_OF_VLAN_TCI:
1204 flow->vlan_tci = htons(new_value);
1207 case NFI_NXM_NX_TUN_ID:
1208 flow->tun_id = htonll(new_value);
1211 #define NXM_WRITE_REGISTER(IDX) \
1212 case NFI_NXM_NX_REG##IDX: \
1213 flow->regs[IDX] = new_value; \
1215 case NFI_NXM_NX_REG##IDX##_W: \
1218 NXM_WRITE_REGISTER(0);
1219 #if FLOW_N_REGS >= 2
1220 NXM_WRITE_REGISTER(1);
1222 #if FLOW_N_REGS >= 3
1223 NXM_WRITE_REGISTER(2);
1225 #if FLOW_N_REGS >= 4
1226 NXM_WRITE_REGISTER(3);
1232 case NFI_NXM_OF_IN_PORT:
1233 case NFI_NXM_OF_ETH_DST:
1234 case NFI_NXM_OF_ETH_SRC:
1235 case NFI_NXM_OF_ETH_TYPE:
1236 case NFI_NXM_OF_IP_TOS:
1237 case NFI_NXM_OF_IP_PROTO:
1238 case NFI_NXM_OF_ARP_OP:
1239 case NFI_NXM_OF_IP_SRC:
1240 case NFI_NXM_OF_ARP_SPA:
1241 case NFI_NXM_OF_IP_DST:
1242 case NFI_NXM_OF_ARP_TPA:
1243 case NFI_NXM_OF_TCP_SRC:
1244 case NFI_NXM_OF_UDP_SRC:
1245 case NFI_NXM_OF_TCP_DST:
1246 case NFI_NXM_OF_UDP_DST:
1247 case NFI_NXM_OF_ICMP_TYPE:
1248 case NFI_NXM_OF_ICMP_CODE:
1249 case NFI_NXM_NX_TUN_ID_W:
1250 case NFI_NXM_OF_ETH_DST_W:
1251 case NFI_NXM_OF_VLAN_TCI_W:
1252 case NFI_NXM_OF_IP_SRC_W:
1253 case NFI_NXM_OF_IP_DST_W:
1254 case NFI_NXM_OF_ARP_SPA_W:
1255 case NFI_NXM_OF_ARP_TPA_W:
1256 case NFI_NXM_NX_ARP_SHA:
1257 case NFI_NXM_NX_ARP_THA:
1264 nxm_execute_reg_move(const struct nx_action_reg_move *action,
1268 int n_bits = ntohs(action->n_bits);
1269 uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
1271 /* Get the interesting bits of the source field. */
1272 const struct nxm_field *src = nxm_field_lookup(ntohl(action->src));
1273 int src_ofs = ntohs(action->src_ofs);
1274 uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs);
1276 /* Get the remaining bits of the destination field. */
1277 const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
1278 int dst_ofs = ntohs(action->dst_ofs);
1279 uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);
1281 /* Get the final value. */
1282 uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs);
1284 nxm_write_field(dst, flow, new_data);
1288 nxm_execute_reg_load(const struct nx_action_reg_load *action,
1292 int n_bits = nxm_decode_n_bits(action->ofs_nbits);
1293 uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
1295 /* Get source data. */
1296 uint64_t src_data = ntohll(action->value);
1298 /* Get remaining bits of the destination field. */
1299 const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
1300 int dst_ofs = nxm_decode_ofs(action->ofs_nbits);
1301 uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);
1303 /* Get the final value. */
1304 uint64_t new_data = dst_data | (src_data << dst_ofs);
1306 nxm_write_field(dst, flow, new_data);