2 * Copyright (c) 2010 Nicira Networks.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include "classifier.h"
22 #include "dynamic-string.h"
25 #include "openflow/nicira-ext.h"
27 #include "unaligned.h"
/* Declares this file's logging module name for the OVS VLOG framework. */
30 VLOG_DEFINE_THIS_MODULE(nx_match);
32 /* Rate limit for nx_match parse errors. These always indicate a bug in the
33 * peer and so there's not much point in showing a lot of them. */
34 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* OpenFlow error codes returned when nx_match contents are malformed.
 * NOTE(review): the "enum { ... }" wrapper lines are missing from this
 * excerpt (gap in the embedded line numbering). */
37 NXM_INVALID = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_INVALID),
38 NXM_BAD_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_TYPE),
39 NXM_BAD_VALUE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_VALUE),
40 NXM_BAD_MASK = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_MASK),
41 NXM_BAD_PREREQ = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_PREREQ),
42 NXM_DUP_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_DUP_TYPE),
/* Unlike the entries above, this one reports a bad action argument. */
43 BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT)
46 /* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from
48 enum nxm_field_index {
/* X-macro: nx-match.def invokes DEFINE_FIELD once per field, so including it
 * here emits one NFI_NXM_* enumerator per field.  NOTE(review): the #undef
 * and closing "};" (and likely an N_NXM_FIELDS sentinel) are missing from
 * this excerpt. */
49 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) NFI_NXM_##HEADER,
50 #include "nx-match.def"
/* Metadata describing one NXM match field.  NOTE(review): the enclosing
 * "struct nxm_field {" opener is missing from this excerpt. */
55 struct hmap_node hmap_node;
56 enum nxm_field_index index; /* NFI_* value. */
57 uint32_t header; /* NXM_* value. */
58 uint32_t wildcard; /* Wildcard bit, if exactly one. */
59 ovs_be16 dl_type; /* dl_type prerequisite, if nonzero. */
60 uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */
61 const char *name; /* "NXM_*" string. */
64 /* All the known fields. */
65 static struct nxm_field nxm_fields[N_NXM_FIELDS] = {
/* X-macro expansion: emits one initializer per field listed in nx-match.def,
 * in the same order as the NFI_* enumerators above. */
66 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) \
67 { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \
68 CONSTANT_HTONS(DL_TYPE), NW_PROTO, "NXM_" #HEADER },
69 #include "nx-match.def"
72 /* Hash table of 'nxm_fields'. */
/* Keyed by hash_int(header, 0); populated lazily by the init code below. */
73 static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields);
75 /* Possible masks for NXM_OF_ETH_DST_W. */
/* Match nothing (fully wildcarded destination). */
76 static const uint8_t eth_all_0s[ETH_ADDR_LEN]
77 = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
/* Match the full address exactly. */
78 static const uint8_t eth_all_1s[ETH_ADDR_LEN]
79 = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* Match only the multicast bit (bit 0 of the first byte). */
80 static const uint8_t eth_mcast_1[ETH_ADDR_LEN]
81 = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
/* Match everything except the multicast bit. */
82 static const uint8_t eth_mcast_0[ETH_ADDR_LEN]
83 = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff};
/* Lazy one-time initialization: on first use, insert every entry of
 * nxm_fields[] into the 'all_nxm_fields' hash table, keyed by the hash of
 * its NXM header.  NOTE(review): the function signature and several body
 * lines are missing from this excerpt. */
88 if (hmap_is_empty(&all_nxm_fields)) {
91 for (i = 0; i < N_NXM_FIELDS; i++) {
92 struct nxm_field *f = &nxm_fields[i];
93 hmap_insert(&all_nxm_fields, &f->hmap_node,
94 hash_int(f->header, 0));
97 /* Verify that the header values are unique (duplicate "case" values
98 * cause a compile error). */
/* A switch statement (opener missing here) whose cases are generated from
 * nx-match.def; two fields sharing a header would produce duplicate cases. */
100 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) \
101 case NXM_##HEADER: break;
102 #include "nx-match.def"
/* Looks up the nxm_field metadata whose header equals 'header' in the
 * 'all_nxm_fields' hash table.  NOTE(review): the opening brace, the body of
 * the if, the return statements, and the not-found fallback are missing from
 * this excerpt; presumably it returns NULL when no field matches — confirm
 * against the full source. */
107 static const struct nxm_field *
108 nxm_field_lookup(uint32_t header)
114 HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0),
116 if (f->header == header) {
124 /* Returns the width of the data for a field with the given 'header', in
127 nxm_field_bytes(uint32_t header)
129 unsigned int length = NXM_LENGTH(header);
/* A masked entry's payload carries the value followed by an equal-sized
 * mask, so the value itself occupies half the encoded length. */
130 return NXM_HASMASK(header) ? length / 2 : length;
133 /* Returns the width of the data for a field with the given 'header', in
136 nxm_field_bits(uint32_t header)
/* Same as nxm_field_bytes(), expressed in bits. */
138 return nxm_field_bytes(header) * 8;
141 /* nx_pull_match() and helpers. */
/* Applies a VLAN TCI value/mask pair to 'rule'.  Returns 0 on success or an
 * NXM_* error code.  NOTE(review): the return type line, opening brace, and
 * the duplicate-entry error return inside the if are missing from this
 * excerpt. */
144 parse_tci(struct cls_rule *rule, ovs_be16 tci, ovs_be16 mask)
146 enum { OFPFW_DL_TCI = OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP };
/* If either VLAN wildcard bit is already cleared, a TCI entry was parsed
 * earlier, i.e. this is a duplicate. */
147 if ((rule->wc.wildcards & OFPFW_DL_TCI) != OFPFW_DL_TCI) {
150 return cls_rule_set_dl_tci_masked(rule, tci, mask) ? 0 : NXM_INVALID;
/* Parses an NXM_NX_REG* (possibly masked) entry into register index
 * NXM_NX_REG_IDX(f->header) of 'flow'/'wc'.  NOTE(review): the return type,
 * braces, the duplicate-entry error return, the exact-match mask operand,
 * and the final return are missing from this excerpt. */
155 parse_nx_reg(const struct nxm_field *f,
156 struct flow *flow, struct flow_wildcards *wc,
157 const void *value, const void *maskp)
159 int idx = NXM_NX_REG_IDX(f->header);
/* A nonzero stored mask means this register was already matched: duplicate. */
160 if (wc->reg_masks[idx]) {
/* Masked variant reads the mask from the entry payload; the exact-match
 * operand (presumably UINT32_MAX) is on a line missing from this excerpt. */
163 flow_wildcards_set_reg_mask(wc, idx,
164 (NXM_HASMASK(f->header)
165 ? ntohl(get_unaligned_u32(maskp))
167 flow->regs[idx] = ntohl(get_unaligned_u32(value));
/* Normalize: keep only the bits covered by the mask. */
168 flow->regs[idx] &= wc->reg_masks[idx];
/* Applies one validated nxm_entry ('value' and, for masked fields, 'mask'
 * point into the entry payload) to 'rule', dispatching on f->index.
 * Returns 0 on success or an NXM_*/BAD_* error code.  NOTE(review): this
 * excerpt is heavily sampled — the switch opener, many break/return
 * statements, duplicate-entry error returns, and the default case are
 * missing (gaps in the embedded line numbering). */
174 parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f,
175 const void *value, const void *mask)
177 struct flow_wildcards *wc = &rule->wc;
178 struct flow *flow = &rule->flow;
182 case NFI_NXM_OF_IN_PORT:
183 flow->in_port = ntohs(get_unaligned_u16(value));
/* Translate the OpenFlow "local" port number to the datapath's. */
184 if (flow->in_port == OFPP_LOCAL) {
185 flow->in_port = ODPP_LOCAL;
189 /* Ethernet header. */
190 case NFI_NXM_OF_ETH_DST:
/* Both dl_dst wildcard bits still set means no prior ETH_DST entry. */
191 if ((wc->wildcards & (OFPFW_DL_DST | FWW_ETH_MCAST))
192 != (OFPFW_DL_DST | FWW_ETH_MCAST)) {
195 wc->wildcards &= ~(OFPFW_DL_DST | FWW_ETH_MCAST);
196 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
/* Masked dl_dst: only the four specific masks declared above are accepted. */
199 case NFI_NXM_OF_ETH_DST_W:
200 if ((wc->wildcards & (OFPFW_DL_DST | FWW_ETH_MCAST))
201 != (OFPFW_DL_DST | FWW_ETH_MCAST)) {
203 } else if (eth_addr_equals(mask, eth_mcast_1)) {
/* Match only the multicast bit of the destination. */
204 wc->wildcards &= ~FWW_ETH_MCAST;
205 flow->dl_dst[0] = *(uint8_t *) value & 0x01;
206 } else if (eth_addr_equals(mask, eth_mcast_0)) {
/* Match everything except the multicast bit. */
207 wc->wildcards &= ~OFPFW_DL_DST;
208 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
209 flow->dl_dst[0] &= 0xfe;
210 } else if (eth_addr_equals(mask, eth_all_0s)) {
211 } else if (eth_addr_equals(mask, eth_all_1s)) {
/* Exact match on the full address. */
213 wc->wildcards &= ~(OFPFW_DL_DST | FWW_ETH_MCAST);
214 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
219 case NFI_NXM_OF_ETH_SRC:
220 memcpy(flow->dl_src, value, ETH_ADDR_LEN);
222 case NFI_NXM_OF_ETH_TYPE:
/* Stored in network byte order, as carried on the wire. */
223 flow->dl_type = get_unaligned_u16(value);
227 case NFI_NXM_OF_VLAN_TCI:
/* Exact-match TCI: all-ones mask. */
228 return parse_tci(rule, get_unaligned_u16(value), htons(UINT16_MAX));
230 case NFI_NXM_OF_VLAN_TCI_W:
231 return parse_tci(rule, get_unaligned_u16(value),
232 get_unaligned_u16(mask));
235 case NFI_NXM_OF_IP_TOS:
/* The low two bits of the TOS byte are the ECN field, which NXM does not
 * allow to be matched; reject values that set them. */
236 if (*(uint8_t *) value & 0x03) {
237 return NXM_BAD_VALUE;
239 flow->nw_tos = *(uint8_t *) value;
242 case NFI_NXM_OF_IP_PROTO:
243 flow->nw_proto = *(uint8_t *) value;
246 /* IP addresses in IP and ARP headers. */
247 case NFI_NXM_OF_IP_SRC:
248 case NFI_NXM_OF_ARP_SPA:
/* Nonzero stored mask means a prior src-address entry: duplicate. */
249 if (wc->nw_src_mask) {
252 cls_rule_set_nw_src(rule, get_unaligned_u32(value));
255 case NFI_NXM_OF_IP_SRC_W:
256 case NFI_NXM_OF_ARP_SPA_W:
257 if (wc->nw_src_mask) {
260 ovs_be32 ip = get_unaligned_u32(value);
261 ovs_be32 netmask = get_unaligned_u32(mask);
/* cls_rule_set_nw_src_masked() rejects non-CIDR masks. */
262 if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) {
267 case NFI_NXM_OF_IP_DST:
268 case NFI_NXM_OF_ARP_TPA:
269 if (wc->nw_dst_mask) {
272 cls_rule_set_nw_dst(rule, get_unaligned_u32(value));
275 case NFI_NXM_OF_IP_DST_W:
276 case NFI_NXM_OF_ARP_TPA_W:
277 if (wc->nw_dst_mask) {
280 ovs_be32 ip = get_unaligned_u32(value);
281 ovs_be32 netmask = get_unaligned_u32(mask);
282 if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) {
289 case NFI_NXM_OF_TCP_SRC:
290 flow->tp_src = get_unaligned_u16(value);
292 case NFI_NXM_OF_TCP_DST:
293 flow->tp_dst = get_unaligned_u16(value);
297 case NFI_NXM_OF_UDP_SRC:
298 flow->tp_src = get_unaligned_u16(value);
300 case NFI_NXM_OF_UDP_DST:
301 flow->tp_dst = get_unaligned_u16(value);
/* ICMP type/code are single bytes but stored in the 16-bit tp_* fields. */
305 case NFI_NXM_OF_ICMP_TYPE:
306 flow->tp_src = htons(*(uint8_t *) value);
308 case NFI_NXM_OF_ICMP_CODE:
309 flow->tp_dst = htons(*(uint8_t *) value);
313 case NFI_NXM_OF_ARP_OP:
/* ARP opcode is 16 bits on the wire but stored in 8-bit nw_proto; reject
 * values that would not fit. */
314 if (ntohs(get_unaligned_u16(value)) > 255) {
315 return NXM_BAD_VALUE;
317 flow->nw_proto = ntohs(get_unaligned_u16(value));
322 case NFI_NXM_NX_TUN_ID:
/* NOTE(review): the 64-bit wire value is truncated to the 32-bit tun_id
 * field here — verify against the flow struct definition. */
323 flow->tun_id = htonl(ntohll(get_unaligned_u64(value)));
327 case NFI_NXM_NX_REG0:
328 case NFI_NXM_NX_REG0_W:
330 case NFI_NXM_NX_REG1:
331 case NFI_NXM_NX_REG1_W:
334 case NFI_NXM_NX_REG2:
335 case NFI_NXM_NX_REG2_W:
338 case NFI_NXM_NX_REG3:
339 case NFI_NXM_NX_REG3_W:
/* All register fields share one handler. */
344 return parse_nx_reg(f, flow, wc, value, mask);
/* Returns true if 'flow' satisfies 'field''s prerequisites: a zero dl_type
 * prerequisite always passes; otherwise dl_type must match, and, if the
 * field also has an nw_proto prerequisite, that must match too.
 * NOTE(review): the return type line and braces are missing from this
 * excerpt. */
353 nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow)
355 return (!field->dl_type
356 || (field->dl_type == flow->dl_type
357 && (!field->nw_proto || field->nw_proto == flow->nw_proto)));
/* Validates the nxm_entry at 'p' given 'match_len' bytes remaining: checks
 * that a full 4-byte header is present, that the header's encoded payload
 * length is sane, and that the whole entry fits in 'match_len'.
 * NOTE(review): the return type, several declarations, the error "return 0"
 * paths, and the success return (presumably the decoded header) are missing
 * from this excerpt. */
361 nx_entry_ok(const void *p, unsigned int match_len)
363 unsigned int payload_len;
369 VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
/* memcpy tolerates an unaligned source; 'p' has no alignment guarantee. */
373 memcpy(&header_be, p, 4);
374 header = ntohl(header_be);
376 payload_len = NXM_LENGTH(header);
378 VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
/* The full entry is the 4-byte header plus the payload. */
382 if (match_len < payload_len + 4) {
383 VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
384 "%u bytes left in nx_match", payload_len + 4, match_len);
/* Pulls 'match_len' bytes (padded to a multiple of 8) of nx_match from the
 * front of 'b' and parses them into 'rule', which is initialized as a
 * catch-all with the given 'priority'.  Returns 0 on success, otherwise an
 * OpenFlow error code.  NOTE(review): the return type, braces, and some
 * loop-advance lines are missing from this excerpt. */
392 nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority,
393 struct cls_rule *rule)
398 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
400 VLOG_DBG_RL(&rl, "nx_match length %zu, rounded up to a "
401 "multiple of 8, is longer than space in message (max "
402 "length %zu)", match_len, b->size);
403 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
406 cls_rule_init_catchall(rule, priority);
/* nx_entry_ok() returns the decoded header, or 0 at end/error. */
407 while ((header = nx_entry_ok(p, match_len)) != 0) {
408 unsigned length = NXM_LENGTH(header);
409 const struct nxm_field *f;
412 f = nxm_field_lookup(header);
414 error = NXM_BAD_TYPE;
415 } else if (!nxm_prereqs_ok(f, &rule->flow)) {
416 error = NXM_BAD_PREREQ;
/* Wildcard bit already cleared means this field appeared before. */
417 } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) {
418 error = NXM_DUP_TYPE;
420 /* 'hasmask' and 'length' are known to be correct at this point
421 * because they are included in 'header' and nxm_field_lookup()
422 * checked them already. */
423 rule->wc.wildcards &= ~f->wildcard;
/* Value starts after the 4-byte header; for masked fields the mask
 * follows the value at half the payload length. */
424 error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2);
427 VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", "
428 "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" "
430 NXM_VENDOR(header), NXM_FIELD(header),
431 NXM_HASMASK(header), NXM_TYPE(header),
438 match_len -= 4 + length;
/* Leftover bytes that do not form a valid entry make the match invalid. */
441 return match_len ? NXM_INVALID : 0;
444 /* nx_put_match() and helpers.
446 * 'put' functions whose names end in 'w' add a wildcarded field.
447 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
448 * Other 'put' functions add exact-match fields.
/* Serialization helpers: each appends one nxm_entry (header plus value, and
 * mask where applicable) to ofpbuf 'b'.  NOTE(review): return types, braces,
 * and some case labels are missing throughout this excerpt. */
452 nxm_put_header(struct ofpbuf *b, uint32_t header)
454 ovs_be32 n_header = htonl(header);
455 ofpbuf_put(b, &n_header, sizeof n_header);
/* 1-byte exact-match field. */
459 nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
461 nxm_put_header(b, header);
462 ofpbuf_put(b, &value, sizeof value);
/* 2-byte exact-match field. */
466 nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
468 nxm_put_header(b, header);
469 ofpbuf_put(b, &value, sizeof value);
/* 2-byte wildcarded field: value followed by mask. */
473 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
475 nxm_put_header(b, header);
476 ofpbuf_put(b, &value, sizeof value);
477 ofpbuf_put(b, &mask, sizeof mask);
/* 4-byte exact-match field. */
481 nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
483 nxm_put_header(b, header);
484 ofpbuf_put(b, &value, sizeof value);
/* 4-byte wildcarded field. */
488 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
490 nxm_put_header(b, header);
491 ofpbuf_put(b, &value, sizeof value);
492 ofpbuf_put(b, &mask, sizeof mask);
/* 4-byte maybe-masked field: emits the exact-match form or the wildcarded
 * form depending on 'mask'.  NOTE(review): the branch conditions (presumably
 * all-ones vs. partial mask, and skip when mask is zero) are missing from
 * this excerpt. */
496 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
503 nxm_put_32(b, header, value);
507 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
/* 8-byte exact-match field. */
513 nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
515 nxm_put_header(b, header);
516 ofpbuf_put(b, &value, sizeof value);
/* 6-byte Ethernet-address exact-match field. */
520 nxm_put_eth(struct ofpbuf *b, uint32_t header,
521 const uint8_t value[ETH_ADDR_LEN])
523 nxm_put_header(b, header);
524 ofpbuf_put(b, value, ETH_ADDR_LEN);
/* Emits the dl_dst field in whichever of the four supported forms matches
 * the OFPFW_DL_DST / FWW_ETH_MCAST wildcard combination in 'wc'. */
528 nxm_put_eth_dst(struct ofpbuf *b,
529 uint32_t wc, const uint8_t value[ETH_ADDR_LEN])
531 switch (wc & (OFPFW_DL_DST | FWW_ETH_MCAST)) {
/* Fully wildcarded: emit nothing. */
532 case OFPFW_DL_DST | FWW_ETH_MCAST:
/* Only the multicast bit is matched. */
535 nxm_put_header(b, NXM_OF_ETH_DST_W);
536 ofpbuf_put(b, value, ETH_ADDR_LEN);
537 ofpbuf_put(b, eth_mcast_1, ETH_ADDR_LEN);
/* Everything except the multicast bit is matched. */
540 nxm_put_header(b, NXM_OF_ETH_DST_W);
541 ofpbuf_put(b, value, ETH_ADDR_LEN);
542 ofpbuf_put(b, eth_mcast_0, ETH_ADDR_LEN);
/* Exact match. */
545 nxm_put_eth(b, NXM_OF_ETH_DST, value);
/* Serializes 'cr' as an nx_match at the tail of 'b', padded with zeros to a
 * multiple of 8 bytes.  Presumably returns the unpadded match length
 * ('match_len') — the return type and final return statement are missing
 * from this excerpt, along with various braces and case labels. */
551 nx_put_match(struct ofpbuf *b, const struct cls_rule *cr)
553 const uint32_t wc = cr->wc.wildcards;
554 const struct flow *flow = &cr->flow;
555 const size_t start_len = b->size;
561 if (!(wc & OFPFW_IN_PORT)) {
562 uint16_t in_port = flow->in_port;
/* Translate the datapath's local port back to the OpenFlow number. */
563 if (in_port == ODPP_LOCAL) {
564 in_port = OFPP_LOCAL;
566 nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
570 nxm_put_eth_dst(b, wc, flow->dl_dst);
571 if (!(wc & OFPFW_DL_SRC)) {
572 nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src);
574 if (!(wc & OFPFW_DL_TYPE)) {
575 nxm_put_16(b, NXM_OF_ETH_TYPE, flow->dl_type);
/* Reassemble the VLAN TCI from the separate vid/pcp flow fields. */
579 vid = flow->dl_vlan & htons(VLAN_VID_MASK);
580 pcp = htons((flow->dl_vlan_pcp << VLAN_PCP_SHIFT) & VLAN_PCP_MASK);
581 switch (wc & (OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP)) {
/* Both wildcarded: emit nothing. */
582 case OFPFW_DL_VLAN | OFPFW_DL_VLAN_PCP:
/* VID wildcarded, PCP matched: mask covers PCP bits plus CFI. */
585 nxm_put_16w(b, NXM_OF_VLAN_TCI_W, pcp | htons(VLAN_CFI),
586 htons(VLAN_PCP_MASK | VLAN_CFI));
588 case OFPFW_DL_VLAN_PCP:
/* "No VLAN" is encoded as an exact-match TCI of zero. */
589 if (flow->dl_vlan == htons(OFP_VLAN_NONE)) {
590 nxm_put_16(b, NXM_OF_VLAN_TCI, 0);
592 nxm_put_16w(b, NXM_OF_VLAN_TCI_W, vid | htons(VLAN_CFI),
593 htons(VLAN_VID_MASK | VLAN_CFI));
/* Neither wildcarded: exact-match TCI. */
597 if (flow->dl_vlan == htons(OFP_VLAN_NONE)) {
598 nxm_put_16(b, NXM_OF_VLAN_TCI, 0);
600 nxm_put_16(b, NXM_OF_VLAN_TCI, vid | pcp | htons(VLAN_CFI));
/* L3: IPv4 fields, then L4 by protocol. */
605 if (!(wc & OFPFW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
607 if (!(wc & OFPFW_NW_TOS)) {
/* Mask off the two ECN bits, which NXM does not carry in IP_TOS. */
608 nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
610 nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
611 nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);
613 if (!(wc & OFPFW_NW_PROTO)) {
614 nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
615 switch (flow->nw_proto) {
/* TCP ports (case label missing from this excerpt). */
618 if (!(wc & OFPFW_TP_SRC)) {
619 nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
621 if (!(wc & OFPFW_TP_DST)) {
622 nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
/* UDP ports. */
628 if (!(wc & OFPFW_TP_SRC)) {
629 nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
631 if (!(wc & OFPFW_TP_DST)) {
632 nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
/* ICMP type/code, stored byte-swapped in the 16-bit tp_* fields. */
638 if (!(wc & OFPFW_TP_SRC)) {
639 nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src));
641 if (!(wc & OFPFW_TP_DST)) {
642 nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst));
647 } else if (!(wc & OFPFW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
/* For ARP, nw_proto holds the opcode and nw_src/nw_dst the SPA/TPA. */
649 if (!(wc & OFPFW_NW_PROTO)) {
650 nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto));
652 nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask);
653 nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask);
657 if (!(wc & NXFW_TUN_ID)) {
/* 32-bit tun_id widened to the 64-bit NXM field. */
658 nxm_put_64(b, NXM_NX_TUN_ID, htonll(ntohl(flow->tun_id)));
662 for (i = 0; i < FLOW_N_REGS; i++) {
663 nxm_put_32m(b, NXM_NX_REG(i),
664 htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
/* Pad the serialized match to a multiple of 8 bytes. */
667 match_len = b->size - start_len;
668 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
672 /* nx_match_to_string() and helpers. */
/* Formats the nx_match at 'p' (of 'match_len' bytes) as a human-readable,
 * heap-allocated string of the form "FIELD(hexvalue[/hexmask]), ...";
 * unknown fields are printed as "vendor:field".  Caller frees the result.
 * NOTE(review): the return type, braces, loop-advance lines, and the
 * trailing-bytes loop structure are partially missing from this excerpt. */
675 nx_match_to_string(const uint8_t *p, unsigned int match_len)
/* An empty match is rendered as "<any>". */
681 return xstrdup("<any>");
685 while ((header = nx_entry_ok(p, match_len)) != 0) {
686 unsigned int length = NXM_LENGTH(header);
687 unsigned int value_len = nxm_field_bytes(header);
688 const uint8_t *value = p + 4;
689 const uint8_t *mask = value + value_len;
690 const struct nxm_field *f;
/* Comma-separate entries after the first. */
694 ds_put_cstr(&s, ", ");
697 f = nxm_field_lookup(header);
699 ds_put_cstr(&s, f->name);
/* Unknown field: print raw vendor and field numbers. */
701 ds_put_format(&s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
704 ds_put_char(&s, '(');
706 for (i = 0; i < value_len; i++) {
707 ds_put_format(&s, "%02x", value[i]);
709 if (NXM_HASMASK(header)) {
710 ds_put_char(&s, '/');
711 for (i = 0; i < value_len; i++) {
712 ds_put_format(&s, "%02x", mask[i]);
715 ds_put_char(&s, ')');
718 match_len -= 4 + length;
723 ds_put_cstr(&s, ", ");
/* Any residue that failed nx_entry_ok() is reported, not silently dropped. */
726 ds_put_format(&s, "<%u invalid bytes>", match_len);
729 return ds_steal_cstr(&s);
/* Linear search of nxm_fields[] for the entry whose name is exactly the
 * 'name_len'-byte string at 'name'.  NOTE(review): the return statements
 * (match found / NULL fallback) are missing from this excerpt. */
732 static const struct nxm_field *
733 lookup_nxm_field(const char *name, int name_len)
735 const struct nxm_field *f;
737 for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) {
/* strncmp() plus the terminator check gives an exact-length match. */
738 if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') {
/* Reads 'n' bytes written as 2*n hex digits from 's', appending each decoded
 * byte to 'b'.  Aborts the program on malformed input (this is parsing code
 * for a command-line tool, hence ovs_fatal).  Presumably returns the advanced
 * 's' pointer — the return type, loop header, and return statement are
 * missing from this excerpt. */
747 parse_hex_bytes(struct ofpbuf *b, const char *s, unsigned int n)
754 low = hexit_value(*s);
/* Short-circuit: if the first digit is bad, do not read s[1]. */
755 high = low < 0 ? low : hexit_value(s[1]);
756 if (low < 0 || high < 0) {
757 ovs_fatal(0, "%.2s: hex digits expected", s);
/* NOTE(review): byte = 16 * low + high treats *s as the high nibble despite
 * the variable names being swapped — confirm against the full source. */
760 byte = 16 * low + high;
761 ofpbuf_put(b, &byte, 1);
767 /* nx_match_from_string(). */
/* Parses the string representation produced by nx_match_to_string() back
 * into binary nx_match form at the tail of 'b', zero-padding to a multiple
 * of 8 bytes.  Aborts on malformed input (command-line parsing code).
 * Presumably returns the unpadded match length — the return type and return
 * statements are missing from this excerpt. */
770 nx_match_from_string(const char *s, struct ofpbuf *b)
772 const char *full_s = s;
773 const size_t start_len = b->size;
776 if (!strcmp(s, "<any>")) {
777 /* Ensure that 'b->data' isn't actually null. */
778 ofpbuf_prealloc_tailroom(b, 1);
/* Entries are separated by commas and/or spaces. */
782 for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
783 const struct nxm_field *f;
786 name_len = strcspn(s, "(");
787 if (s[name_len] != '(') {
788 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
791 f = lookup_nxm_field(s, name_len);
793 ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
798 nxm_put_header(b, f->header);
799 s = parse_hex_bytes(b, s, nxm_field_bytes(f->header));
800 if (NXM_HASMASK(f->header)) {
/* Masked fields require a "value/mask" pair. */
803 ovs_fatal(0, "%s: missing / in masked field %s",
806 s = parse_hex_bytes(b, s + 1, nxm_field_bytes(f->header));
811 ovs_fatal(0, "%s: missing ) following field %s", full_s, f->name);
/* Pad to a multiple of 8 bytes, matching nx_put_match(). */
816 match_len = b->size - start_len;
817 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
821 /* nxm_check_reg_move(), nxm_check_reg_load(). */
/* Returns true if 'f' names a known, maskless field whose prerequisites are
 * met by 'flow' and whose width (in bits) is at least 'size' — i.e. it is a
 * valid operand for a reg_move/reg_load of that size.  NOTE(review): the
 * return type line and braces are missing from this excerpt. */
824 field_ok(const struct nxm_field *f, const struct flow *flow, int size)
826 return (f && !NXM_HASMASK(f->header)
827 && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header));
/* Validates an NXAST_REG_MOVE action against 'flow': nonzero bit count,
 * source and destination fields valid and wide enough for ofs+n_bits, and a
 * writable destination (a register, VLAN_TCI, or TUN_ID).  NOTE(review): the
 * return type, braces, and the specific error-return values are missing from
 * this excerpt. */
831 nxm_check_reg_move(const struct nx_action_reg_move *action,
832 const struct flow *flow)
834 const struct nxm_field *src;
835 const struct nxm_field *dst;
/* A zero-width move is meaningless; reject it. */
837 if (action->n_bits == htons(0)) {
841 src = nxm_field_lookup(ntohl(action->src));
842 if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) {
846 dst = nxm_field_lookup(ntohl(action->dst));
847 if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) {
/* Only registers, the VLAN TCI, and the tunnel ID may be written. */
851 if (!NXM_IS_NX_REG(dst->header)
852 && dst->header != NXM_OF_VLAN_TCI
853 && dst->header != NXM_NX_TUN_ID) {
/* Validates an NXAST_REG_LOAD action against 'flow'.  'ofs_nbits' packs the
 * bit offset in its high bits and (bit count - 1) in its low 6 bits.
 * NOTE(review): the return type, braces, and error-return values are missing
 * from this excerpt. */
861 nxm_check_reg_load(const struct nx_action_reg_load *action,
862 const struct flow *flow)
864 const struct nxm_field *dst;
867 ofs = ntohs(action->ofs_nbits) >> 6;
868 n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1;
869 dst = nxm_field_lookup(ntohl(action->dst));
870 if (!field_ok(dst, flow, ofs + n_bits)) {
874 /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
/* The n_bits == 64 case must be skipped: shifting a 64-bit value by 64
 * would be undefined behavior, and no bit can be out of range then. */
876 if (n_bits < 64 && ntohll(action->value) >> n_bits) {
/* reg_load may only target the NX registers. */
880 if (!NXM_IS_NX_REG(dst->header)) {
887 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
/* Reads field 'src' out of 'flow', returned host-byte-order in (presumably)
 * a uint64_t — the return type line is missing from this excerpt, as are the
 * switch opener, the VLAN "no VLAN" early return, the IP_TOS return, the
 * masked-register NOT_REACHED arms, and the default case. */
890 nxm_read_field(const struct nxm_field *src, const struct flow *flow)
892 switch (src->index) {
893 case NFI_NXM_OF_IN_PORT:
/* Translate the datapath local port to its OpenFlow number. */
894 return flow->in_port == ODPP_LOCAL ? OFPP_LOCAL : flow->in_port;
896 case NFI_NXM_OF_ETH_DST:
897 return eth_addr_to_uint64(flow->dl_dst);
899 case NFI_NXM_OF_ETH_SRC:
900 return eth_addr_to_uint64(flow->dl_src);
902 case NFI_NXM_OF_ETH_TYPE:
903 return ntohs(flow->dl_type);
905 case NFI_NXM_OF_VLAN_TCI:
/* OFP_VLAN_NONE reads as TCI 0; otherwise reassemble vid|pcp|CFI. */
906 if (flow->dl_vlan == htons(OFP_VLAN_NONE)) {
909 return (ntohs(flow->dl_vlan & htons(VLAN_VID_MASK))
910 | ((flow->dl_vlan_pcp << VLAN_PCP_SHIFT) & VLAN_PCP_MASK)
914 case NFI_NXM_OF_IP_TOS:
917 case NFI_NXM_OF_IP_PROTO:
918 case NFI_NXM_OF_ARP_OP:
/* nw_proto doubles as the ARP opcode (see parse_nxm_entry). */
919 return flow->nw_proto;
921 case NFI_NXM_OF_IP_SRC:
922 case NFI_NXM_OF_ARP_SPA:
923 return ntohl(flow->nw_src);
925 case NFI_NXM_OF_IP_DST:
926 case NFI_NXM_OF_ARP_TPA:
927 return ntohl(flow->nw_dst);
929 case NFI_NXM_OF_TCP_SRC:
930 case NFI_NXM_OF_UDP_SRC:
931 return ntohs(flow->tp_src);
933 case NFI_NXM_OF_TCP_DST:
934 case NFI_NXM_OF_UDP_DST:
935 return ntohs(flow->tp_dst);
937 case NFI_NXM_OF_ICMP_TYPE:
938 return ntohs(flow->tp_src) & 0xff;
940 case NFI_NXM_OF_ICMP_CODE:
941 return ntohs(flow->tp_dst) & 0xff;
943 case NFI_NXM_NX_TUN_ID:
944 return ntohl(flow->tun_id);
/* Registers are stored in host byte order already.  The _W arms fall in a
 * region missing from this excerpt (presumably NOT_REACHED, since masked
 * fields are rejected by field_ok()). */
946 #define NXM_READ_REGISTER(IDX) \
947 case NFI_NXM_NX_REG##IDX: \
948 return flow->regs[IDX]; \
949 case NFI_NXM_NX_REG##IDX##_W: \
952 NXM_READ_REGISTER(0);
954 NXM_READ_REGISTER(1);
957 NXM_READ_REGISTER(2);
960 NXM_READ_REGISTER(3);
/* Masked field indexes cannot occur here; field_ok() filters them out. */
966 case NFI_NXM_OF_ETH_DST_W:
967 case NFI_NXM_OF_VLAN_TCI_W:
968 case NFI_NXM_OF_IP_SRC_W:
969 case NFI_NXM_OF_IP_DST_W:
970 case NFI_NXM_OF_ARP_SPA_W:
971 case NFI_NXM_OF_ARP_TPA_W:
/* Executes a validated NXAST_REG_MOVE: copies 'n_bits' bits starting at
 * 'src_ofs' of the source field into the destination field at 'dst_ofs',
 * leaving the destination's other bits unchanged.  NOTE(review): the return
 * type line, the 'flow' parameter line, and braces are missing from this
 * excerpt. */
980 nxm_execute_reg_move(const struct nx_action_reg_move *action,
/* n_bits == 64 is special-cased: (1 << 64) would be undefined behavior. */
984 int n_bits = ntohs(action->n_bits);
985 uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
987 /* Get the interesting bits of the source field. */
988 const struct nxm_field *src = nxm_field_lookup(ntohl(action->src));
989 int src_ofs = ntohs(action->src_ofs);
990 uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs);
992 /* Get the remaining bits of the destination field. */
993 const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
994 int dst_ofs = ntohs(action->dst_ofs);
995 uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);
997 /* Get the final value. */
998 uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs);
1000 /* Store the result. */
/* Destination was already restricted by nxm_check_reg_move() to a register,
 * the VLAN TCI, or the tunnel ID. */
1001 if (NXM_IS_NX_REG(dst->header)) {
1002 flow->regs[NXM_NX_REG_IDX(dst->header)] = new_data;
1003 } else if (dst->header == NXM_OF_VLAN_TCI) {
/* A TCI without the CFI bit set means "no VLAN", stored as zero. */
1004 ovs_be16 vlan_tci = htons(new_data & VLAN_CFI ? new_data : 0);
1005 flow->dl_vlan = htons(vlan_tci_to_vid(vlan_tci));
1006 flow->dl_vlan_pcp = vlan_tci_to_pcp(vlan_tci);
1007 } else if (dst->header == NXM_NX_TUN_ID) {
1008 flow->tun_id = htonl(new_data);
/* Executes a validated NXAST_REG_LOAD: writes the low 'n_bits' bits of the
 * action's immediate value into the destination register at 'dst_ofs',
 * preserving the register's other bits.  NOTE(review): the return type line,
 * the 'flow' parameter line, and the closing brace fall outside this
 * excerpt. */
1015 nxm_execute_reg_load(const struct nx_action_reg_load *action,
/* Low 6 bits of ofs_nbits encode (bit count - 1); n_bits == 32 is
 * special-cased because (1 << 32) on uint32_t would be undefined. */
1019 int n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1;
1020 uint32_t mask = n_bits == 32 ? UINT32_MAX : (UINT32_C(1) << n_bits) - 1;
1021 uint32_t *reg = &flow->regs[NXM_NX_REG_IDX(ntohl(action->dst))];
1023 /* Get source data. */
/* 64-bit wire value narrowed to 32 bits; nxm_check_reg_load() has already
 * rejected values with bits set at or above n_bits. */
1024 uint32_t src_data = ntohll(action->value);
1026 /* Get remaining bits of the destination field. */
1027 int dst_ofs = ntohs(action->ofs_nbits) >> 6;
1028 uint32_t dst_data = *reg & ~(mask << dst_ofs);
1030 *reg = dst_data | (src_data << dst_ofs);