2 * Copyright (c) 2010, 2011 Nicira Networks.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include <netinet/icmp6.h>
23 #include "classifier.h"
24 #include "dynamic-string.h"
27 #include "openflow/nicira-ext.h"
29 #include "unaligned.h"
/* Logging module registration and the rate limiter shared by all nx_match
 * parse-error messages in this file. */
32 VLOG_DEFINE_THIS_MODULE(nx_match);
34 /* Rate limit for nx_match parse errors. These always indicate a bug in the
35 * peer and so there's not much point in showing a lot of them. */
36 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* OpenFlow error codes returned by the NXM parsing/validation routines in
 * this file.  The first six wrap Nicira-vendor bad-request codes; BAD_ARGUMENT
 * is a plain OpenFlow bad-action error used by the reg_move/reg_load checks.
 * NOTE(review): the enclosing "enum { ... }" lines are not visible in this
 * chunk (the embedded line numbering jumps from 36 to 39) -- confirm against
 * the full file. */
39 NXM_INVALID = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_INVALID),
40 NXM_BAD_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_TYPE),
41 NXM_BAD_VALUE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_VALUE),
42 NXM_BAD_MASK = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_MASK),
43 NXM_BAD_PREREQ = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_PREREQ),
44 NXM_DUP_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_DUP_TYPE),
45 BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT)
/* Field-table machinery: an index enum generated from nx-match.def, a
 * per-field descriptor struct, the static table of all known NXM fields, and
 * a hash table used for lookup by 32-bit NXM header.
 * NOTE(review): several structural lines (the DEFINE_FIELD expansion inside
 * the enum, struct/array braces, N_NXM_FIELDS terminator) are elided in this
 * chunk -- the embedded line numbers show gaps. */
48 /* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from
50 enum nxm_field_index {
51 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \
53 #include "nx-match.def"
/* Members of the per-field descriptor (struct nxm_field; the "struct" header
 * line is not visible here). */
58 struct hmap_node hmap_node;
59 enum nxm_field_index index; /* NFI_* value. */
60 uint32_t header; /* NXM_* value. */
61 flow_wildcards_t wildcard; /* FWW_* bit, if exactly one. */
62 ovs_be16 dl_type[N_NXM_DL_TYPES]; /* dl_type prerequisites. */
63 uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */
64 const char *name; /* "NXM_*" string. */
65 bool writable; /* Writable with NXAST_REG_{MOVE,LOAD}? */
69 /* All the known fields. */
70 static struct nxm_field nxm_fields[N_NXM_FIELDS] = {
71 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \
72 { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \
73 DL_CONVERT DL_TYPES, NW_PROTO, "NXM_" #HEADER, WRITABLE },
/* DL_CONVERT turns a (T1, T2) dl_type pair into network-byte-order values at
 * compile time for the dl_type[] prerequisite array above. */
74 #define DL_CONVERT(T1, T2) { CONSTANT_HTONS(T1), CONSTANT_HTONS(T2) }
75 #include "nx-match.def"
78 /* Hash table of 'nxm_fields'. */
79 static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields);
/* Body fragment of the lazy one-time initializer: on first use, populates
 * 'all_nxm_fields' from the static 'nxm_fields' table, hashing each entry by
 * its 32-bit NXM header so nxm_field_lookup() can find it.
 * NOTE(review): the function signature line is not visible in this chunk
 * (embedded numbering jumps from 79 to 84) -- presumably this is nxm_init();
 * confirm against the full file. */
84 if (hmap_is_empty(&all_nxm_fields)) {
87 for (i = 0; i < N_NXM_FIELDS; i++) {
88 struct nxm_field *f = &nxm_fields[i];
89 hmap_insert(&all_nxm_fields, &f->hmap_node,
90 hash_int(f->header, 0));
93 /* Verify that the header values are unique (duplicate "case" values
94 * cause a compile error). */
96 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
97 case NXM_##HEADER: break;
98 #include "nx-match.def"
/* Looks up the descriptor for NXM 'header' in 'all_nxm_fields' by hash.
 * Callers (e.g. nx_pull_match, format_nxm_field_name) check the result for
 * NULL, so presumably an unknown header yields NULL -- the fallthrough
 * return is not visible in this chunk. */
103 static const struct nxm_field *
104 nxm_field_lookup(uint32_t header)
110 HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0),
112 if (f->header == header) {
/* Width of the field's value payload in bytes: for a masked (hasmask)
 * header, NXM_LENGTH covers value + mask, so the value is half of it. */
120 /* Returns the width of the data for a field with the given 'header', in
123 nxm_field_bytes(uint32_t header)
125 unsigned int length = NXM_LENGTH(header);
126 return NXM_HASMASK(header) ? length / 2 : length;
/* Same width expressed in bits. */
129 /* Returns the width of the data for a field with the given 'header', in
132 nxm_field_bits(uint32_t header)
134 return nxm_field_bytes(header) * 8;
137 /* nx_pull_match() and helpers. */
/* Parses one NXM_NX_REGn or NXM_NX_REGn_W entry into 'flow'/'wc'.
 * A nonzero existing reg mask means this register was already matched by an
 * earlier entry; presumably that path returns NXM_DUP_TYPE (the return is
 * not visible in this chunk). */
140 parse_nx_reg(const struct nxm_field *f,
141 struct flow *flow, struct flow_wildcards *wc,
142 const void *value, const void *maskp)
144 int idx = NXM_NX_REG_IDX(f->header);
145 if (wc->reg_masks[idx]) {
/* Masked entries carry an explicit mask; an exact-match entry implies an
 * all-ones mask (the non-hasmask operand is elided between lines 150/152). */
148 flow_wildcards_set_reg_mask(wc, idx,
149 (NXM_HASMASK(f->header)
150 ? ntohl(get_unaligned_be32(maskp))
152 flow->regs[idx] = ntohl(get_unaligned_be32(value));
153 flow->regs[idx] &= wc->reg_masks[idx];
/* Applies a single nxm_entry, already resolved to field descriptor 'f' with
 * payload 'value' (and 'mask' for *_W variants), to 'rule': copies the value
 * into rule->flow and narrows rule->wc.  Returns 0 on success or an NXM_*
 * error code (NXM_BAD_VALUE, NXM_BAD_PREREQ, ...).  Fields whose mask is
 * already nonzero are treated as duplicates -- presumably those branches
 * return NXM_DUP_TYPE (the returns are elided in this chunk).
 * The BUILD_ASSERT ties this switch to flow/wildcard layout version 1; it
 * must be revisited whenever FLOW_WC_SEQ changes. */
159 parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f,
160 const void *value, const void *mask)
162 struct flow_wildcards *wc = &rule->wc;
163 struct flow *flow = &rule->flow;
165 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1);
169 case NFI_NXM_OF_IN_PORT:
170 flow->in_port = ntohs(get_unaligned_be16(value));
173 /* Ethernet header. */
174 case NFI_NXM_OF_ETH_DST:
175 if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
176 != (FWW_DL_DST | FWW_ETH_MCAST)) {
179 wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
180 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
183 case NFI_NXM_OF_ETH_DST_W:
184 if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
185 != (FWW_DL_DST | FWW_ETH_MCAST)) {
187 } else if (flow_wildcards_is_dl_dst_mask_valid(mask)) {
188 cls_rule_set_dl_dst_masked(rule, value, mask);
193 case NFI_NXM_OF_ETH_SRC:
194 memcpy(flow->dl_src, value, ETH_ADDR_LEN);
196 case NFI_NXM_OF_ETH_TYPE:
197 flow->dl_type = ofputil_dl_type_from_openflow(get_unaligned_be16(value));
201 case NFI_NXM_OF_VLAN_TCI:
202 if (wc->vlan_tci_mask) {
205 cls_rule_set_dl_tci(rule, get_unaligned_be16(value));
208 case NFI_NXM_OF_VLAN_TCI_W:
209 if (wc->vlan_tci_mask) {
212 cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value),
213 get_unaligned_be16(mask))
218 case NFI_NXM_OF_IP_TOS:
/* The low two (ECN) bits must be zero in an NXM IP_TOS value. */
219 if (*(uint8_t *) value & 0x03) {
220 return NXM_BAD_VALUE;
222 flow->nw_tos = *(uint8_t *) value;
225 case NFI_NXM_OF_IP_PROTO:
226 flow->nw_proto = *(uint8_t *) value;
229 /* IP addresses in IP and ARP headers. */
230 case NFI_NXM_OF_IP_SRC:
231 case NFI_NXM_OF_ARP_SPA:
232 if (wc->nw_src_mask) {
235 cls_rule_set_nw_src(rule, get_unaligned_be32(value));
238 case NFI_NXM_OF_IP_SRC_W:
239 case NFI_NXM_OF_ARP_SPA_W:
240 if (wc->nw_src_mask) {
243 ovs_be32 ip = get_unaligned_be32(value);
244 ovs_be32 netmask = get_unaligned_be32(mask);
245 if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) {
250 case NFI_NXM_OF_IP_DST:
251 case NFI_NXM_OF_ARP_TPA:
252 if (wc->nw_dst_mask) {
255 cls_rule_set_nw_dst(rule, get_unaligned_be32(value));
258 case NFI_NXM_OF_IP_DST_W:
259 case NFI_NXM_OF_ARP_TPA_W:
260 if (wc->nw_dst_mask) {
263 ovs_be32 ip = get_unaligned_be32(value);
264 ovs_be32 netmask = get_unaligned_be32(mask);
265 if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) {
271 /* IPv6 addresses. */
272 case NFI_NXM_NX_IPV6_SRC:
273 if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) {
276 struct in6_addr ipv6;
277 memcpy(&ipv6, value, sizeof ipv6);
278 cls_rule_set_ipv6_src(rule, &ipv6);
281 case NFI_NXM_NX_IPV6_SRC_W:
282 if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) {
285 struct in6_addr ipv6, netmask;
286 memcpy(&ipv6, value, sizeof ipv6);
287 memcpy(&netmask, mask, sizeof netmask);
288 if (!cls_rule_set_ipv6_src_masked(rule, &ipv6, &netmask)) {
293 case NFI_NXM_NX_IPV6_DST:
294 if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) {
297 struct in6_addr ipv6;
298 memcpy(&ipv6, value, sizeof ipv6);
299 cls_rule_set_ipv6_dst(rule, &ipv6);
302 case NFI_NXM_NX_IPV6_DST_W:
303 if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) {
306 struct in6_addr ipv6, netmask;
307 memcpy(&ipv6, value, sizeof ipv6);
308 memcpy(&netmask, mask, sizeof netmask);
309 if (!cls_rule_set_ipv6_dst_masked(rule, &ipv6, &netmask)) {
/* L4 ports: TCP/UDP values are stored in network order as-is. */
316 case NFI_NXM_OF_TCP_SRC:
317 flow->tp_src = get_unaligned_be16(value);
319 case NFI_NXM_OF_TCP_DST:
320 flow->tp_dst = get_unaligned_be16(value);
324 case NFI_NXM_OF_UDP_SRC:
325 flow->tp_src = get_unaligned_be16(value);
327 case NFI_NXM_OF_UDP_DST:
328 flow->tp_dst = get_unaligned_be16(value);
/* ICMP(v6) type/code are 8-bit on the wire but stored widened into the
 * 16-bit tp_src/tp_dst flow members. */
332 case NFI_NXM_OF_ICMP_TYPE:
333 flow->tp_src = htons(*(uint8_t *) value);
335 case NFI_NXM_OF_ICMP_CODE:
336 flow->tp_dst = htons(*(uint8_t *) value);
340 case NFI_NXM_NX_ICMPV6_TYPE:
341 flow->tp_src = htons(*(uint8_t *) value);
343 case NFI_NXM_NX_ICMPV6_CODE:
344 flow->tp_dst = htons(*(uint8_t *) value);
347 /* IPv6 Neighbor Discovery. */
348 case NFI_NXM_NX_ND_TARGET:
349 /* We've already verified that it's an ICMPv6 message. */
350 if ((flow->tp_src != htons(ND_NEIGHBOR_SOLICIT))
351 && (flow->tp_src != htons(ND_NEIGHBOR_ADVERT))) {
352 return NXM_BAD_PREREQ;
354 memcpy(&flow->nd_target, value, sizeof flow->nd_target);
356 case NFI_NXM_NX_ND_SLL:
357 /* We've already verified that it's an ICMPv6 message. */
358 if (flow->tp_src != htons(ND_NEIGHBOR_SOLICIT)) {
359 return NXM_BAD_PREREQ;
361 memcpy(flow->arp_sha, value, ETH_ADDR_LEN);
363 case NFI_NXM_NX_ND_TLL:
364 /* We've already verified that it's an ICMPv6 message. */
365 if (flow->tp_src != htons(ND_NEIGHBOR_ADVERT)) {
366 return NXM_BAD_PREREQ;
368 memcpy(flow->arp_tha, value, ETH_ADDR_LEN);
372 case NFI_NXM_OF_ARP_OP:
/* ARP opcode is 16 bits on the wire but kept in the 8-bit nw_proto
 * member, so values above 255 are rejected. */
373 if (ntohs(get_unaligned_be16(value)) > 255) {
374 return NXM_BAD_VALUE;
376 flow->nw_proto = ntohs(get_unaligned_be16(value));
380 case NFI_NXM_NX_ARP_SHA:
381 memcpy(flow->arp_sha, value, ETH_ADDR_LEN);
383 case NFI_NXM_NX_ARP_THA:
384 memcpy(flow->arp_tha, value, ETH_ADDR_LEN);
388 case NFI_NXM_NX_TUN_ID:
389 if (wc->tun_id_mask) {
392 cls_rule_set_tun_id(rule, get_unaligned_be64(value));
395 case NFI_NXM_NX_TUN_ID_W:
396 if (wc->tun_id_mask) {
399 ovs_be64 tun_id = get_unaligned_be64(value);
400 ovs_be64 tun_mask = get_unaligned_be64(mask);
401 cls_rule_set_tun_id_masked(rule, tun_id, tun_mask);
/* All register fields share one handler; parse_nx_reg() derives the
 * register index from the NXM header itself. */
406 case NFI_NXM_NX_REG0:
407 case NFI_NXM_NX_REG0_W:
409 case NFI_NXM_NX_REG1:
410 case NFI_NXM_NX_REG1_W:
413 case NFI_NXM_NX_REG2:
414 case NFI_NXM_NX_REG2_W:
417 case NFI_NXM_NX_REG3:
418 case NFI_NXM_NX_REG3_W:
423 return parse_nx_reg(f, flow, wc, value, mask);
/* Returns whether 'flow' satisfies 'field's prerequisites: the nw_proto
 * prerequisite (if nonzero) must match exactly, and flow->dl_type must equal
 * one of the up-to-two dl_type prerequisites (no dl_type prerequisite means
 * dl_type[0] == 0, which is accepted). */
432 nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow)
434 if (field->nw_proto && field->nw_proto != flow->nw_proto) {
438 if (!field->dl_type[0]) {
440 } else if (field->dl_type[0] == flow->dl_type) {
442 } else if (field->dl_type[1] && field->dl_type[1] == flow->dl_type) {
/* Validates the nxm_entry at 'p': enough bytes for the 4-byte header, a sane
 * payload length, and the whole entry fitting within the remaining
 * 'match_len'.  Callers use the return value as the host-order header, with
 * 0 meaning "stop / invalid" (see the while loops in nx_pull_match() and
 * nx_match_to_string()). */
450 nx_entry_ok(const void *p, unsigned int match_len)
452 unsigned int payload_len;
458 VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
/* memcpy avoids an unaligned 32-bit load from the message buffer. */
462 memcpy(&header_be, p, 4);
463 header = ntohl(header_be);
465 payload_len = NXM_LENGTH(header);
467 VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
471 if (match_len < payload_len + 4) {
472 VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
473 "%u bytes left in nx_match", payload_len + 4, match_len);
/* Pulls 'match_len' bytes of nx_match (padded to a multiple of 8) from the
 * front of 'b' and initializes 'rule' from it, starting from a catch-all rule
 * at 'priority'.  Each entry must name a known field, satisfy that field's
 * prerequisites against what has been parsed so far, and not duplicate an
 * earlier entry (checked via the field's FWW_* wildcard bit).  Returns 0 on
 * success, an OpenFlow error on failure; leftover bytes at the end yield
 * NXM_INVALID. */
481 nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority,
482 struct cls_rule *rule)
487 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
489 VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
490 "multiple of 8, is longer than space in message (max "
491 "length %zu)", match_len, b->size);
492 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
495 cls_rule_init_catchall(rule, priority);
496 while ((header = nx_entry_ok(p, match_len)) != 0) {
497 unsigned length = NXM_LENGTH(header);
498 const struct nxm_field *f;
501 f = nxm_field_lookup(header);
503 error = NXM_BAD_TYPE;
504 } else if (!nxm_prereqs_ok(f, &rule->flow)) {
505 error = NXM_BAD_PREREQ;
/* A cleared FWW_* bit means an earlier entry already set this field. */
506 } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) {
507 error = NXM_DUP_TYPE;
509 /* 'hasmask' and 'length' are known to be correct at this point
510 * because they are included in 'header' and nxm_field_lookup()
511 * checked them already. */
512 rule->wc.wildcards &= ~f->wildcard;
/* For hasmask entries the payload is value then mask, each length/2. */
513 error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2);
516 VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", "
517 "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" "
519 NXM_VENDOR(header), NXM_FIELD(header),
520 NXM_HASMASK(header), NXM_TYPE(header),
527 match_len -= 4 + length;
530 return match_len ? NXM_INVALID : 0;
533 /* nx_put_match() and helpers.
535 * 'put' functions whose names end in 'w' add a wildcarded field.
536 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
537 * Other 'put' functions add exact-match fields.
/* Appends the 4-byte NXM header for 'header' to 'b' in network byte order. */
541 nxm_put_header(struct ofpbuf *b, uint32_t header)
543 ovs_be32 n_header = htonl(header);
544 ofpbuf_put(b, &n_header, sizeof n_header);
/* Appends header + 1-byte exact-match value. */
548 nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
550 nxm_put_header(b, header);
551 ofpbuf_put(b, &value, sizeof value);
/* Appends header + 16-bit exact-match value (already network order). */
555 nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
557 nxm_put_header(b, header);
558 ofpbuf_put(b, &value, sizeof value);
/* Appends header + 16-bit value + 16-bit mask (wildcarded form). */
562 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
564 nxm_put_header(b, header);
565 ofpbuf_put(b, &value, sizeof value);
566 ofpbuf_put(b, &mask, sizeof mask);
/* Maybe-wildcarded 16-bit put: an all-ones mask degenerates to the compact
 * exact-match form; otherwise emits the *_W header with an explicit mask.
 * (The all-zeros case -- omit entirely -- is elided in this chunk.) */
570 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
576 case CONSTANT_HTONS(UINT16_MAX):
577 nxm_put_16(b, header, value);
581 nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
/* 32-bit variants of the same three shapes. */
587 nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
589 nxm_put_header(b, header);
590 ofpbuf_put(b, &value, sizeof value);
594 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
596 nxm_put_header(b, header);
597 ofpbuf_put(b, &value, sizeof value);
598 ofpbuf_put(b, &mask, sizeof mask);
602 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
608 case CONSTANT_HTONL(UINT32_MAX):
609 nxm_put_32(b, header, value);
613 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
/* 64-bit variants of the same three shapes. */
619 nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
621 nxm_put_header(b, header);
622 ofpbuf_put(b, &value, sizeof value);
626 nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
628 nxm_put_header(b, header);
629 ofpbuf_put(b, &value, sizeof value);
630 ofpbuf_put(b, &mask, sizeof mask);
634 nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
640 case CONSTANT_HTONLL(UINT64_MAX):
641 nxm_put_64(b, header, value);
645 nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
/* Appends header + a 6-byte Ethernet address. */
651 nxm_put_eth(struct ofpbuf *b, uint32_t header,
652 const uint8_t value[ETH_ADDR_LEN])
654 nxm_put_header(b, header);
655 ofpbuf_put(b, value, ETH_ADDR_LEN);
/* Emits the Ethernet destination match appropriate for the FWW_DL_DST /
 * FWW_ETH_MCAST wildcard combination in 'wc': both bits set means fully
 * wildcarded (emit nothing), a partial combination uses the masked *_W form
 * with the mask derived from the wildcards, and the default is an exact
 * match.  (Some case labels between lines 663-671 are elided here.) */
659 nxm_put_eth_dst(struct ofpbuf *b,
660 flow_wildcards_t wc, const uint8_t value[ETH_ADDR_LEN])
662 switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) {
663 case FWW_DL_DST | FWW_ETH_MCAST:
666 nxm_put_header(b, NXM_OF_ETH_DST_W);
667 ofpbuf_put(b, value, ETH_ADDR_LEN);
668 ofpbuf_put(b, flow_wildcards_to_dl_dst_mask(wc), ETH_ADDR_LEN);
671 nxm_put_eth(b, NXM_OF_ETH_DST, value);
/* Maybe-wildcarded IPv6 put: all-zeros mask emits nothing, all-ones emits
 * the exact-match form, otherwise value + mask under the *_W header. */
677 nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
678 const struct in6_addr *value, const struct in6_addr *mask)
680 if (ipv6_mask_is_any(mask)) {
682 } else if (ipv6_mask_is_exact(mask)) {
683 nxm_put_header(b, header);
684 ofpbuf_put(b, value, sizeof *value);
686 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
687 ofpbuf_put(b, value, sizeof *value);
688 ofpbuf_put(b, mask, sizeof *mask);
324 
692 /* Appends to 'b' the nx_match format that expresses 'cr' (except for
693 * 'cr->priority', because priority is not part of nx_match), plus enough
694 * zero bytes to pad the nx_match out to a multiple of 8.
696 * This function can cause 'b''s data to be reallocated.
698 * Returns the number of bytes appended to 'b', excluding padding.
700 * If 'cr' is a catch-all rule that matches every packet, then this function
701 * appends nothing to 'b' and returns 0. */
703 nx_put_match(struct ofpbuf *b, const struct cls_rule *cr)
705 const flow_wildcards_t wc = cr->wc.wildcards;
706 const struct flow *flow = &cr->flow;
707 const size_t start_len = b->size;
/* Compile-time tie to flow/wildcard layout version 1: revisit this function
 * whenever FLOW_WC_SEQ changes. */
711 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1);
714 if (!(wc & FWW_IN_PORT)) {
715 uint16_t in_port = flow->in_port;
716 nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
/* Ethernet. */
720 nxm_put_eth_dst(b, wc, flow->dl_dst);
721 if (!(wc & FWW_DL_SRC)) {
722 nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src);
724 if (!(wc & FWW_DL_TYPE)) {
725 nxm_put_16(b, NXM_OF_ETH_TYPE,
726 ofputil_dl_type_to_openflow(flow->dl_type));
730 nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask);
/* IPv4. */
733 if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
735 if (!(wc & FWW_NW_TOS)) {
/* Mask off the two ECN bits; only the DSCP bits are emitted. */
736 nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
738 nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
739 nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);
741 if (!(wc & FWW_NW_PROTO)) {
742 nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
/* L4 fields, keyed on nw_proto (TCP / UDP / ICMP case labels are
 * elided in this chunk). */
743 switch (flow->nw_proto) {
746 if (!(wc & FWW_TP_SRC)) {
747 nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
749 if (!(wc & FWW_TP_DST)) {
750 nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
756 if (!(wc & FWW_TP_SRC)) {
757 nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
759 if (!(wc & FWW_TP_DST)) {
760 nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
766 if (!(wc & FWW_TP_SRC)) {
767 nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src));
769 if (!(wc & FWW_TP_DST)) {
770 nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst));
/* IPv6. */
775 } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IPV6)) {
778 if (!(wc & FWW_NW_TOS)) {
779 nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
781 nxm_put_ipv6(b, NXM_NX_IPV6_SRC, &flow->ipv6_src,
782 &cr->wc.ipv6_src_mask);
783 nxm_put_ipv6(b, NXM_NX_IPV6_DST, &flow->ipv6_dst,
784 &cr->wc.ipv6_dst_mask);
786 if (!(wc & FWW_NW_PROTO)) {
787 nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
788 switch (flow->nw_proto) {
791 if (!(wc & FWW_TP_SRC)) {
792 nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
794 if (!(wc & FWW_TP_DST)) {
795 nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
801 if (!(wc & FWW_TP_SRC)) {
802 nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
804 if (!(wc & FWW_TP_DST)) {
805 nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
/* ICMPv6, including Neighbor Discovery fields when the type is a
 * neighbor solicitation or advertisement. */
811 if (!(wc & FWW_TP_SRC)) {
812 nxm_put_8(b, NXM_NX_ICMPV6_TYPE, ntohs(flow->tp_src));
814 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
815 flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
816 if (!(wc & FWW_ND_TARGET)) {
817 nxm_put_ipv6(b, NXM_NX_ND_TARGET, &flow->nd_target,
820 if (!(wc & FWW_ARP_SHA)
821 && flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
822 nxm_put_eth(b, NXM_NX_ND_SLL, flow->arp_sha);
824 if (!(wc & FWW_ARP_THA)
825 && flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
826 nxm_put_eth(b, NXM_NX_ND_TLL, flow->arp_tha);
830 if (!(wc & FWW_TP_DST)) {
831 nxm_put_8(b, NXM_NX_ICMPV6_CODE, ntohs(flow->tp_dst));
/* ARP. */
836 } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
838 if (!(wc & FWW_NW_PROTO)) {
839 nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto));
841 nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask);
842 nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask);
843 if (!(wc & FWW_ARP_SHA)) {
844 nxm_put_eth(b, NXM_NX_ARP_SHA, flow->arp_sha);
846 if (!(wc & FWW_ARP_THA)) {
847 nxm_put_eth(b, NXM_NX_ARP_THA, flow->arp_tha);
/* Tunnel ID and registers. */
852 nxm_put_64m(b, NXM_NX_TUN_ID, flow->tun_id, cr->wc.tun_id_mask);
855 for (i = 0; i < FLOW_N_REGS; i++) {
856 nxm_put_32m(b, NXM_NX_REG(i),
857 htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
/* Pad to an 8-byte boundary; the padding does not count toward the
 * returned length. */
860 match_len = b->size - start_len;
861 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
865 /* nx_match_to_string() and helpers. */
867 static void format_nxm_field_name(struct ds *, uint32_t header);
/* Formats 'match_len' bytes of nx_match at 'p' as a human-readable,
 * comma-separated list of NAME(hexvalue[/hexmask]) entries into a
 * malloc()'d string that the caller must free.  An empty match yields
 * "<any>"; trailing bytes that fail nx_entry_ok() are reported as
 * "<N invalid bytes>". */
870 nx_match_to_string(const uint8_t *p, unsigned int match_len)
876 return xstrdup("<any>");
880 while ((header = nx_entry_ok(p, match_len)) != 0) {
881 unsigned int length = NXM_LENGTH(header);
882 unsigned int value_len = nxm_field_bytes(header);
883 const uint8_t *value = p + 4;
884 const uint8_t *mask = value + value_len;
888 ds_put_cstr(&s, ", ");
891 format_nxm_field_name(&s, header);
892 ds_put_char(&s, '(');
894 for (i = 0; i < value_len; i++) {
895 ds_put_format(&s, "%02x", value[i]);
897 if (NXM_HASMASK(header)) {
898 ds_put_char(&s, '/');
899 for (i = 0; i < value_len; i++) {
900 ds_put_format(&s, "%02x", mask[i]);
903 ds_put_char(&s, ')');
906 match_len -= 4 + length;
911 ds_put_cstr(&s, ", ");
914 ds_put_format(&s, "<%u invalid bytes>", match_len);
917 return ds_steal_cstr(&s);
/* Appends the symbolic "NXM_*" name for 'header' to 's', or a numeric
 * "vendor:field" form when the header is unknown. */
921 format_nxm_field_name(struct ds *s, uint32_t header)
923 const struct nxm_field *f = nxm_field_lookup(header);
925 ds_put_cstr(s, f->name);
927 ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
/* Resolves a field given by 'name'/'name_len' to its NXM header: first by
 * exact symbolic name against the nxm_fields table, then as a raw 32-bit
 * hex header (useful mainly for exercising error paths in tests).
 * Presumably returns 0 when neither form matches -- the failure return is
 * not visible in this chunk. */
932 parse_nxm_field_name(const char *name, int name_len)
934 const struct nxm_field *f;
936 /* Check whether it's a field name. */
937 for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) {
938 if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') {
943 /* Check whether it's a 32-bit field header value as hex.
944 * (This isn't ordinarily useful except for testing error behavior.) */
946 uint32_t header = hexits_value(name, name_len, NULL);
947 if (header != UINT_MAX) {
955 /* nx_match_from_string(). */
/* Parses the string form produced by nx_match_to_string() -- a list of
 * NAME(hex[/hex]) entries, or "<any>" -- appending the binary nx_match to
 * 'b' and padding it to a multiple of 8 bytes.  Syntax errors are fatal
 * (ovs_fatal), which is acceptable because this is used by command-line
 * utilities, not the protocol path. */
958 nx_match_from_string(const char *s, struct ofpbuf *b)
960 const char *full_s = s;
961 const size_t start_len = b->size;
964 if (!strcmp(s, "<any>")) {
965 /* Ensure that 'b->data' isn't actually null. */
966 ofpbuf_prealloc_tailroom(b, 1);
/* Entries are separated by commas and/or spaces. */
970 for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
977 name_len = strcspn(s, "(");
978 if (s[name_len] != '(') {
979 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
982 header = parse_nxm_field_name(name, name_len);
984 ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
/* Value (and, for hasmask headers, "/mask") as hex digits; each must
 * be exactly the field's payload width. */
989 nxm_put_header(b, header);
990 s = ofpbuf_put_hex(b, s, &n);
991 if (n != nxm_field_bytes(header)) {
992 ovs_fatal(0, "%.2s: hex digits expected", s);
994 if (NXM_HASMASK(header)) {
997 ovs_fatal(0, "%s: missing / in masked field %.*s",
998 full_s, name_len, name);
1000 s = ofpbuf_put_hex(b, s + 1, &n);
1001 if (n != nxm_field_bytes(header)) {
1002 ovs_fatal(0, "%.2s: hex digits expected", s);
1006 s += strspn(s, " ");
1008 ovs_fatal(0, "%s: missing ) following field %.*s",
1009 full_s, name_len, name);
/* Pad to an 8-byte boundary, mirroring nx_put_match(). */
1014 match_len = b->size - start_len;
1015 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
/* Parses "NAME[]", "NAME[bit]", or "NAME[start..end]" from 's' into
 * *headerp (NXM header), *ofsp (starting bit), and *n_bitsp (width).
 * "[]" means the whole field.  Range errors (start > end, start or end
 * beyond the field width) are fatal.  Presumably returns the character
 * position just past the parsed spec so callers can continue parsing --
 * the final return is not visible in this chunk. */
1020 nxm_parse_field_bits(const char *s, uint32_t *headerp, int *ofsp, int *n_bitsp)
1022 const char *full_s = s;
1030 name_len = strcspn(s, "[");
1031 if (s[name_len] != '[') {
1032 ovs_fatal(0, "%s: missing [ looking for field name", full_s);
1035 header = parse_nxm_field_name(name, name_len);
1037 ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
1039 width = nxm_field_bits(header);
1042 if (sscanf(s, "[%d..%d]", &start, &end) == 2) {
1043 /* Nothing to do. */
1044 } else if (sscanf(s, "[%d]", &start) == 1) {
1046 } else if (!strncmp(s, "[]", 2)) {
1050 ovs_fatal(0, "%s: syntax error expecting [] or [<bit>] or "
1051 "[<start>..<end>]", full_s);
1053 s = strchr(s, ']') + 1;
1056 ovs_fatal(0, "%s: starting bit %d is after ending bit %d",
1057 full_s, start, end);
1058 } else if (start >= width) {
1059 ovs_fatal(0, "%s: starting bit %d is not valid because field is only "
1060 "%d bits wide", full_s, start, width);
1061 } else if (end >= width){
1062 ovs_fatal(0, "%s: ending bit %d is not valid because field is only "
1063 "%d bits wide", full_s, end, width);
1068 *n_bitsp = end - start + 1;
/* Parses a "SRC[bits]->DST[bits]" register-move spec into 'move'.  The
 * source and destination bit ranges must be the same width; errors are
 * fatal (command-line helper). */
1074 nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s)
1076 const char *full_s = s;
1078 int src_ofs, dst_ofs;
1079 int src_n_bits, dst_n_bits;
1081 s = nxm_parse_field_bits(s, &src, &src_ofs, &src_n_bits);
1082 if (strncmp(s, "->", 2)) {
1083 ovs_fatal(0, "%s: missing `->' following source", full_s);
1086 s = nxm_parse_field_bits(s, &dst, &dst_ofs, &dst_n_bits);
1088 ovs_fatal(0, "%s: trailing garbage following destination", full_s);
1091 if (src_n_bits != dst_n_bits) {
1092 ovs_fatal(0, "%s: source field is %d bits wide but destination is "
1093 "%d bits wide", full_s, src_n_bits, dst_n_bits);
1096 ofputil_init_NXAST_REG_MOVE(move);
1097 move->n_bits = htons(src_n_bits);
1098 move->src_ofs = htons(src_ofs);
1099 move->dst_ofs = htons(dst_ofs);
1100 move->src = htonl(src);
1101 move->dst = htonl(dst);
/* Parses a "VALUE->DST[bits]" register-load spec into 'load'.  The value
 * must fit in the destination bit range. */
1105 nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s)
1107 const char *full_s = s;
1112 value = strtoull(s, (char **) &s, 0);
1113 if (strncmp(s, "->", 2)) {
1114 ovs_fatal(0, "%s: missing `->' following value", full_s);
1117 s = nxm_parse_field_bits(s, &dst, &ofs, &n_bits);
1119 ovs_fatal(0, "%s: trailing garbage following destination", full_s);
/* n_bits == 64 is excluded because value >> 64 would be undefined. */
1122 if (n_bits < 64 && (value >> n_bits) != 0) {
1123 ovs_fatal(0, "%s: value %"PRIu64" does not fit into %d bits",
1124 full_s, value, n_bits);
1127 ofputil_init_NXAST_REG_LOAD(load);
1128 load->ofs_nbits = nxm_encode_ofs_nbits(ofs, n_bits);
1129 load->dst = htonl(dst);
1130 load->value = htonll(value);
1133 /* nxm_format_reg_move(), nxm_format_reg_load(). */
/* Appends "NAME[]", "NAME[bit]", or "NAME[start..end]" to 's' -- the inverse
 * of nxm_parse_field_bits(). */
1136 nxm_format_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits)
1138 format_nxm_field_name(s, header);
1139 if (ofs == 0 && n_bits == nxm_field_bits(header)) {
1140 ds_put_cstr(s, "[]");
1141 } else if (n_bits == 1) {
1142 ds_put_format(s, "[%d]", ofs);
1144 ds_put_format(s, "[%d..%d]", ofs, ofs + n_bits - 1);
/* Appends "move:SRC[bits]->DST[bits]" for 'move' to 's'. */
1149 nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s)
1151 int n_bits = ntohs(move->n_bits);
1152 int src_ofs = ntohs(move->src_ofs);
1153 int dst_ofs = ntohs(move->dst_ofs);
1154 uint32_t src = ntohl(move->src);
1155 uint32_t dst = ntohl(move->dst);
1157 ds_put_format(s, "move:");
1158 nxm_format_field_bits(s, src, src_ofs, n_bits);
1159 ds_put_cstr(s, "->");
1160 nxm_format_field_bits(s, dst, dst_ofs, n_bits);
/* Appends "load:0xVALUE->DST[bits]" for 'load' to 's'. */
1164 nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s)
1166 int ofs = nxm_decode_ofs(load->ofs_nbits);
1167 int n_bits = nxm_decode_n_bits(load->ofs_nbits);
1168 uint32_t dst = ntohl(load->dst);
1169 uint64_t value = ntohll(load->value);
1171 ds_put_format(s, "load:%#"PRIx64"->", value);
1172 nxm_format_field_bits(s, dst, ofs, n_bits);
1175 /* nxm_check_reg_move(), nxm_check_reg_load(). */
/* Common validity test for a reg_move/reg_load operand field: it must be a
 * known, non-masked field whose prerequisites 'flow' satisfies, and the
 * accessed bit range ('size' = ofs + n_bits) must fit within the field. */
1178 field_ok(const struct nxm_field *f, const struct flow *flow, int size)
1180 return (f && !NXM_HASMASK(f->header)
1181 && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header));
/* Validates an NXAST_REG_MOVE action against 'flow': both the source and
 * destination bit ranges must pass nxm_src_check()/nxm_dst_check(). */
1185 nxm_check_reg_move(const struct nx_action_reg_move *action,
1186 const struct flow *flow)
1188 int src_ofs, dst_ofs, n_bits;
1191 n_bits = ntohs(action->n_bits);
1192 src_ofs = ntohs(action->src_ofs);
1193 dst_ofs = ntohs(action->dst_ofs);
1195 error = nxm_src_check(action->src, src_ofs, n_bits, flow);
1200 return nxm_dst_check(action->dst, dst_ofs, n_bits, flow);
1203 /* Given a flow, checks that the source field represented by 'src_header'
1204 * in the range ['ofs', 'ofs' + 'n_bits') is valid. */
1206 nxm_src_check(ovs_be32 src_header, unsigned int ofs, unsigned int n_bits,
1207 const struct flow *flow)
1209 const struct nxm_field *src = nxm_field_lookup(ntohl(src_header));
/* The success return between the checks and the warning paths is elided
 * in this chunk; every failure path falls through to BAD_ARGUMENT. */
1212 VLOG_WARN_RL(&rl, "zero bit source field");
1213 } else if (!field_ok(src, flow, ofs + n_bits)) {
1214 VLOG_WARN_RL(&rl, "invalid source field");
1219 return BAD_ARGUMENT;
1222 /* Given a flow, checks that the destination field represented by 'dst_header'
1223 * in the range ['ofs', 'ofs' + 'n_bits') is valid. */
1225 nxm_dst_check(ovs_be32 dst_header, unsigned int ofs, unsigned int n_bits,
1226 const struct flow *flow)
1228 const struct nxm_field *dst = nxm_field_lookup(ntohl(dst_header));
/* Same shape as nxm_src_check(), plus a writability requirement. */
1231 VLOG_WARN_RL(&rl, "zero bit destination field");
1232 } else if (!field_ok(dst, flow, ofs + n_bits)) {
1233 VLOG_WARN_RL(&rl, "invalid destination field");
1234 } else if (!dst->writable) {
1235 VLOG_WARN_RL(&rl, "destination field is not writable");
1240 return BAD_ARGUMENT;
/* Validates an NXAST_REG_LOAD action against 'flow': the destination range
 * must be valid and the literal value must fit in 'n_bits'. */
1244 nxm_check_reg_load(const struct nx_action_reg_load *action,
1245 const struct flow *flow)
1247 unsigned int ofs = nxm_decode_ofs(action->ofs_nbits);
1248 unsigned int n_bits = nxm_decode_n_bits(action->ofs_nbits);
1251 error = nxm_dst_check(action->dst, ofs, n_bits, flow);
1256 /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
1258 if (n_bits < 64 && ntohll(action->value) >> n_bits) {
1259 return BAD_ARGUMENT;
1265 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
/* Reads the scalar value of field 'src' out of 'flow', widened to uint64_t
 * in host byte order.  Only fields that nxm_check_reg_move()/reg_load()
 * accept as sources are meaningful here; masked (*_W) and non-scalar
 * (IPv6 / ND target) indexes are listed at the bottom so the switch stays
 * exhaustive -- presumably they fall into a NOT_REACHED()-style tail that
 * is elided in this chunk. */
1268 nxm_read_field(const struct nxm_field *src, const struct flow *flow)
1270 switch (src->index) {
1271 case NFI_NXM_OF_IN_PORT:
1272 return flow->in_port;
1274 case NFI_NXM_OF_ETH_DST:
1275 return eth_addr_to_uint64(flow->dl_dst);
1277 case NFI_NXM_OF_ETH_SRC:
1278 return eth_addr_to_uint64(flow->dl_src);
1280 case NFI_NXM_OF_ETH_TYPE:
1281 return ntohs(ofputil_dl_type_to_openflow(flow->dl_type));
1283 case NFI_NXM_OF_VLAN_TCI:
1284 return ntohs(flow->vlan_tci);
1286 case NFI_NXM_OF_IP_TOS:
1287 return flow->nw_tos;
1289 case NFI_NXM_OF_IP_PROTO:
1290 case NFI_NXM_OF_ARP_OP:
1291 return flow->nw_proto;
1293 case NFI_NXM_OF_IP_SRC:
1294 case NFI_NXM_OF_ARP_SPA:
1295 return ntohl(flow->nw_src);
1297 case NFI_NXM_OF_IP_DST:
1298 case NFI_NXM_OF_ARP_TPA:
1299 return ntohl(flow->nw_dst);
1301 case NFI_NXM_OF_TCP_SRC:
1302 case NFI_NXM_OF_UDP_SRC:
1303 return ntohs(flow->tp_src);
1305 case NFI_NXM_OF_TCP_DST:
1306 case NFI_NXM_OF_UDP_DST:
1307 return ntohs(flow->tp_dst);
1309 case NFI_NXM_OF_ICMP_TYPE:
1310 case NFI_NXM_NX_ICMPV6_TYPE:
1311 return ntohs(flow->tp_src) & 0xff;
1313 case NFI_NXM_OF_ICMP_CODE:
1314 case NFI_NXM_NX_ICMPV6_CODE:
1315 return ntohs(flow->tp_dst) & 0xff;
1317 case NFI_NXM_NX_TUN_ID:
1318 return ntohll(flow->tun_id);
/* Registers, gated on FLOW_N_REGS so only existing registers get cases. */
1320 #define NXM_READ_REGISTER(IDX) \
1321 case NFI_NXM_NX_REG##IDX: \
1322 return flow->regs[IDX]; \
1323 case NFI_NXM_NX_REG##IDX##_W: \
1326 NXM_READ_REGISTER(0);
1327 #if FLOW_N_REGS >= 2
1328 NXM_READ_REGISTER(1);
1330 #if FLOW_N_REGS >= 3
1331 NXM_READ_REGISTER(2);
1333 #if FLOW_N_REGS >= 4
1334 NXM_READ_REGISTER(3);
1340 case NFI_NXM_NX_ARP_SHA:
1341 case NFI_NXM_NX_ND_SLL:
1342 return eth_addr_to_uint64(flow->arp_sha);
1344 case NFI_NXM_NX_ARP_THA:
1345 case NFI_NXM_NX_ND_TLL:
1346 return eth_addr_to_uint64(flow->arp_tha);
/* Unreadable indexes: masked variants and fields wider than 64 bits. */
1348 case NFI_NXM_NX_TUN_ID_W:
1349 case NFI_NXM_OF_ETH_DST_W:
1350 case NFI_NXM_OF_VLAN_TCI_W:
1351 case NFI_NXM_OF_IP_SRC_W:
1352 case NFI_NXM_OF_IP_DST_W:
1353 case NFI_NXM_OF_ARP_SPA_W:
1354 case NFI_NXM_OF_ARP_TPA_W:
1355 case NFI_NXM_NX_IPV6_SRC:
1356 case NFI_NXM_NX_IPV6_SRC_W:
1357 case NFI_NXM_NX_IPV6_DST:
1358 case NFI_NXM_NX_IPV6_DST_W:
1359 case NFI_NXM_NX_ND_TARGET:
1367 /* Returns the value of the NXM field corresponding to 'header' at 'ofs_nbits'
/* Extracts bits [ofs, ofs + n_bits) of the field named by 'header' from
 * 'flow', shifted down to bit 0. */
1370 nxm_read_field_bits(ovs_be32 header, ovs_be16 ofs_nbits,
1371 const struct flow *flow)
1373 int n_bits = nxm_decode_n_bits(ofs_nbits);
1374 int ofs = nxm_decode_ofs(ofs_nbits);
1375 uint64_t mask, data;
/* n_bits == 64 is special-cased because a 64-bit shift by 64 is UB. */
1377 mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
1378 data = nxm_read_field(nxm_field_lookup(ntohl(header)), flow);
1379 data = (data >> ofs) & mask;
/* Stores 'new_value' (host byte order) into writable field 'dst' of 'flow',
 * converting to the field's in-flow representation.  The long list of case
 * labels at the bottom covers every read-only or non-scalar field so the
 * switch stays exhaustive; presumably they share a NOT_REACHED()-style tail
 * that is elided in this chunk. */
1385 nxm_write_field(const struct nxm_field *dst, struct flow *flow,
1388 switch (dst->index) {
1389 case NFI_NXM_OF_ETH_DST:
1390 eth_addr_from_uint64(new_value, flow->dl_dst);
1393 case NFI_NXM_OF_ETH_SRC:
1394 eth_addr_from_uint64(new_value, flow->dl_src);
1397 case NFI_NXM_OF_VLAN_TCI:
1398 flow->vlan_tci = htons(new_value);
1401 case NFI_NXM_NX_TUN_ID:
1402 flow->tun_id = htonll(new_value);
/* Registers, gated on FLOW_N_REGS exactly as in nxm_read_field(). */
1405 #define NXM_WRITE_REGISTER(IDX) \
1406 case NFI_NXM_NX_REG##IDX: \
1407 flow->regs[IDX] = new_value; \
1409 case NFI_NXM_NX_REG##IDX##_W: \
1412 NXM_WRITE_REGISTER(0);
1413 #if FLOW_N_REGS >= 2
1414 NXM_WRITE_REGISTER(1);
1416 #if FLOW_N_REGS >= 3
1417 NXM_WRITE_REGISTER(2);
1419 #if FLOW_N_REGS >= 4
1420 NXM_WRITE_REGISTER(3);
1426 case NFI_NXM_OF_IP_TOS:
/* Only the DSCP bits are writable; ECN bits are masked off. */
1427 flow->nw_tos = new_value & IP_DSCP_MASK;
1430 case NFI_NXM_OF_IP_SRC:
1431 flow->nw_src = htonl(new_value);
1434 case NFI_NXM_OF_IP_DST:
1435 flow->nw_dst = htonl(new_value);
1438 case NFI_NXM_OF_TCP_SRC:
1439 case NFI_NXM_OF_UDP_SRC:
1440 flow->tp_src = htons(new_value);
1443 case NFI_NXM_OF_TCP_DST:
1444 case NFI_NXM_OF_UDP_DST:
1445 flow->tp_dst = htons(new_value);
/* Non-writable indexes (cf. the 'writable' flag in struct nxm_field). */
1448 case NFI_NXM_OF_IN_PORT:
1449 case NFI_NXM_OF_ETH_TYPE:
1450 case NFI_NXM_OF_IP_PROTO:
1451 case NFI_NXM_OF_ARP_OP:
1452 case NFI_NXM_OF_ARP_SPA:
1453 case NFI_NXM_OF_ARP_TPA:
1454 case NFI_NXM_OF_ICMP_TYPE:
1455 case NFI_NXM_OF_ICMP_CODE:
1456 case NFI_NXM_NX_TUN_ID_W:
1457 case NFI_NXM_OF_ETH_DST_W:
1458 case NFI_NXM_OF_VLAN_TCI_W:
1459 case NFI_NXM_OF_IP_SRC_W:
1460 case NFI_NXM_OF_IP_DST_W:
1461 case NFI_NXM_OF_ARP_SPA_W:
1462 case NFI_NXM_OF_ARP_TPA_W:
1463 case NFI_NXM_NX_ARP_SHA:
1464 case NFI_NXM_NX_ARP_THA:
1465 case NFI_NXM_NX_IPV6_SRC:
1466 case NFI_NXM_NX_IPV6_SRC_W:
1467 case NFI_NXM_NX_IPV6_DST:
1468 case NFI_NXM_NX_IPV6_DST_W:
1469 case NFI_NXM_NX_ICMPV6_TYPE:
1470 case NFI_NXM_NX_ICMPV6_CODE:
1471 case NFI_NXM_NX_ND_TARGET:
1472 case NFI_NXM_NX_ND_SLL:
1473 case NFI_NXM_NX_ND_TLL:
/* Executes an NXAST_REG_MOVE against 'flow': reads the source bit range and
 * writes it into the destination bit range via nxm_reg_load(). */
1480 nxm_execute_reg_move(const struct nx_action_reg_move *action,
1483 ovs_be16 src_ofs_nbits, dst_ofs_nbits;
1487 n_bits = ntohs(action->n_bits);
1488 src_ofs_nbits = nxm_encode_ofs_nbits(ntohs(action->src_ofs), n_bits);
1489 dst_ofs_nbits = nxm_encode_ofs_nbits(ntohs(action->dst_ofs), n_bits);
1491 src_data = nxm_read_field_bits(action->src, src_ofs_nbits, flow);
1492 nxm_reg_load(action->dst, dst_ofs_nbits, src_data, flow);
/* Executes an NXAST_REG_LOAD against 'flow': loads the literal value into
 * the destination bit range. */
1496 nxm_execute_reg_load(const struct nx_action_reg_load *action,
1499 nxm_reg_load(action->dst, action->ofs_nbits, ntohll(action->value), flow);
1502 /* Calculates ofs and n_bits from the given 'ofs_nbits' parameter, and copies
1503 * 'src_data'[0:n_bits] to 'dst_header'[ofs:ofs+n_bits] in the given 'flow'. */
1505 nxm_reg_load(ovs_be32 dst_header, ovs_be16 ofs_nbits, uint64_t src_data,
/* n_bits == 64 is special-cased to avoid an undefined 64-bit shift. */
1508 int n_bits = nxm_decode_n_bits(ofs_nbits);
1509 int dst_ofs = nxm_decode_ofs(ofs_nbits);
1510 uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
1512 /* Get remaining bits of the destination field. */
1513 const struct nxm_field *dst = nxm_field_lookup(ntohl(dst_header));
1514 uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);
1516 /* Get the final value. */
1517 uint64_t new_data = dst_data | (src_data << dst_ofs);
1519 nxm_write_field(dst, flow, new_data);