/*
 * Copyright (c) 2010, 2011, 2012 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "nx-match.h"

#include <netinet/icmp6.h>

#include "classifier.h"
#include "dynamic-string.h"
#include "meta-flow.h"
#include "ofp-errors.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "packets.h"
#include "unaligned.h"
#include "util.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(nx_match);

/* Rate limit for nx_match parse errors.  These always indicate a bug in the
 * peer and so there's not much point in showing a lot of them. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Returns the width of the data for a field with the given 'header', in
 * bytes. */
int
nxm_field_bytes(uint32_t header)
{
    unsigned int length = NXM_LENGTH(header);
    return NXM_HASMASK(header) ? length / 2 : length;
}

/* Returns the width of the data for a field with the given 'header', in
 * bits. */
int
nxm_field_bits(uint32_t header)
{
    return nxm_field_bytes(header) * 8;
}

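/* Illustration (not part of the original file): NXM_LENGTH() covers the whole
 * payload, and a "hasmask" header's payload holds the value followed by an
 * equally wide mask, so the data width is half the payload length.  Assuming
 * the standard NXM_NX_REG0_W header (4-byte value plus 4-byte mask):
 *
 *     nxm_field_bytes(NXM_NX_REG0_W) == 4
 *     nxm_field_bits(NXM_NX_REG0_W)  == 32
 */
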
/* nx_pull_match() and helpers. */

static uint32_t
nx_entry_ok(const void *p, unsigned int match_len)
{
    unsigned int payload_len;
    ovs_be32 header_be;
    uint32_t header;

    if (match_len < 4) {
        if (match_len) {
            VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
        }
        return 0;
    }
    memcpy(&header_be, p, 4);
    header = ntohl(header_be);

    payload_len = NXM_LENGTH(header);
    if (!payload_len) {
        VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
                    "length 0", header);
        return 0;
    }
    if (match_len < payload_len + 4) {
        VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
                    "%u bytes left in nx_match", payload_len + 4, match_len);
        return 0;
    }

    return header;
}

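/* Worked example (added for illustration): per nicira-ext.h, an nxm_entry
 * begins with a 4-byte header laid out as vendor(16):field(7):hasmask(1):
 * length(8), followed by 'length' bytes of payload.  A well-formed
 * NXM_OF_ETH_TYPE entry matching IPv4 is therefore the six bytes
 *
 *     00 00 06 02 08 00
 *     \---------/ \---/
 *        header   value      (vendor=0x0000, field=3, hasmask=0, length=2)
 *
 * nx_entry_ok() returns the decoded header (nonzero) for such an entry and
 * returns 0 when the remaining 'match_len' bytes cannot hold a complete
 * entry. */
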
static enum ofperr
nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
                uint16_t priority, struct cls_rule *rule,
                ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
    uint32_t header;
    uint8_t *p;

    assert((cookie != NULL) == (cookie_mask != NULL));

    p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
    if (!p) {
        VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
                    "multiple of 8, is longer than space in message (max "
                    "length %zu)", match_len, b->size);
        return OFPERR_OFPBMC_BAD_LEN;
    }

    cls_rule_init_catchall(rule, priority);
    if (cookie) {
        *cookie = *cookie_mask = htonll(0);
    }

    for (;
         (header = nx_entry_ok(p, match_len)) != 0;
         p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) {
        const struct mf_field *mf;
        enum ofperr error;

        mf = mf_from_nxm_header(header);
        if (!mf) {
            if (strict) {
                error = OFPERR_OFPBMC_BAD_FIELD;
            } else {
                continue;
            }
        } else if (!mf_are_prereqs_ok(mf, &rule->flow)) {
            error = OFPERR_OFPBMC_BAD_PREREQ;
        } else if (!mf_is_all_wild(mf, &rule->wc)) {
            error = OFPERR_OFPBMC_DUP_FIELD;
        } else if (header != OXM_OF_IN_PORT) {
            unsigned int width = mf->n_bytes;
            union mf_value value;

            memcpy(&value, p + 4, width);
            if (!mf_is_value_valid(mf, &value)) {
                error = OFPERR_OFPBMC_BAD_VALUE;
            } else if (!NXM_HASMASK(header)) {
                error = 0;
                mf_set_value(mf, &value, rule);
            } else {
                union mf_value mask;

                memcpy(&mask, p + 4 + width, width);
                if (!mf_is_mask_valid(mf, &mask)) {
                    error = OFPERR_OFPBMC_BAD_MASK;
                } else {
                    error = 0;
                    mf_set(mf, &value, &mask, rule);
                }
            }
        } else {
            /* Special case for 32bit ports when using OXM,
             * ports are 16 bits wide otherwise. */
            ovs_be32 port_of11;
            uint16_t port;

            memcpy(&port_of11, p + 4, sizeof port_of11);
            error = ofputil_port_from_ofp11(port_of11, &port);
            if (!error) {
                cls_rule_set_in_port(rule, port);
            }
        }

        /* Check if the match is for a cookie rather than a classifier rule. */
        if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) {
            if (*cookie_mask) {
                error = OFPERR_OFPBMC_DUP_FIELD;
            } else {
                unsigned int width = sizeof *cookie;

                memcpy(cookie, p + 4, width);
                if (NXM_HASMASK(header)) {
                    memcpy(cookie_mask, p + 4 + width, width);
                } else {
                    *cookie_mask = htonll(UINT64_MAX);
                }
                error = 0;
            }
        }

        if (error) {
            VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", "
                        "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), "
                        "(%s)", header,
                        NXM_VENDOR(header), NXM_FIELD(header),
                        NXM_HASMASK(header), NXM_LENGTH(header),
                        ofperr_to_string(error));
            return error;
        }
    }

    return match_len ? OFPERR_OFPBMC_BAD_LEN : 0;
}

/* Parses the nx_match formatted match description in 'b' with length
 * 'match_len'.  The results are stored in 'rule', which is initialized with
 * 'priority'.  If 'cookie' and 'cookie_mask' contain valid pointers, then the
 * cookie and mask will be stored in them if a "NXM_NX_COOKIE*" match is
 * defined.  Otherwise, 0 is stored in both.
 *
 * Fails with an error when encountering unknown NXM headers.
 *
 * Returns 0 if successful, otherwise an OpenFlow error code. */
enum ofperr
nx_pull_match(struct ofpbuf *b, unsigned int match_len,
              uint16_t priority, struct cls_rule *rule,
              ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
    return nx_pull_match__(b, match_len, true, priority, rule, cookie,
                           cookie_mask);
}

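/* Usage sketch (illustrative only; 'match_len' here is a hypothetical
 * caller-side variable): a decoder that has already pulled the fixed header
 * of an NXM-based message off of 'b' can parse the match like this:
 *
 *     struct cls_rule rule;
 *     ovs_be64 cookie, cookie_mask;
 *     enum ofperr error;
 *
 *     error = nx_pull_match(b, match_len, OFP_DEFAULT_PRIORITY, &rule,
 *                           &cookie, &cookie_mask);
 *     if (error) {
 *         return error;
 *     }
 *
 * Passing NULL for 'cookie' and 'cookie_mask' instead makes an NXM_NX_COOKIE
 * entry be treated like any other unrecognized field. */
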
/* Behaves the same as nx_pull_match() with one exception.  Skips over unknown
 * NXM headers instead of failing with an error when they are encountered. */
enum ofperr
nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
                    uint16_t priority, struct cls_rule *rule,
                    ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
    return nx_pull_match__(b, match_len, false, priority, rule, cookie,
                           cookie_mask);
}

/* nx_put_match() and helpers.
 *
 * 'put' functions whose names end in 'w' add a wildcarded field.
 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
 * Other 'put' functions add exact-match fields. */

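/* For example (illustration only): given the convention above,
 * nxm_put_16m(b, NXM_OF_VLAN_TCI, value, mask) appends nothing when 'mask'
 * is zero (fully wildcarded), a plain NXM_OF_VLAN_TCI entry when 'mask' is
 * all-ones, and an NXM_OF_VLAN_TCI_W entry carrying both 'value' and 'mask'
 * for any other mask. */
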
static void
nxm_put_header(struct ofpbuf *b, uint32_t header)
{
    ovs_be32 n_header = htonl(header);
    ofpbuf_put(b, &n_header, sizeof n_header);
}

static void
nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

static void
nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
{
    switch (mask) {
    case 0:
        break;

    case UINT8_MAX:
        nxm_put_8(b, header, value);
        break;

    default:
        nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
        ofpbuf_put(b, &value, sizeof value);
        ofpbuf_put(b, &mask, sizeof mask);
        break;
    }
}

static void
nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

static void
nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}

static void
nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
{
    switch (mask) {
    case 0:
        break;

    case CONSTANT_HTONS(UINT16_MAX):
        nxm_put_16(b, header, value);
        break;

    default:
        nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
        break;
    }
}

static void
nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

static void
nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}

static void
nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
{
    switch (mask) {
    case 0:
        break;

    case CONSTANT_HTONL(UINT32_MAX):
        nxm_put_32(b, header, value);
        break;

    default:
        nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
        break;
    }
}

static void
nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

static void
nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}

static void
nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
{
    switch (mask) {
    case 0:
        break;

    case CONSTANT_HTONLL(UINT64_MAX):
        nxm_put_64(b, header, value);
        break;

    default:
        nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
        break;
    }
}

static void
nxm_put_eth(struct ofpbuf *b, uint32_t header,
            const uint8_t value[ETH_ADDR_LEN])
{
    nxm_put_header(b, header);
    ofpbuf_put(b, value, ETH_ADDR_LEN);
}

static void
nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
                   const uint8_t value[ETH_ADDR_LEN],
                   const uint8_t mask[ETH_ADDR_LEN])
{
    if (!eth_addr_is_zero(mask)) {
        if (eth_mask_is_exact(mask)) {
            nxm_put_eth(b, header, value);
        } else {
            nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
            ofpbuf_put(b, value, ETH_ADDR_LEN);
            ofpbuf_put(b, mask, ETH_ADDR_LEN);
        }
    }
}

static void
nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
             const struct in6_addr *value, const struct in6_addr *mask)
{
    if (ipv6_mask_is_any(mask)) {
        return;
    } else if (ipv6_mask_is_exact(mask)) {
        nxm_put_header(b, header);
        ofpbuf_put(b, value, sizeof *value);
    } else {
        nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
        ofpbuf_put(b, value, sizeof *value);
        ofpbuf_put(b, mask, sizeof *mask);
    }
}

static void
nxm_put_frag(struct ofpbuf *b, const struct cls_rule *cr)
{
    uint8_t nw_frag = cr->flow.nw_frag;
    uint8_t nw_frag_mask = cr->wc.nw_frag_mask;

    switch (nw_frag_mask) {
    case 0:
        break;

    case FLOW_NW_FRAG_MASK:
        nxm_put_8(b, NXM_NX_IP_FRAG, nw_frag);
        break;

    default:
        nxm_put_8m(b, NXM_NX_IP_FRAG, nw_frag,
                   nw_frag_mask & FLOW_NW_FRAG_MASK);
        break;
    }
}

static void
nxm_put_ip(struct ofpbuf *b, const struct cls_rule *cr,
           uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code,
           bool oxm)
{
    const flow_wildcards_t wc = cr->wc.wildcards;
    const struct flow *flow = &cr->flow;

    nxm_put_frag(b, cr);

    if (!(wc & FWW_NW_DSCP)) {
        nxm_put_8(b, oxm ? OXM_OF_IP_DSCP : NXM_OF_IP_TOS,
                  flow->nw_tos & IP_DSCP_MASK);
    }

    if (!(wc & FWW_NW_ECN)) {
        nxm_put_8(b, oxm ? OXM_OF_IP_ECN : NXM_NX_IP_ECN,
                  flow->nw_tos & IP_ECN_MASK);
    }

    if (!oxm && !(wc & FWW_NW_TTL)) {
        nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
    }

    if (!(wc & FWW_NW_PROTO)) {
        nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto);

        if (flow->nw_proto == IPPROTO_TCP) {
            nxm_put_16m(b, oxm ? OXM_OF_TCP_SRC : NXM_OF_TCP_SRC,
                        flow->tp_src, cr->wc.tp_src_mask);
            nxm_put_16m(b, oxm ? OXM_OF_TCP_DST : NXM_OF_TCP_DST,
                        flow->tp_dst, cr->wc.tp_dst_mask);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            nxm_put_16m(b, oxm ? OXM_OF_UDP_SRC : NXM_OF_UDP_SRC,
                        flow->tp_src, cr->wc.tp_src_mask);
            nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST,
                        flow->tp_dst, cr->wc.tp_dst_mask);
        } else if (flow->nw_proto == icmp_proto) {
            if (cr->wc.tp_src_mask) {
                nxm_put_8(b, icmp_type, ntohs(flow->tp_src));
            }
            if (cr->wc.tp_dst_mask) {
                nxm_put_8(b, icmp_code, ntohs(flow->tp_dst));
            }
        }
    }
}

/* Appends to 'b' the nx_match format that expresses 'cr' (except for
 * 'cr->priority', because priority is not part of nx_match), plus enough
 * zero bytes to pad the nx_match out to a multiple of 8.  For Flow Mod and
 * Flow Stats Request messages, a 'cookie' and 'cookie_mask' may be supplied.
 * Otherwise, 'cookie_mask' should be zero.
 *
 * This function can cause 'b''s data to be reallocated.
 *
 * Returns the number of bytes appended to 'b', excluding padding.
 *
 * If 'cr' is a catch-all rule that matches every packet, then this function
 * appends nothing to 'b' and returns 0. */
int
nx_put_match(struct ofpbuf *b, bool oxm, const struct cls_rule *cr,
             ovs_be64 cookie, ovs_be64 cookie_mask)
{
    const flow_wildcards_t wc = cr->wc.wildcards;
    const struct flow *flow = &cr->flow;
    const size_t start_len = b->size;
    int match_len;
    int i;

    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 11);

    /* Metadata. */
    if (!(wc & FWW_IN_PORT)) {
        uint16_t in_port = flow->in_port;
        if (oxm) {
            nxm_put_32(b, OXM_OF_IN_PORT, ofputil_port_to_ofp11(in_port));
        } else {
            nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
        }
    }

    /* Ethernet. */
    nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_SRC : NXM_OF_ETH_SRC,
                       flow->dl_src, cr->wc.dl_src_mask);
    nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_DST : NXM_OF_ETH_DST,
                       flow->dl_dst, cr->wc.dl_dst_mask);
    if (!(wc & FWW_DL_TYPE)) {
        nxm_put_16(b, oxm ? OXM_OF_ETH_TYPE : NXM_OF_ETH_TYPE,
                   ofputil_dl_type_to_openflow(flow->dl_type));
    }

    /* 802.1Q.
     * XXX missing OXM support */
    nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask);

    /* L3. */
    if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
        /* IP. */
        nxm_put_32m(b, oxm ? OXM_OF_IPV4_SRC : NXM_OF_IP_SRC,
                    flow->nw_src, cr->wc.nw_src_mask);
        nxm_put_32m(b, oxm ? OXM_OF_IPV4_DST : NXM_OF_IP_DST,
                    flow->nw_dst, cr->wc.nw_dst_mask);
        nxm_put_ip(b, cr, IPPROTO_ICMP,
                   oxm ? OXM_OF_ICMPV4_TYPE : NXM_OF_ICMP_TYPE,
                   oxm ? OXM_OF_ICMPV4_CODE : NXM_OF_ICMP_CODE, oxm);
    } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IPV6)) {
        /* IPv6. */
        nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_SRC : NXM_NX_IPV6_SRC,
                     &flow->ipv6_src, &cr->wc.ipv6_src_mask);
        nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_DST : NXM_NX_IPV6_DST,
                     &flow->ipv6_dst, &cr->wc.ipv6_dst_mask);
        nxm_put_ip(b, cr, IPPROTO_ICMPV6,
                   oxm ? OXM_OF_ICMPV6_TYPE : NXM_NX_ICMPV6_TYPE,
                   oxm ? OXM_OF_ICMPV6_CODE : NXM_NX_ICMPV6_CODE, oxm);

        if (!(wc & FWW_IPV6_LABEL)) {
            nxm_put_32(b, oxm ? OXM_OF_IPV6_FLABEL : NXM_NX_IPV6_LABEL,
                       flow->ipv6_label);
        }

        if (flow->nw_proto == IPPROTO_ICMPV6
            && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
                flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
            nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET,
                         &flow->nd_target, &cr->wc.nd_target_mask);
            if (!(wc & FWW_ARP_SHA)
                && flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
                nxm_put_eth(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL,
                            flow->arp_sha);
            }
            if (!(wc & FWW_ARP_THA)
                && flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
                nxm_put_eth(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL,
                            flow->arp_tha);
            }
        }
    } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
        /* ARP. */
        if (!(wc & FWW_NW_PROTO)) {
            nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP,
                       htons(flow->nw_proto));
        }
        nxm_put_32m(b, oxm ? OXM_OF_ARP_SPA : NXM_OF_ARP_SPA,
                    flow->nw_src, cr->wc.nw_src_mask);
        nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA,
                    flow->nw_dst, cr->wc.nw_dst_mask);
        if (!(wc & FWW_ARP_SHA)) {
            nxm_put_eth(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
                        flow->arp_sha);
        }
        if (!(wc & FWW_ARP_THA)) {
            nxm_put_eth(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
                        flow->arp_tha);
        }
    }

    /* Tunnel ID. */
    nxm_put_64m(b, NXM_NX_TUN_ID, flow->tun_id, cr->wc.tun_id_mask);

    /* Registers. */
    for (i = 0; i < FLOW_N_REGS; i++) {
        nxm_put_32m(b, NXM_NX_REG(i),
                    htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
    }

    /* OpenFlow cookie. */
    nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask);

    match_len = b->size - start_len;
    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}

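/* Usage sketch (illustrative only): serializing a rule into a fresh buffer,
 * e.g. while composing an NXM-based flow_mod, might look like:
 *
 *     struct ofpbuf *msg = ofpbuf_new(64);
 *     int match_len = nx_put_match(msg, false, &rule, htonll(0), htonll(0));
 *
 * Afterward 'msg' holds 'match_len' bytes of nx_match plus zero padding out
 * to a multiple of 8 bytes, and a catch-all 'rule' yields match_len == 0. */
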
/* nx_match_to_string() and helpers. */

static void format_nxm_field_name(struct ds *, uint32_t header);

char *
nx_match_to_string(const uint8_t *p, unsigned int match_len)
{
    uint32_t header;
    struct ds s;

    if (!match_len) {
        return xstrdup("<any>");
    }

    ds_init(&s);
    while ((header = nx_entry_ok(p, match_len)) != 0) {
        unsigned int length = NXM_LENGTH(header);
        unsigned int value_len = nxm_field_bytes(header);
        const uint8_t *value = p + 4;
        const uint8_t *mask = value + value_len;
        unsigned int i;

        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        format_nxm_field_name(&s, header);
        ds_put_char(&s, '(');

        for (i = 0; i < value_len; i++) {
            ds_put_format(&s, "%02x", value[i]);
        }
        if (NXM_HASMASK(header)) {
            ds_put_char(&s, '/');
            for (i = 0; i < value_len; i++) {
                ds_put_format(&s, "%02x", mask[i]);
            }
        }
        ds_put_char(&s, ')');

        p += 4 + length;
        match_len -= 4 + length;
    }

    if (match_len) {
        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        ds_put_format(&s, "<%u invalid bytes>", match_len);
    }

    return ds_steal_cstr(&s);
}

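/* Example output (illustrative): a match on an ingress port and a masked
 * register formats roughly as
 *
 *     NXM_OF_IN_PORT(0001), NXM_NX_REG0_W(00000002/0000000f)
 *
 * Trailing bytes that do not form a complete nxm_entry are reported as
 * "<N invalid bytes>", and the caller must free the returned string. */
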
static void
format_nxm_field_name(struct ds *s, uint32_t header)
{
    const struct mf_field *mf = mf_from_nxm_header(header);
    if (mf) {
        ds_put_cstr(s, IS_OXM_HEADER(header) ? mf->oxm_name : mf->nxm_name);
        if (NXM_HASMASK(header)) {
            ds_put_cstr(s, "_W");
        }
    } else if (header == NXM_NX_COOKIE) {
        ds_put_cstr(s, "NXM_NX_COOKIE");
    } else if (header == NXM_NX_COOKIE_W) {
        ds_put_cstr(s, "NXM_NX_COOKIE_W");
    } else {
        ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
    }
}

static uint32_t
parse_nxm_field_name(const char *name, int name_len)
{
    bool wild;
    int i;

    /* Check whether it's a field name. */
    wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
    if (wild) {
        name_len -= 2;
    }

    for (i = 0; i < MFF_N_IDS; i++) {
        const struct mf_field *mf = mf_from_id(i);
        uint32_t header;

        if (mf->nxm_name &&
            !strncmp(mf->nxm_name, name, name_len) &&
            mf->nxm_name[name_len] == '\0') {
            header = mf->nxm_header;
        } else if (mf->oxm_name &&
                   !strncmp(mf->oxm_name, name, name_len) &&
                   mf->oxm_name[name_len] == '\0') {
            header = mf->oxm_header;
        } else {
            continue;
        }

        if (!wild) {
            return header;
        } else if (mf->maskable != MFM_NONE) {
            return NXM_MAKE_WILD_HEADER(header);
        }
    }

    if (!strncmp("NXM_NX_COOKIE", name, name_len) &&
        (name_len == strlen("NXM_NX_COOKIE"))) {
        if (!wild) {
            return NXM_NX_COOKIE;
        } else {
            return NXM_NX_COOKIE_W;
        }
    }

    /* Check whether it's a 32-bit field header value as hex.
     * (This isn't ordinarily useful except for testing error behavior.) */
    if (name_len == 8) {
        uint32_t header = hexits_value(name, name_len, NULL);
        if (header != UINT_MAX) {
            return header;
        }
    }

    return 0;
}

/* nx_match_from_string(). */

int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    const char *full_s = s;
    const size_t start_len = b->size;
    int match_len;

    if (!strcmp(s, "<any>")) {
        /* Ensure that 'b->data' isn't actually null. */
        ofpbuf_prealloc_tailroom(b, 1);
        return 0;
    }

    for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
        const char *name;
        uint32_t header;
        int name_len;
        size_t n;

        name = s;
        name_len = strcspn(s, "(");
        if (s[name_len] != '(') {
            ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
        }

        header = parse_nxm_field_name(name, name_len);
        if (!header) {
            ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
        }

        s += name_len + 1;

        nxm_put_header(b, header);
        s = ofpbuf_put_hex(b, s, &n);
        if (n != nxm_field_bytes(header)) {
            ovs_fatal(0, "%.2s: hex digits expected", s);
        }
        if (NXM_HASMASK(header)) {
            s += strspn(s, " ");
            if (*s != '/') {
                ovs_fatal(0, "%s: missing / in masked field %.*s",
                          full_s, name_len, name);
            }
            s = ofpbuf_put_hex(b, s + 1, &n);
            if (n != nxm_field_bytes(header)) {
                ovs_fatal(0, "%.2s: hex digits expected", s);
            }
        }

        s += strspn(s, " ");
        if (*s != ')') {
            ovs_fatal(0, "%s: missing ) following field %.*s",
                      full_s, name_len, name);
        }
        s++;
    }

    match_len = b->size - start_len;
    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}

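/* Usage sketch (illustrative only): the parser accepts the same syntax that
 * nx_match_to_string() produces, so a test utility can round-trip a match:
 *
 *     struct ofpbuf nx_match;
 *     ofpbuf_init(&nx_match, 0);
 *     int match_len = nx_match_from_string(
 *         "NXM_OF_ETH_TYPE(0800), NXM_OF_IP_PROTO(06)", &nx_match);
 *
 * Any syntax error terminates the program via ovs_fatal(), so this is meant
 * for command-line and test tools rather than for untrusted input. */
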
void
nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s)
{
    const char *full_s = s;
    struct mf_subfield src, dst;

    s = mf_parse_subfield(&src, s);
    if (strncmp(s, "->", 2)) {
        ovs_fatal(0, "%s: missing `->' following source", full_s);
    }
    s += 2;
    s = mf_parse_subfield(&dst, s);
    if (*s != '\0') {
        ovs_fatal(0, "%s: trailing garbage following destination", full_s);
    }

    if (src.n_bits != dst.n_bits) {
        ovs_fatal(0, "%s: source field is %d bits wide but destination is "
                  "%d bits wide", full_s, src.n_bits, dst.n_bits);
    }

    ofputil_init_NXAST_REG_MOVE(move);
    move->n_bits = htons(src.n_bits);
    move->src_ofs = htons(src.ofs);
    move->dst_ofs = htons(dst.ofs);
    move->src = htonl(src.field->nxm_header);
    move->dst = htonl(dst.field->nxm_header);
}

void
nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s)
{
    const char *full_s = s;
    struct mf_subfield dst;
    uint64_t value;

    value = strtoull(s, (char **) &s, 0);
    if (strncmp(s, "->", 2)) {
        ovs_fatal(0, "%s: missing `->' following value", full_s);
    }
    s += 2;
    s = mf_parse_subfield(&dst, s);
    if (*s != '\0') {
        ovs_fatal(0, "%s: trailing garbage following destination", full_s);
    }

    if (dst.n_bits < 64 && (value >> dst.n_bits) != 0) {
        ovs_fatal(0, "%s: value %"PRIu64" does not fit into %u bits",
                  full_s, value, dst.n_bits);
    }

    ofputil_init_NXAST_REG_LOAD(load);
    load->ofs_nbits = nxm_encode_ofs_nbits(dst.ofs, dst.n_bits);
    load->dst = htonl(dst.field->nxm_header);
    load->value = htonll(value);
}

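/* Example of the text syntax these parsers accept (illustrative only):
 *
 *     nxm_parse_reg_move(&move, "NXM_NX_REG0[0..5]->NXM_NX_REG1[26..31]");
 *     nxm_parse_reg_load(&load, "0x1c->NXM_NX_REG2[0..5]");
 *
 * Both abort with ovs_fatal() on malformed input, and nxm_parse_reg_move()
 * additionally requires the source and destination ranges to have the same
 * width. */
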
/* nxm_format_reg_move(), nxm_format_reg_load(). */

void
nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s)
{
    struct mf_subfield src, dst;

    nxm_decode_discrete(&src, move->src, move->src_ofs, move->n_bits);
    nxm_decode_discrete(&dst, move->dst, move->dst_ofs, move->n_bits);

    ds_put_format(s, "move:");
    mf_format_subfield(&src, s);
    ds_put_cstr(s, "->");
    mf_format_subfield(&dst, s);
}

void
nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s)
{
    struct mf_subfield dst;

    ds_put_format(s, "load:%#"PRIx64"->", ntohll(load->value));

    nxm_decode(&dst, load->dst, load->ofs_nbits);
    mf_format_subfield(&dst, s);
}

/* nxm_check_reg_move(), nxm_check_reg_load(). */

enum ofperr
nxm_check_reg_move(const struct nx_action_reg_move *action,
                   const struct flow *flow)
{
    struct mf_subfield src;
    struct mf_subfield dst;
    enum ofperr error;

    nxm_decode_discrete(&src, action->src, action->src_ofs, action->n_bits);
    error = mf_check_src(&src, flow);
    if (error) {
        return error;
    }

    nxm_decode_discrete(&dst, action->dst, action->dst_ofs, action->n_bits);
    return mf_check_dst(&dst, flow);
}

enum ofperr
nxm_check_reg_load(const struct nx_action_reg_load *action,
                   const struct flow *flow)
{
    struct mf_subfield dst;
    enum ofperr error;

    nxm_decode(&dst, action->dst, action->ofs_nbits);
    error = mf_check_dst(&dst, flow);
    if (error) {
        return error;
    }

    /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
     * 'action->value'. */
    if (dst.n_bits < 64 && ntohll(action->value) >> dst.n_bits) {
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }

    return 0;
}

/* nxm_execute_reg_move(), nxm_execute_reg_load(). */

void
nxm_execute_reg_move(const struct nx_action_reg_move *action,
                     struct flow *flow)
{
    struct mf_subfield src, dst;
    union mf_value src_value;
    union mf_value dst_value;

    nxm_decode_discrete(&src, action->src, action->src_ofs, action->n_bits);
    nxm_decode_discrete(&dst, action->dst, action->dst_ofs, action->n_bits);

    mf_get_value(dst.field, flow, &dst_value);
    mf_get_value(src.field, flow, &src_value);
    bitwise_copy(&src_value, src.field->n_bytes, src.ofs,
                 &dst_value, dst.field->n_bytes, dst.ofs,
                 src.n_bits);
    mf_set_flow_value(dst.field, &dst_value, flow);
}

void
nxm_execute_reg_load(const struct nx_action_reg_load *action,
                     struct flow *flow)
{
    struct mf_subfield dst;

    nxm_decode(&dst, action->dst, action->ofs_nbits);
    mf_set_subfield_value(&dst, ntohll(action->value), flow);
}

/* Initializes 'sf->field' with the field corresponding to the given NXM
 * 'header' and 'sf->ofs' and 'sf->n_bits' decoded from 'ofs_nbits' with
 * nxm_decode_ofs() and nxm_decode_n_bits(), respectively.
 *
 * Afterward, 'sf' might be invalid in a few different ways:
 *
 *   - 'sf->field' will be NULL if 'header' is unknown.
 *
 *   - 'sf->ofs' and 'sf->n_bits' might exceed the width of sf->field.
 *
 * The caller should call mf_check_src() or mf_check_dst() to check for these
 * problems. */
void
nxm_decode(struct mf_subfield *sf, ovs_be32 header, ovs_be16 ofs_nbits)
{
    sf->field = mf_from_nxm_header(ntohl(header));
    sf->ofs = nxm_decode_ofs(ofs_nbits);
    sf->n_bits = nxm_decode_n_bits(ofs_nbits);
}

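/* Worked example (illustrative, assuming the ofs_nbits encoding from
 * nicira-ext.h, where the low 6 bits hold n_bits - 1 and the upper bits hold
 * the bit offset): nxm_encode_ofs_nbits(26, 6) yields htons((26 << 6) | 5),
 * i.e. 0x0685 in host byte order, and nxm_decode() recovers ofs == 26 and
 * n_bits == 6 from it. */
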
/* Initializes 'sf->field' with the field corresponding to the given NXM
 * 'header' and 'sf->ofs' and 'sf->n_bits' from 'ofs' and 'n_bits',
 * respectively.
 *
 * Afterward, 'sf' might be invalid in a few different ways:
 *
 *   - 'sf->field' will be NULL if 'header' is unknown.
 *
 *   - 'sf->ofs' and 'sf->n_bits' might exceed the width of sf->field.
 *
 * The caller should call mf_check_src() or mf_check_dst() to check for these
 * problems. */
void
nxm_decode_discrete(struct mf_subfield *sf, ovs_be32 header,
                    ovs_be16 ofs, ovs_be16 n_bits)
{
    sf->field = mf_from_nxm_header(ntohl(header));
    sf->ofs = ntohs(ofs);
    sf->n_bits = ntohs(n_bits);
}

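/* Note (added for illustration): nxm_decode_discrete() exists because
 * NXAST_REG_MOVE carries 'ofs' and 'n_bits' as separate 16-bit fields, while
 * NXAST_REG_LOAD packs both into the single 'ofs_nbits' that nxm_decode()
 * above understands.  A hypothetical caller decoding a reg_move source:
 *
 *     struct mf_subfield src;
 *     nxm_decode_discrete(&src, move->src, move->src_ofs, move->n_bits);
 *     (src.field, src.ofs and src.n_bits now describe the source range)
 */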