2 * Copyright (c) 2009, 2010, 2011 Nicira Networks.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "ofproto/private.h"
25 #include "byte-order.h"
30 #include "dynamic-string.h"
31 #include "fail-open.h"
34 #include "mac-learning.h"
35 #include "multipath.h"
42 #include "ofp-print.h"
43 #include "ofproto-sflow.h"
44 #include "poll-loop.h"
46 #include "unaligned.h"
48 #include "vlan-bitmap.h"
51 VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
53 COVERAGE_DEFINE(ofproto_dpif_ctlr_action);
54 COVERAGE_DEFINE(ofproto_dpif_expired);
55 COVERAGE_DEFINE(ofproto_dpif_no_packet_in);
56 COVERAGE_DEFINE(ofproto_dpif_xlate);
57 COVERAGE_DEFINE(facet_changed_rule);
58 COVERAGE_DEFINE(facet_invalidated);
59 COVERAGE_DEFINE(facet_revalidate);
60 COVERAGE_DEFINE(facet_unexpected);
62 /* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a
63 * flow translation. */
64 #define MAX_RESUBMIT_RECURSION 16
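/* (The 'recurse' member of struct action_xlate_ctx, below, tracks the current
 * resubmit depth; once it reaches this limit, translation stops resubmitting
 * rather than recursing further.) */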
72 long long int used; /* Time last used; time created if not used. */
76 * - Do include packets and bytes from facets that have been deleted or
77 * whose own statistics have been folded into the rule.
79 * - Do include packets and bytes sent "by hand" that were accounted to
80 * the rule without any facet being involved (this is a rare corner
81 * case in rule_execute()).
83 * - Do not include packets or bytes that can be obtained from any facet's
84 * packet_count or byte_count member or that can be obtained from the
85 * datapath by, e.g., dpif_flow_get() for any facet.
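 *
 *     For example, traffic handled by one of this rule's facets is counted
 *     by that facet (or by the datapath) while the facet is alive; those
 *     counts move into packet_count and byte_count below only when
 *     facet_flush_stats() folds the facet's statistics into its rule.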
87 uint64_t packet_count; /* Number of packets received. */
88 uint64_t byte_count; /* Number of bytes received. */
90 struct list facets; /* List of "struct facet"s. */
93 static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
95 return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
98 static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *ofproto,
99 const struct flow *flow);
101 #define MAX_MIRRORS 32
102 typedef uint32_t mirror_mask_t;
103 #define MIRROR_MASK_C(X) UINT32_C(X)
104 BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
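/* Each mirror occupies one bit, selected by its 'idx', in a mirror_mask_t
 * (see "MIRROR_MASK_C(1) << mirror->idx" below), so the assertion above
 * guarantees that the mask is wide enough for MAX_MIRRORS mirrors. */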
106 struct ofproto_dpif *ofproto; /* Owning ofproto. */
107 size_t idx; /* In ofproto's "mirrors" array. */
108 void *aux; /* Key supplied by ofproto's client. */
109 char *name; /* Identifier for log messages. */
111 /* Selection criteria. */
112 struct hmapx srcs; /* Contains "struct ofbundle *"s. */
113 struct hmapx dsts; /* Contains "struct ofbundle *"s. */
114 unsigned long *vlans; /* Bitmap of chosen VLANs, NULL selects all. */
116 /* Output (mutually exclusive). */
117 struct ofbundle *out; /* Output port or NULL. */
118 int out_vlan; /* Output VLAN or -1. */
121 static void mirror_destroy(struct ofmirror *);
123 /* A group of one or more OpenFlow ports. */
124 #define OFBUNDLE_FLOOD ((struct ofbundle *) 1)
126 struct ofproto_dpif *ofproto; /* Owning ofproto. */
127 struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
128 void *aux; /* Key supplied by ofproto's client. */
129 char *name; /* Identifier for log messages. */
132 struct list ports; /* Contains "struct ofport"s. */
133 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
134 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
135 * NULL if all VLANs are trunked. */
136 struct lacp *lacp; /* LACP if LACP is enabled, otherwise NULL. */
137 struct bond *bond; /* Nonnull iff more than one port. */
140 bool floodable; /* True if no port has OFPPC_NO_FLOOD set. */
142 /* Port mirroring info. */
143 mirror_mask_t src_mirrors; /* Mirrors triggered when packet received. */
144 mirror_mask_t dst_mirrors; /* Mirrors triggered when packet sent. */
145 mirror_mask_t mirror_out; /* Mirrors that output to this bundle. */
148 static void bundle_remove(struct ofport *);
149 static void bundle_destroy(struct ofbundle *);
150 static void bundle_del_port(struct ofport_dpif *);
151 static void bundle_run(struct ofbundle *);
152 static void bundle_wait(struct ofbundle *);
154 struct action_xlate_ctx {
155 /* action_xlate_ctx_init() initializes these members. */
158 struct ofproto_dpif *ofproto;
160 /* Flow to which the OpenFlow actions apply. xlate_actions() will modify
161 * this flow when actions change header fields. */
164 /* The packet corresponding to 'flow', or a null pointer if we are
165 * revalidating without a packet to refer to. */
166 const struct ofpbuf *packet;
168 /* If nonnull, called just before executing a resubmit action.
170 * This is normally null so the client has to set it manually after
171 * calling action_xlate_ctx_init(). */
172 void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *);
174 /* xlate_actions() initializes and uses these members. The client might want
175 * to look at them after it returns. */
177 struct ofpbuf *odp_actions; /* Datapath actions. */
178 tag_type tags; /* Tags associated with OFPP_NORMAL actions. */
179 bool may_set_up_flow; /* True ordinarily; false if the actions must
180 * be reassessed for every packet. */
181 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
183 /* xlate_actions() initializes and uses these members, but the client has no
184 * reason to look at them. */
186 int recurse; /* Recursion level, via xlate_table_action. */
187 int last_pop_priority; /* Offset in 'odp_actions' just past most
188 * recent ODP_ACTION_ATTR_SET_PRIORITY. */
191 static void action_xlate_ctx_init(struct action_xlate_ctx *,
192 struct ofproto_dpif *, const struct flow *,
193 const struct ofpbuf *);
194 static struct ofpbuf *xlate_actions(struct action_xlate_ctx *,
195 const union ofp_action *in, size_t n_in);
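/* Typical usage, mirroring facet_make_actions() below ('my_resubmit_trace' is
 * a hypothetical hook; setting one is optional):
 *
 *     struct action_xlate_ctx ctx;
 *     struct ofpbuf *odp_actions;
 *
 *     action_xlate_ctx_init(&ctx, ofproto, &flow, packet);
 *     ctx.resubmit_hook = my_resubmit_trace;
 *     odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
 *     ...examine ctx.tags, ctx.may_set_up_flow, ctx.nf_output_iface...
 *     ofpbuf_delete(odp_actions);
 */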
197 /* An exact-match instantiation of an OpenFlow flow. */
199 long long int used; /* Time last used; time created if not used. */
203 * - Do include packets and bytes sent "by hand", e.g. with
206 * - Do include packets and bytes that were obtained from the datapath
207 * when a flow was deleted (e.g. dpif_flow_del()) or when its
208 * statistics were reset (e.g. dpif_flow_put() with
209 * DPIF_FP_ZERO_STATS).
211 * - Do not include any packets or bytes that can currently be obtained
212 * from the datapath by, e.g., dpif_flow_get().
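 *
 *     For example, update_stats() adds only the difference between the
 *     datapath's current counters and dp_packet_count/dp_byte_count below,
 *     then records the datapath's counters as the new dp_* values, so bytes
 *     still visible via dpif_flow_get() are never counted here twice.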
214 uint64_t packet_count; /* Number of packets received. */
215 uint64_t byte_count; /* Number of bytes received. */
217 uint64_t dp_packet_count; /* Last known packet count in the datapath. */
218 uint64_t dp_byte_count; /* Last known byte count in the datapath. */
220 uint64_t rs_packet_count; /* Packets pushed to resubmit children. */
221 uint64_t rs_byte_count; /* Bytes pushed to resubmit children. */
222 long long int rs_used; /* Used time pushed to resubmit children. */
224 /* Number of bytes passed to account_cb. This may include bytes that can
225 * currently be obtained from the datapath (thus, it can be greater than
227 uint64_t accounted_bytes;
229 struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
230 struct list list_node; /* In owning rule's 'facets' list. */
231 struct rule_dpif *rule; /* Owning rule. */
232 struct flow flow; /* Exact-match flow. */
233 bool installed; /* Installed in datapath? */
234 bool may_install; /* True ordinarily; false if actions must
235 * be reassessed for every packet. */
236 size_t actions_len; /* Number of bytes in actions[]. */
237 struct nlattr *actions; /* Datapath actions. */
238 tag_type tags; /* Tags. */
239 struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
242 static struct facet *facet_create(struct rule_dpif *, const struct flow *,
243 const struct ofpbuf *packet);
244 static void facet_remove(struct ofproto_dpif *, struct facet *);
245 static void facet_free(struct facet *);
247 static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
248 static struct facet *facet_lookup_valid(struct ofproto_dpif *,
249 const struct flow *);
250 static bool facet_revalidate(struct ofproto_dpif *, struct facet *);
252 static void facet_execute(struct ofproto_dpif *, struct facet *,
253 struct ofpbuf *packet);
255 static int facet_put__(struct ofproto_dpif *, struct facet *,
256 const struct nlattr *actions, size_t actions_len,
257 struct dpif_flow_stats *);
258 static void facet_install(struct ofproto_dpif *, struct facet *,
260 static void facet_uninstall(struct ofproto_dpif *, struct facet *);
261 static void facet_flush_stats(struct ofproto_dpif *, struct facet *);
263 static void facet_make_actions(struct ofproto_dpif *, struct facet *,
264 const struct ofpbuf *packet);
265 static void facet_update_time(struct ofproto_dpif *, struct facet *,
267 static void facet_update_stats(struct ofproto_dpif *, struct facet *,
268 const struct dpif_flow_stats *);
269 static void facet_push_stats(struct facet *);
270 static void facet_account(struct ofproto_dpif *, struct facet *,
271 uint64_t extra_bytes);
273 static bool facet_is_controller_flow(struct facet *);
275 static void flow_push_stats(const struct rule_dpif *,
276 struct flow *, uint64_t packets, uint64_t bytes,
283 struct ofbundle *bundle; /* Bundle that contains this port, if any. */
284 struct list bundle_node; /* In struct ofbundle's "ports" list. */
285 struct cfm *cfm; /* Connectivity Fault Management, if any. */
286 tag_type tag; /* Tag associated with this port. */
287 uint32_t bond_stable_id; /* stable_id to use as bond slave, or 0. */
290 static struct ofport_dpif *
291 ofport_dpif_cast(const struct ofport *ofport)
293 assert(!ofport || ofport->ofproto->ofproto_class == &ofproto_dpif_class);
294 return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
297 static void port_run(struct ofport_dpif *);
298 static void port_wait(struct ofport_dpif *);
299 static int set_cfm(struct ofport *, const struct cfm *,
300 const uint16_t *remote_mps, size_t n_remote_mps);
302 struct ofproto_dpif {
311 struct netflow *netflow;
312 struct ofproto_sflow *sflow;
313 struct hmap bundles; /* Contains "struct ofbundle"s. */
314 struct mac_learning *ml;
315 struct ofmirror *mirrors[MAX_MIRRORS];
316 bool has_bonded_bundles;
319 struct timer next_expiration;
323 bool need_revalidate;
324 struct tag_set revalidate_set;
327 static void ofproto_dpif_unixctl_init(void);
329 static struct ofproto_dpif *
330 ofproto_dpif_cast(const struct ofproto *ofproto)
332 assert(ofproto->ofproto_class == &ofproto_dpif_class);
333 return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
336 static struct ofport_dpif *get_ofp_port(struct ofproto_dpif *,
338 static struct ofport_dpif *get_odp_port(struct ofproto_dpif *,
341 /* Packet processing. */
342 static void update_learning_table(struct ofproto_dpif *,
343 const struct flow *, int vlan,
345 static bool is_admissible(struct ofproto_dpif *, const struct flow *,
346 bool have_packet, tag_type *, int *vlanp,
347 struct ofbundle **in_bundlep);
348 static void handle_upcall(struct ofproto_dpif *, struct dpif_upcall *);
350 /* Flow expiration. */
351 static int expire(struct ofproto_dpif *);
354 static int send_packet(struct ofproto_dpif *,
355 uint32_t odp_port, uint16_t vlan_tci,
356 const struct ofpbuf *packet);
358 /* Global variables. */
359 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
361 /* Factory functions. */
364 enumerate_types(struct sset *types)
366 dp_enumerate_types(types);
370 enumerate_names(const char *type, struct sset *names)
372 return dp_enumerate_names(type, names);
376 del(const char *type, const char *name)
381 error = dpif_open(name, type, &dpif);
383 error = dpif_delete(dpif);
389 /* Basic life-cycle. */
391 static struct ofproto *
394 struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
399 dealloc(struct ofproto *ofproto_)
401 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
406 construct(struct ofproto *ofproto_)
408 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
409 const char *name = ofproto->up.name;
413 error = dpif_create_and_open(name, ofproto->up.type, &ofproto->dpif);
415 VLOG_ERR("failed to open datapath %s: %s", name, strerror(error));
419 ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
420 ofproto->n_matches = 0;
422 error = dpif_recv_set_mask(ofproto->dpif,
423 ((1u << DPIF_UC_MISS) |
424 (1u << DPIF_UC_ACTION) |
425 (1u << DPIF_UC_SAMPLE)));
427 VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
428 dpif_close(ofproto->dpif);
431 dpif_flow_flush(ofproto->dpif);
432 dpif_recv_purge(ofproto->dpif);
434 ofproto->netflow = NULL;
435 ofproto->sflow = NULL;
436 hmap_init(&ofproto->bundles);
437 ofproto->ml = mac_learning_create();
438 for (i = 0; i < MAX_MIRRORS; i++) {
439 ofproto->mirrors[i] = NULL;
441 ofproto->has_bonded_bundles = false;
443 timer_set_duration(&ofproto->next_expiration, 1000);
445 hmap_init(&ofproto->facets);
446 ofproto->need_revalidate = false;
447 tag_set_init(&ofproto->revalidate_set);
449 ofproto->up.tables = xmalloc(sizeof *ofproto->up.tables);
450 classifier_init(&ofproto->up.tables[0]);
451 ofproto->up.n_tables = 1;
453 ofproto_dpif_unixctl_init();
459 destruct(struct ofproto *ofproto_)
461 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
464 for (i = 0; i < MAX_MIRRORS; i++) {
465 mirror_destroy(ofproto->mirrors[i]);
468 netflow_destroy(ofproto->netflow);
469 ofproto_sflow_destroy(ofproto->sflow);
470 hmap_destroy(&ofproto->bundles);
471 mac_learning_destroy(ofproto->ml);
473 hmap_destroy(&ofproto->facets);
475 dpif_close(ofproto->dpif);
479 run(struct ofproto *ofproto_)
481 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
482 struct ofport_dpif *ofport;
483 struct ofbundle *bundle;
486 dpif_run(ofproto->dpif);
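/* Handle a bounded batch of upcalls per call (presumably so that a burst of
 * misses cannot starve the rest of the poll loop). */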
488 for (i = 0; i < 50; i++) {
489 struct dpif_upcall packet;
492 error = dpif_recv(ofproto->dpif, &packet);
494 if (error == ENODEV) {
495 /* Datapath destroyed. */
501 handle_upcall(ofproto, &packet);
504 if (timer_expired(&ofproto->next_expiration)) {
505 int delay = expire(ofproto);
506 timer_set_duration(&ofproto->next_expiration, delay);
509 if (ofproto->netflow) {
510 netflow_run(ofproto->netflow);
512 if (ofproto->sflow) {
513 ofproto_sflow_run(ofproto->sflow);
516 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
519 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
523 /* Now revalidate if there's anything to do. */
524 if (ofproto->need_revalidate
525 || !tag_set_is_empty(&ofproto->revalidate_set)) {
526 struct tag_set revalidate_set = ofproto->revalidate_set;
527 bool revalidate_all = ofproto->need_revalidate;
528 struct facet *facet, *next;
530 /* Clear the revalidation flags. */
531 tag_set_init(&ofproto->revalidate_set);
532 ofproto->need_revalidate = false;
534 HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
536 || tag_set_intersects(&revalidate_set, facet->tags)) {
537 facet_revalidate(ofproto, facet);
546 wait(struct ofproto *ofproto_)
548 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
549 struct ofport_dpif *ofport;
550 struct ofbundle *bundle;
552 dpif_wait(ofproto->dpif);
553 dpif_recv_wait(ofproto->dpif);
554 if (ofproto->sflow) {
555 ofproto_sflow_wait(ofproto->sflow);
557 if (!tag_set_is_empty(&ofproto->revalidate_set)) {
558 poll_immediate_wake();
560 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
563 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
566 if (ofproto->need_revalidate) {
567 /* Shouldn't happen, but if it does just go around again. */
568 VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
569 poll_immediate_wake();
571 timer_wait(&ofproto->next_expiration);
576 flush(struct ofproto *ofproto_)
578 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
579 struct facet *facet, *next_facet;
581 HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
582 /* Mark the facet as not installed so that facet_remove() doesn't
583 * bother trying to uninstall it. There is no point in uninstalling it
584 * individually since we are about to blow away all the facets with
585 * dpif_flow_flush(). */
586 facet->installed = false;
587 facet->dp_packet_count = 0;
588 facet->dp_byte_count = 0;
589 facet_remove(ofproto, facet);
591 dpif_flow_flush(ofproto->dpif);
595 get_features(struct ofproto *ofproto_ OVS_UNUSED,
596 bool *arp_match_ip, uint32_t *actions)
598 *arp_match_ip = true;
599 *actions = ((1u << OFPAT_OUTPUT) |
600 (1u << OFPAT_SET_VLAN_VID) |
601 (1u << OFPAT_SET_VLAN_PCP) |
602 (1u << OFPAT_STRIP_VLAN) |
603 (1u << OFPAT_SET_DL_SRC) |
604 (1u << OFPAT_SET_DL_DST) |
605 (1u << OFPAT_SET_NW_SRC) |
606 (1u << OFPAT_SET_NW_DST) |
607 (1u << OFPAT_SET_NW_TOS) |
608 (1u << OFPAT_SET_TP_SRC) |
609 (1u << OFPAT_SET_TP_DST) |
610 (1u << OFPAT_ENQUEUE));
614 get_tables(struct ofproto *ofproto_, struct ofp_table_stats *ots)
616 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
619 strcpy(ots->name, "classifier");
621 dpif_get_dp_stats(ofproto->dpif, &s);
622 put_32aligned_be64(&ots->lookup_count, htonll(s.n_hit + s.n_missed));
623 put_32aligned_be64(&ots->matched_count,
624 htonll(s.n_hit + ofproto->n_matches));
628 set_netflow(struct ofproto *ofproto_,
629 const struct netflow_options *netflow_options)
631 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
633 if (netflow_options) {
634 if (!ofproto->netflow) {
635 ofproto->netflow = netflow_create();
637 return netflow_set_options(ofproto->netflow, netflow_options);
639 netflow_destroy(ofproto->netflow);
640 ofproto->netflow = NULL;
645 static struct ofport *
648 struct ofport_dpif *port = xmalloc(sizeof *port);
653 port_dealloc(struct ofport *port_)
655 struct ofport_dpif *port = ofport_dpif_cast(port_);
660 port_construct(struct ofport *port_)
662 struct ofport_dpif *port = ofport_dpif_cast(port_);
663 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
665 port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
668 port->tag = tag_create_random();
670 if (ofproto->sflow) {
671 ofproto_sflow_add_port(ofproto->sflow, port->odp_port,
672 netdev_get_name(port->up.netdev));
679 port_destruct(struct ofport *port_)
681 struct ofport_dpif *port = ofport_dpif_cast(port_);
682 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
684 bundle_remove(port_);
685 set_cfm(port_, NULL, NULL, 0);
686 if (ofproto->sflow) {
687 ofproto_sflow_del_port(ofproto->sflow, port->odp_port);
692 port_modified(struct ofport *port_)
694 struct ofport_dpif *port = ofport_dpif_cast(port_);
696 if (port->bundle && port->bundle->bond) {
697 bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
702 port_reconfigured(struct ofport *port_, ovs_be32 old_config)
704 struct ofport_dpif *port = ofport_dpif_cast(port_);
705 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
706 ovs_be32 changed = old_config ^ port->up.opp.config;
708 if (changed & htonl(OFPPC_NO_RECV | OFPPC_NO_RECV_STP |
709 OFPPC_NO_FWD | OFPPC_NO_FLOOD)) {
710 ofproto->need_revalidate = true;
715 set_sflow(struct ofproto *ofproto_,
716 const struct ofproto_sflow_options *sflow_options)
718 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
719 struct ofproto_sflow *os = ofproto->sflow;
722 struct ofport_dpif *ofport;
724 os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
725 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
726 ofproto_sflow_add_port(os, ofport->odp_port,
727 netdev_get_name(ofport->up.netdev));
730 ofproto_sflow_set_options(os, sflow_options);
732 ofproto_sflow_destroy(os);
733 ofproto->sflow = NULL;
739 set_cfm(struct ofport *ofport_, const struct cfm *cfm,
740 const uint16_t *remote_mps, size_t n_remote_mps)
742 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
749 ofport->cfm = cfm_create();
752 ofport->cfm->mpid = cfm->mpid;
753 ofport->cfm->interval = cfm->interval;
754 memcpy(ofport->cfm->maid, cfm->maid, CCM_MAID_LEN);
756 cfm_update_remote_mps(ofport->cfm, remote_mps, n_remote_mps);
758 if (cfm_configure(ofport->cfm)) {
764 cfm_destroy(ofport->cfm);
770 get_cfm(struct ofport *ofport_, const struct cfm **cfmp)
772 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
779 /* Expires all MAC learning entries associated with 'bundle' and forces its
780 * ofproto to revalidate every flow. */
782 bundle_flush_macs(struct ofbundle *bundle)
784 struct ofproto_dpif *ofproto = bundle->ofproto;
785 struct mac_learning *ml = ofproto->ml;
786 struct mac_entry *mac, *next_mac;
788 ofproto->need_revalidate = true;
789 LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
790 if (mac->port.p == bundle) {
791 mac_learning_expire(ml, mac);
796 static struct ofbundle *
797 bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
799 struct ofbundle *bundle;
801 HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
803 if (bundle->aux == aux) {
810 /* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
811 * ones that are found to 'bundles'. */
813 bundle_lookup_multiple(struct ofproto_dpif *ofproto,
814 void **auxes, size_t n_auxes,
815 struct hmapx *bundles)
820 for (i = 0; i < n_auxes; i++) {
821 struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
823 hmapx_add(bundles, bundle);
829 bundle_del_port(struct ofport_dpif *port)
831 struct ofbundle *bundle = port->bundle;
833 list_remove(&port->bundle_node);
837 lacp_slave_unregister(bundle->lacp, port);
840 bond_slave_unregister(bundle->bond, port);
843 bundle->floodable = true;
844 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
845 if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
846 bundle->floodable = false;
852 bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
853 struct lacp_slave_settings *lacp,
854 uint32_t bond_stable_id)
856 struct ofport_dpif *port;
858 port = get_ofp_port(bundle->ofproto, ofp_port);
863 if (port->bundle != bundle) {
865 bundle_del_port(port);
868 port->bundle = bundle;
869 list_push_back(&bundle->ports, &port->bundle_node);
870 if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
871 bundle->floodable = false;
875 lacp_slave_register(bundle->lacp, port, lacp);
878 port->bond_stable_id = bond_stable_id;
884 bundle_destroy(struct ofbundle *bundle)
886 struct ofproto_dpif *ofproto;
887 struct ofport_dpif *port, *next_port;
894 ofproto = bundle->ofproto;
895 for (i = 0; i < MAX_MIRRORS; i++) {
896 struct ofmirror *m = ofproto->mirrors[i];
898 if (m->out == bundle) {
900 } else if (hmapx_find_and_delete(&m->srcs, bundle)
901 || hmapx_find_and_delete(&m->dsts, bundle)) {
902 ofproto->need_revalidate = true;
907 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
908 bundle_del_port(port);
911 bundle_flush_macs(bundle);
912 hmap_remove(&ofproto->bundles, &bundle->hmap_node);
914 free(bundle->trunks);
915 lacp_destroy(bundle->lacp);
916 bond_destroy(bundle->bond);
921 bundle_set(struct ofproto *ofproto_, void *aux,
922 const struct ofproto_bundle_settings *s)
924 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
925 bool need_flush = false;
926 const unsigned long *trunks;
927 struct ofport_dpif *port;
928 struct ofbundle *bundle;
933 bundle_destroy(bundle_lookup(ofproto, aux));
937 assert(s->n_slaves == 1 || s->bond != NULL);
938 assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
940 bundle = bundle_lookup(ofproto, aux);
942 bundle = xmalloc(sizeof *bundle);
944 bundle->ofproto = ofproto;
945 hmap_insert(&ofproto->bundles, &bundle->hmap_node,
946 hash_pointer(aux, 0));
950 list_init(&bundle->ports);
952 bundle->trunks = NULL;
956 bundle->floodable = true;
958 bundle->src_mirrors = 0;
959 bundle->dst_mirrors = 0;
960 bundle->mirror_out = 0;
963 if (!bundle->name || strcmp(s->name, bundle->name)) {
965 bundle->name = xstrdup(s->name);
971 bundle->lacp = lacp_create();
973 lacp_configure(bundle->lacp, s->lacp);
975 lacp_destroy(bundle->lacp);
979 /* Update set of ports. */
981 for (i = 0; i < s->n_slaves; i++) {
982 if (!bundle_add_port(bundle, s->slaves[i],
983 s->lacp ? &s->lacp_slaves[i] : NULL,
984 s->bond_stable_ids ? s->bond_stable_ids[i] : 0)) {
988 if (!ok || list_size(&bundle->ports) != s->n_slaves) {
989 struct ofport_dpif *next_port;
991 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
992 for (i = 0; i < s->n_slaves; i++) {
993 if (s->slaves[i] == odp_port_to_ofp_port(port->odp_port)) {
998 bundle_del_port(port);
1002 assert(list_size(&bundle->ports) <= s->n_slaves);
1004 if (list_is_empty(&bundle->ports)) {
1005 bundle_destroy(bundle);
1010 if (s->vlan != bundle->vlan) {
1011 bundle->vlan = s->vlan;
1015 /* Get trunked VLANs. */
1016 trunks = s->vlan == -1 ? NULL : s->trunks;
1017 if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
1018 free(bundle->trunks);
1019 bundle->trunks = vlan_bitmap_clone(trunks);
1024 if (!list_is_short(&bundle->ports)) {
1025 bundle->ofproto->has_bonded_bundles = true;
1027 if (bond_reconfigure(bundle->bond, s->bond)) {
1028 ofproto->need_revalidate = true;
1031 bundle->bond = bond_create(s->bond);
1034 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
1035 bond_slave_register(bundle->bond, port, port->bond_stable_id,
1039 bond_destroy(bundle->bond);
1040 bundle->bond = NULL;
1043 /* If we changed something that would affect MAC learning, un-learn
1044 * everything on this port and force flow revalidation. */
1046 bundle_flush_macs(bundle);
1053 bundle_remove(struct ofport *port_)
1055 struct ofport_dpif *port = ofport_dpif_cast(port_);
1056 struct ofbundle *bundle = port->bundle;
1059 bundle_del_port(port);
1060 if (list_is_empty(&bundle->ports)) {
1061 bundle_destroy(bundle);
1062 } else if (list_is_short(&bundle->ports)) {
1063 bond_destroy(bundle->bond);
1064 bundle->bond = NULL;
1070 send_pdu_cb(void *port_, const struct lacp_pdu *pdu)
1072 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
1073 struct ofport_dpif *port = port_;
1074 uint8_t ea[ETH_ADDR_LEN];
1077 error = netdev_get_etheraddr(port->up.netdev, ea);
1079 struct lacp_pdu *packet_pdu;
1080 struct ofpbuf packet;
1082 ofpbuf_init(&packet, 0);
1083 packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
1084 sizeof *packet_pdu);
1086 error = netdev_send(port->up.netdev, &packet);
1088 VLOG_WARN_RL(&rl, "port %s: sending LACP PDU on iface %s failed "
1089 "(%s)", port->bundle->name,
1090 netdev_get_name(port->up.netdev), strerror(error));
1092 ofpbuf_uninit(&packet);
1094 VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
1095 "%s (%s)", port->bundle->name,
1096 netdev_get_name(port->up.netdev), strerror(error));
1101 bundle_send_learning_packets(struct ofbundle *bundle)
1103 struct ofproto_dpif *ofproto = bundle->ofproto;
1104 int error, n_packets, n_errors;
1105 struct mac_entry *e;
1107 error = n_packets = n_errors = 0;
1108 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
1109 if (e->port.p != bundle) {
1110 int ret = bond_send_learning_packet(bundle->bond, e->mac, e->vlan);
1120 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1121 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
1122 "packets, last error was: %s",
1123 bundle->name, n_errors, n_packets, strerror(error));
1125 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
1126 bundle->name, n_packets);
1131 bundle_run(struct ofbundle *bundle)
1134 lacp_run(bundle->lacp, send_pdu_cb);
1137 struct ofport_dpif *port;
1139 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
1140 bool may_enable = lacp_slave_may_enable(bundle->lacp, port);
1141 bond_slave_set_lacp_may_enable(bundle->bond, port, may_enable);
1144 bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
1145 lacp_negotiated(bundle->lacp));
1146 if (bond_should_send_learning_packets(bundle->bond)) {
1147 bundle_send_learning_packets(bundle);
1153 bundle_wait(struct ofbundle *bundle)
1156 lacp_wait(bundle->lacp);
1159 bond_wait(bundle->bond);
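/* Returns the index of a free slot in 'ofproto''s mirrors array, or
 * (apparently) a negative value when every slot is taken, which mirror_set()
 * reports as reaching the mirror limit. */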
1166 mirror_scan(struct ofproto_dpif *ofproto)
1170 for (idx = 0; idx < MAX_MIRRORS; idx++) {
1171 if (!ofproto->mirrors[idx]) {
1178 static struct ofmirror *
1179 mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
1183 for (i = 0; i < MAX_MIRRORS; i++) {
1184 struct ofmirror *mirror = ofproto->mirrors[i];
1185 if (mirror && mirror->aux == aux) {
1194 mirror_set(struct ofproto *ofproto_, void *aux,
1195 const struct ofproto_mirror_settings *s)
1197 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1198 mirror_mask_t mirror_bit;
1199 struct ofbundle *bundle;
1200 struct ofmirror *mirror;
1201 struct ofbundle *out;
1202 struct hmapx srcs; /* Contains "struct ofbundle *"s. */
1203 struct hmapx dsts; /* Contains "struct ofbundle *"s. */
1206 mirror = mirror_lookup(ofproto, aux);
1208 mirror_destroy(mirror);
1214 idx = mirror_scan(ofproto);
1216 VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
1218 ofproto->up.name, MAX_MIRRORS, s->name);
1222 mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
1223 mirror->ofproto = ofproto;
1225 mirror->out_vlan = -1;
1226 mirror->name = NULL;
1229 if (!mirror->name || strcmp(s->name, mirror->name)) {
1231 mirror->name = xstrdup(s->name);
1234 /* Get the new configuration. */
1235 if (s->out_bundle) {
1236 out = bundle_lookup(ofproto, s->out_bundle);
1238 mirror_destroy(mirror);
1244 out_vlan = s->out_vlan;
1246 bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
1247 bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);
1249 /* If the configuration has not changed, do nothing. */
1250 if (hmapx_equals(&srcs, &mirror->srcs)
1251 && hmapx_equals(&dsts, &mirror->dsts)
1252 && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
1253 && mirror->out == out
1254 && mirror->out_vlan == out_vlan)
1256 hmapx_destroy(&srcs);
1257 hmapx_destroy(&dsts);
1261 hmapx_swap(&srcs, &mirror->srcs);
1262 hmapx_destroy(&srcs);
1264 hmapx_swap(&dsts, &mirror->dsts);
1265 hmapx_destroy(&dsts);
1267 free(mirror->vlans);
1268 mirror->vlans = vlan_bitmap_clone(s->src_vlans);
1271 mirror->out_vlan = out_vlan;
1273 /* Update bundles. */
1274 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
1275 HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
1276 if (hmapx_contains(&mirror->srcs, bundle)) {
1277 bundle->src_mirrors |= mirror_bit;
1279 bundle->src_mirrors &= ~mirror_bit;
1282 if (hmapx_contains(&mirror->dsts, bundle)) {
1283 bundle->dst_mirrors |= mirror_bit;
1285 bundle->dst_mirrors &= ~mirror_bit;
1288 if (mirror->out == bundle) {
1289 bundle->mirror_out |= mirror_bit;
1291 bundle->mirror_out &= ~mirror_bit;
1295 ofproto->need_revalidate = true;
1296 mac_learning_flush(ofproto->ml);
1302 mirror_destroy(struct ofmirror *mirror)
1304 struct ofproto_dpif *ofproto;
1305 mirror_mask_t mirror_bit;
1306 struct ofbundle *bundle;
1312 ofproto = mirror->ofproto;
1313 ofproto->need_revalidate = true;
1314 mac_learning_flush(ofproto->ml);
1316 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
1317 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
1318 bundle->src_mirrors &= ~mirror_bit;
1319 bundle->dst_mirrors &= ~mirror_bit;
1320 bundle->mirror_out &= ~mirror_bit;
1323 hmapx_destroy(&mirror->srcs);
1324 hmapx_destroy(&mirror->dsts);
1325 free(mirror->vlans);
1327 ofproto->mirrors[mirror->idx] = NULL;
1333 set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
1335 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1336 if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
1337 ofproto->need_revalidate = true;
1338 mac_learning_flush(ofproto->ml);
1344 is_mirror_output_bundle(struct ofproto *ofproto_, void *aux)
1346 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1347 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
1348 return bundle && bundle->mirror_out != 0;
1353 static struct ofport_dpif *
1354 get_ofp_port(struct ofproto_dpif *ofproto, uint16_t ofp_port)
1356 return ofport_dpif_cast(ofproto_get_port(&ofproto->up, ofp_port));
1359 static struct ofport_dpif *
1360 get_odp_port(struct ofproto_dpif *ofproto, uint32_t odp_port)
1362 return get_ofp_port(ofproto, odp_port_to_ofp_port(odp_port));
1366 ofproto_port_from_dpif_port(struct ofproto_port *ofproto_port,
1367 struct dpif_port *dpif_port)
1369 ofproto_port->name = dpif_port->name;
1370 ofproto_port->type = dpif_port->type;
1371 ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no);
1375 port_run(struct ofport_dpif *ofport)
1378 cfm_run(ofport->cfm);
1380 if (cfm_should_send_ccm(ofport->cfm)) {
1381 struct ofpbuf packet;
1384 ofpbuf_init(&packet, 0);
1385 ccm = eth_compose(&packet, eth_addr_ccm, ofport->up.opp.hw_addr,
1386 ETH_TYPE_CFM, sizeof *ccm);
1387 cfm_compose_ccm(ofport->cfm, ccm);
1388 send_packet(ofproto_dpif_cast(ofport->up.ofproto),
1389 ofport->odp_port, 0, &packet);
1390 ofpbuf_uninit(&packet);
1396 port_wait(struct ofport_dpif *ofport)
1399 cfm_wait(ofport->cfm);
1404 port_query_by_name(const struct ofproto *ofproto_, const char *devname,
1405 struct ofproto_port *ofproto_port)
1407 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1408 struct dpif_port dpif_port;
1411 error = dpif_port_query_by_name(ofproto->dpif, devname, &dpif_port);
1413 ofproto_port_from_dpif_port(ofproto_port, &dpif_port);
1419 port_add(struct ofproto *ofproto_, struct netdev *netdev, uint16_t *ofp_portp)
1421 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1425 error = dpif_port_add(ofproto->dpif, netdev, &odp_port);
1427 *ofp_portp = odp_port_to_ofp_port(odp_port);
1433 port_del(struct ofproto *ofproto_, uint16_t ofp_port)
1435 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1438 error = dpif_port_del(ofproto->dpif, ofp_port_to_odp_port(ofp_port));
1440 struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
1442 /* The caller is going to close ofport->up.netdev. If this is a
1443 * bonded port, then the bond is using that netdev, so remove it
1444 * from the bond. The client will need to reconfigure everything
1445 * after deleting ports, so then the slave will get re-added. */
1446 bundle_remove(&ofport->up);
1452 struct port_dump_state {
1453 struct dpif_port_dump dump;
1458 port_dump_start(const struct ofproto *ofproto_, void **statep)
1460 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1461 struct port_dump_state *state;
1463 *statep = state = xmalloc(sizeof *state);
1464 dpif_port_dump_start(&state->dump, ofproto->dpif);
1465 state->done = false;
1470 port_dump_next(const struct ofproto *ofproto_ OVS_UNUSED, void *state_,
1471 struct ofproto_port *port)
1473 struct port_dump_state *state = state_;
1474 struct dpif_port dpif_port;
1476 if (dpif_port_dump_next(&state->dump, &dpif_port)) {
1477 ofproto_port_from_dpif_port(port, &dpif_port);
1480 int error = dpif_port_dump_done(&state->dump);
1482 return error ? error : EOF;
1487 port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
1489 struct port_dump_state *state = state_;
1492 dpif_port_dump_done(&state->dump);
1499 port_poll(const struct ofproto *ofproto_, char **devnamep)
1501 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1502 return dpif_port_poll(ofproto->dpif, devnamep);
1506 port_poll_wait(const struct ofproto *ofproto_)
1508 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1509 dpif_port_poll_wait(ofproto->dpif);
1513 port_is_lacp_current(const struct ofport *ofport_)
1515 const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1516 return (ofport->bundle && ofport->bundle->lacp
1517 ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
1521 /* Upcall handling. */
1523 /* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an
1524 * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to
1525 * its individual configuration.
1527 * If 'clone' is true, the caller retains ownership of 'upcall->packet'.
1528 * Otherwise, ownership is transferred to this function. */
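/* (handle_miss_upcall() passes clone=true only in its fail-open special case,
 * where the packet is subsequently also executed; the other callers hand the
 * packet off with clone=false.) */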
1530 send_packet_in(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall,
1531 const struct flow *flow, bool clone)
1533 struct ofputil_packet_in pin;
1535 pin.packet = upcall->packet;
1536 pin.in_port = flow->in_port;
1537 pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
1538 pin.buffer_id = 0; /* not yet known */
1539 pin.send_len = upcall->userdata;
1540 connmgr_send_packet_in(ofproto->up.connmgr, &pin, flow,
1541 clone ? NULL : upcall->packet);
1545 process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
1546 const struct ofpbuf *packet)
1548 if (cfm_should_process_flow(flow)) {
1549 struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);
1550 if (ofport && ofport->cfm) {
1551 cfm_process_heartbeat(ofport->cfm, packet);
1554 } else if (flow->dl_type == htons(ETH_TYPE_LACP)) {
1555 struct ofport_dpif *port = get_ofp_port(ofproto, flow->in_port);
1556 if (port && port->bundle && port->bundle->lacp) {
1557 const struct lacp_pdu *pdu = parse_lacp_packet(packet);
1559 lacp_process_pdu(port->bundle->lacp, port, pdu);
1568 handle_miss_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
1570 struct facet *facet;
1573 /* Obtain in_port and tun_id, at least. */
1574 odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
1576 /* Set header pointers in 'flow'. */
1577 flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow);
1579 /* Handle 802.1ag and LACP. */
1580 if (process_special(ofproto, &flow, upcall->packet)) {
1581 ofpbuf_delete(upcall->packet);
1582 ofproto->n_matches++;
1586 /* Check with in-band control to see if this packet should be sent
1587 * to the local port regardless of the flow table. */
1588 if (connmgr_msg_in_hook(ofproto->up.connmgr, &flow, upcall->packet)) {
1589 send_packet(ofproto, OFPP_LOCAL, 0, upcall->packet);
1592 facet = facet_lookup_valid(ofproto, &flow);
1594 struct rule_dpif *rule = rule_dpif_lookup(ofproto, &flow);
1596 /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
1597 struct ofport_dpif *port = get_ofp_port(ofproto, flow.in_port);
1599 if (port->up.opp.config & htonl(OFPPC_NO_PACKET_IN)) {
1600 COVERAGE_INC(ofproto_dpif_no_packet_in);
1601 /* XXX install 'drop' flow entry */
1602 ofpbuf_delete(upcall->packet);
1606 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16,
1610 send_packet_in(ofproto, upcall, &flow, false);
1614 facet = facet_create(rule, &flow, upcall->packet);
1615 } else if (!facet->may_install) {
1616 /* The facet is not installable, that is, we need to process every
1617 * packet, so process the current packet's actions into 'facet'. */
1618 facet_make_actions(ofproto, facet, upcall->packet);
1621 if (facet->rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
1623 * Extra-special case for fail-open mode.
1625 * We are in fail-open mode and the packet matched the fail-open rule,
1626 * but we are connected to a controller too. We should send the packet
1627 * up to the controller in the hope that it will try to set up a flow
1628 * and thereby allow us to exit fail-open.
1630 * See the top-level comment in fail-open.c for more information.
1632 send_packet_in(ofproto, upcall, &flow, true);
1635 facet_execute(ofproto, facet, upcall->packet);
1636 facet_install(ofproto, facet, false);
1637 ofproto->n_matches++;
1641 handle_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
1645 switch (upcall->type) {
1646 case DPIF_UC_ACTION:
1647 COVERAGE_INC(ofproto_dpif_ctlr_action);
1648 odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
1649 send_packet_in(ofproto, upcall, &flow, false);
1652 case DPIF_UC_SAMPLE:
1653 if (ofproto->sflow) {
1654 odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
1655 ofproto_sflow_received(ofproto->sflow, upcall, &flow);
1657 ofpbuf_delete(upcall->packet);
1661 handle_miss_upcall(ofproto, upcall);
1664 case DPIF_N_UC_TYPES:
1666 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
1671 /* Flow expiration. */
1673 static int facet_max_idle(const struct ofproto_dpif *);
1674 static void update_stats(struct ofproto_dpif *);
1675 static void rule_expire(struct rule_dpif *);
1676 static void expire_facets(struct ofproto_dpif *, int dp_max_idle);
1678 /* This function is called periodically by run(). Its job is to collect
1679 * updates for the flows that have been installed into the datapath, most
1680 * importantly when they were last used, and then use that information to
1681 * expire flows that have not been used recently.
1683 * Returns the number of milliseconds after which it should be called again. */
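/* (run() passes the value returned here to timer_set_duration() on
 * 'next_expiration' to schedule the next call.) */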
1685 expire(struct ofproto_dpif *ofproto)
1687 struct rule_dpif *rule, *next_rule;
1688 struct cls_cursor cursor;
1691 /* Update stats for each flow in the datapath. */
1692 update_stats(ofproto);
1694 /* Expire facets that have been idle too long. */
1695 dp_max_idle = facet_max_idle(ofproto);
1696 expire_facets(ofproto, dp_max_idle);
1698 /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
1699 cls_cursor_init(&cursor, &ofproto->up.tables[0], NULL);
1700 CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
1704 /* All outstanding data in existing flows has been accounted, so it's a
1705 * good time to do bond rebalancing. */
1706 if (ofproto->has_bonded_bundles) {
1707 struct ofbundle *bundle;
1709 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
1711 bond_rebalance(bundle->bond, &ofproto->revalidate_set);
1716 return MIN(dp_max_idle, 1000);
1719 /* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
1721 * This function also pushes statistics updates to rules which each facet
1722 * resubmits into. Generally these statistics will be accurate. However, if a
1723 * facet changes the rule it resubmits into at some time in between
1724 * update_stats() runs, it is possible that statistics accrued to the
1725 * old rule will be incorrectly attributed to the new rule. This could be
1726 * avoided by calling update_stats() whenever rules are created or
1727 * deleted. However, the performance impact of making so many calls to the
1728 * datapath does not justify the benefit of having perfectly accurate statistics.
1731 update_stats(struct ofproto_dpif *p)
1733 const struct dpif_flow_stats *stats;
1734 struct dpif_flow_dump dump;
1735 const struct nlattr *key;
1738 dpif_flow_dump_start(&dump, p->dpif);
1739 while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
1740 struct facet *facet;
1743 if (odp_flow_key_to_flow(key, key_len, &flow)) {
1747 odp_flow_key_format(key, key_len, &s);
1748 VLOG_WARN_RL(&rl, "failed to convert ODP flow key to flow: %s",
1754 facet = facet_find(p, &flow);
1756 if (facet && facet->installed) {
1758 if (stats->n_packets >= facet->dp_packet_count) {
1759 uint64_t extra = stats->n_packets - facet->dp_packet_count;
1760 facet->packet_count += extra;
1762 VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
1765 if (stats->n_bytes >= facet->dp_byte_count) {
1766 facet->byte_count += stats->n_bytes - facet->dp_byte_count;
1768 VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
1771 facet->dp_packet_count = stats->n_packets;
1772 facet->dp_byte_count = stats->n_bytes;
1774 facet_update_time(p, facet, stats->used);
1775 facet_account(p, facet, stats->n_bytes);
1776 facet_push_stats(facet);
1778 /* There's a flow in the datapath that we know nothing about.
1780 COVERAGE_INC(facet_unexpected);
1781 dpif_flow_del(p->dpif, key, key_len, NULL);
1784 dpif_flow_dump_done(&dump);
1787 /* Calculates and returns the number of milliseconds of idle time after which
1788 * facets should expire from the datapath and we should fold their statistics
1789 * into their parent rules in userspace. */
1791 facet_max_idle(const struct ofproto_dpif *ofproto)
1794 * Idle time histogram.
1796 * Most of the time a switch has a relatively small number of facets. When
1797 * this is the case we might as well keep statistics for all of them in
1798 * userspace and cache them in the kernel datapath for performance as
1801 * As the number of facets increases, the memory required to maintain
1802 * statistics about them in userspace and in the kernel becomes
1803 * significant. However, with a large number of facets it is likely that
1804 * only a few of them are "heavy hitters" that consume a large amount of
1805 * bandwidth. At this point, only heavy hitters are worth caching in the
1806 * kernel and maintaining in userspace; other facets we can discard.
1808 * The technique used to compute the idle time is to build a histogram with
1809 * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each facet
1810 * that is installed in the kernel gets dropped in the appropriate bucket.
1811 * After the histogram has been built, we compute the cutoff so that only
1812 * the most-recently-used 1% of facets (but at least 1000 of them) are kept
1813 * cached. At least the most-recently-used bucket of facets is kept, so
1814 * actually an arbitrary number of facets can be kept in any given
1815 * expiration run (though the next run will delete most of those unless
1816 * they receive additional data).
1818 * This requires a second pass through the facets, in addition to the pass
1819 * made by update_stats(), because the former function never looks
1820 * at uninstallable facets.
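 *
 * For example, suppose 20,000 facets are installed and BUCKET_WIDTH works out
 * to 100 ms.  We want to keep at least MAX(1000, 20000/100) = 1000 facets.
 * Starting from the first nonempty bucket (the most recently used facets),
 * buckets are accumulated until that subtotal is reached; if that happens
 * within the 300-399 ms bucket, the function returns 400, and facets that
 * have been idle for longer than 400 ms are expired.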
1822 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
1823 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
1824 int buckets[N_BUCKETS] = { 0 };
1825 struct facet *facet;
1830 total = hmap_count(&ofproto->facets);
1831 if (total <= 1000) {
1832 return N_BUCKETS * BUCKET_WIDTH;
1835 /* Build histogram. */
1837 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
1838 long long int idle = now - facet->used;
1839 int bucket = (idle <= 0 ? 0
1840 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
1841 : (unsigned int) idle / BUCKET_WIDTH);
1845 /* Find the first bucket whose flows should be expired. */
1846 for (bucket = 0; bucket < N_BUCKETS; bucket++) {
1847 if (buckets[bucket]) {
1850 subtotal += buckets[bucket++];
1851 } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100));
1856 if (VLOG_IS_DBG_ENABLED()) {
1860 ds_put_cstr(&s, "keep");
1861 for (i = 0; i < N_BUCKETS; i++) {
1863 ds_put_cstr(&s, ", drop");
1866 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
1869 VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
1873 return bucket * BUCKET_WIDTH;
1877 facet_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
1879 if (ofproto->netflow && !facet_is_controller_flow(facet) &&
1880 netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
1881 struct ofexpired expired;
1883 if (facet->installed) {
1884 struct dpif_flow_stats stats;
1886 facet_put__(ofproto, facet, facet->actions, facet->actions_len,
1888 facet_update_stats(ofproto, facet, &stats);
1891 expired.flow = facet->flow;
1892 expired.packet_count = facet->packet_count;
1893 expired.byte_count = facet->byte_count;
1894 expired.used = facet->used;
1895 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
1900 expire_facets(struct ofproto_dpif *ofproto, int dp_max_idle)
1902 long long int cutoff = time_msec() - dp_max_idle;
1903 struct facet *facet, *next_facet;
1905 HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
1906 facet_active_timeout(ofproto, facet);
1907 if (facet->used < cutoff) {
1908 facet_remove(ofproto, facet);
1913 /* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
1914 * then delete it entirely. */
1916 rule_expire(struct rule_dpif *rule)
1918 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
1919 struct facet *facet, *next_facet;
1923 /* Has 'rule' expired? */
1925 if (rule->up.hard_timeout
1926 && now > rule->up.created + rule->up.hard_timeout * 1000) {
1927 reason = OFPRR_HARD_TIMEOUT;
1928 } else if (rule->up.idle_timeout && list_is_empty(&rule->facets)
1929 && now > rule->used + rule->up.idle_timeout * 1000) {
1930 reason = OFPRR_IDLE_TIMEOUT;
1935 COVERAGE_INC(ofproto_dpif_expired);
1937 /* Update stats. (This is a no-op if the rule expired due to an idle
1938 * timeout, because that only happens when the rule has no facets left.) */
1939 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
1940 facet_remove(ofproto, facet);
1943 /* Get rid of the rule. */
1944 ofproto_rule_expire(&rule->up, reason);
1949 /* Creates and returns a new facet owned by 'rule', given a 'flow' and an
1950 * example 'packet' within that flow.
1952 * The caller must already have determined that no facet with an identical
1953 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
1954 * the ofproto's classifier table. */
1955 static struct facet *
1956 facet_create(struct rule_dpif *rule, const struct flow *flow,
1957 const struct ofpbuf *packet)
1959 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
1960 struct facet *facet;
1962 facet = xzalloc(sizeof *facet);
1963 facet->used = time_msec();
1964 hmap_insert(&ofproto->facets, &facet->hmap_node, flow_hash(flow, 0));
1965 list_push_back(&rule->facets, &facet->list_node);
1967 facet->flow = *flow;
1968 netflow_flow_init(&facet->nf_flow);
1969 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
1971 facet_make_actions(ofproto, facet, packet);
1977 facet_free(struct facet *facet)
1979 free(facet->actions);
1983 /* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
1984 * 'odp_actions' on 'packet', whose flow is given by 'flow'.
1986 * Takes ownership of 'packet'. */
1988 execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
1989 const struct nlattr *odp_actions, size_t actions_len,
1990 struct ofpbuf *packet)
1992 if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t))
1993 && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) {
1994 /* As an optimization, avoid a round-trip from userspace to kernel to
1995 * userspace. This also avoids possibly filling up kernel packet
1996 * buffers along the way. */
1997 struct dpif_upcall upcall;
1999 upcall.type = DPIF_UC_ACTION;
2000 upcall.packet = packet;
2003 upcall.userdata = nl_attr_get_u64(odp_actions);
2004 upcall.sample_pool = 0;
2005 upcall.actions = NULL;
2006 upcall.actions_len = 0;
2008 send_packet_in(ofproto, &upcall, flow, false);
2014 error = dpif_execute(ofproto->dpif, odp_actions, actions_len, packet);
2015 ofpbuf_delete(packet);
2020 /* Executes the actions indicated by 'facet' on 'packet' and credits 'facet''s
2021 * statistics appropriately. 'packet' must have at least sizeof(struct
2022 * ofp_packet_in) bytes of headroom.
2024 * For correct results, 'packet' must actually be in 'facet''s flow; that is,
2025 * applying flow_extract() to 'packet' would yield the same flow as
2028 * 'facet' must have accurately composed ODP actions; that is, it must not be
2029 * in need of revalidation.
2031 * Takes ownership of 'packet'. */
2033 facet_execute(struct ofproto_dpif *ofproto, struct facet *facet,
2034 struct ofpbuf *packet)
2036 struct dpif_flow_stats stats;
2038 assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
2040 flow_extract_stats(&facet->flow, packet, &stats);
2041 stats.used = time_msec();
2042 if (execute_odp_actions(ofproto, &facet->flow,
2043 facet->actions, facet->actions_len, packet)) {
2044 facet_update_stats(ofproto, facet, &stats);
2048 /* Remove 'facet' from 'ofproto' and free up the associated memory:
2050 * - If 'facet' was installed in the datapath, uninstalls it and updates its
2051 * rule's statistics, via facet_uninstall().
2053 * - Removes 'facet' from its rule and from ofproto->facets.
2056 facet_remove(struct ofproto_dpif *ofproto, struct facet *facet)
2058 facet_uninstall(ofproto, facet);
2059 facet_flush_stats(ofproto, facet);
2060 hmap_remove(&ofproto->facets, &facet->hmap_node);
2061 list_remove(&facet->list_node);
2065 /* Composes the ODP actions for 'facet' based on its rule's actions. */
2067 facet_make_actions(struct ofproto_dpif *p, struct facet *facet,
2068 const struct ofpbuf *packet)
2070 const struct rule_dpif *rule = facet->rule;
2071 struct ofpbuf *odp_actions;
2072 struct action_xlate_ctx ctx;
2074 action_xlate_ctx_init(&ctx, p, &facet->flow, packet);
2075 odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
2076 facet->tags = ctx.tags;
2077 facet->may_install = ctx.may_set_up_flow;
2078 facet->nf_flow.output_iface = ctx.nf_output_iface;
2080 if (facet->actions_len != odp_actions->size
2081 || memcmp(facet->actions, odp_actions->data, odp_actions->size)) {
2082 free(facet->actions);
2083 facet->actions_len = odp_actions->size;
2084 facet->actions = xmemdup(odp_actions->data, odp_actions->size);
2087 ofpbuf_delete(odp_actions);
2091 facet_put__(struct ofproto_dpif *ofproto, struct facet *facet,
2092 const struct nlattr *actions, size_t actions_len,
2093 struct dpif_flow_stats *stats)
2095 struct odputil_keybuf keybuf;
2096 enum dpif_flow_put_flags flags;
2099 flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
2101 flags |= DPIF_FP_ZERO_STATS;
2102 facet->dp_packet_count = 0;
2103 facet->dp_byte_count = 0;
2106 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
2107 odp_flow_key_from_flow(&key, &facet->flow);
2109 return dpif_flow_put(ofproto->dpif, flags, key.data, key.size,
2110 actions, actions_len, stats);
2113 /* If 'facet' is installable, inserts or re-inserts it into 'p''s datapath. If
2114 * 'zero_stats' is true, clears any existing statistics from the datapath for
2117 facet_install(struct ofproto_dpif *p, struct facet *facet, bool zero_stats)
2119 struct dpif_flow_stats stats;
2121 if (facet->may_install
2122 && !facet_put__(p, facet, facet->actions, facet->actions_len,
2123 zero_stats ? &stats : NULL)) {
2124 facet->installed = true;
2129 facet_account(struct ofproto_dpif *ofproto,
2130 struct facet *facet, uint64_t extra_bytes)
2132 uint64_t total_bytes, n_bytes;
2133 struct ofbundle *in_bundle;
2134 const struct nlattr *a;
2139 total_bytes = facet->byte_count + extra_bytes;
2140 if (total_bytes <= facet->accounted_bytes) {
2143 n_bytes = total_bytes - facet->accounted_bytes;
2144 facet->accounted_bytes = total_bytes;
2146 /* Test that 'tags' is nonzero to ensure that only flows that include an
2147 * OFPP_NORMAL action are used for learning and bond slave rebalancing.
2148 * This works because OFPP_NORMAL always sets a nonzero tag value.
2150 * Feed information from the active flows back into the learning table to
2151 * ensure that table is always in sync with what is actually flowing
2152 * through the datapath. */
2154 || !is_admissible(ofproto, &facet->flow, false, &dummy,
2155 &vlan, &in_bundle)) {
2159 update_learning_table(ofproto, &facet->flow, vlan, in_bundle);
2161 if (!ofproto->has_bonded_bundles) {
2164 NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->actions, facet->actions_len) {
2165 if (nl_attr_type(a) == ODP_ACTION_ATTR_OUTPUT) {
2166 struct ofport_dpif *port;
2168 port = get_odp_port(ofproto, nl_attr_get_u32(a));
2169 if (port && port->bundle && port->bundle->bond) {
2170 bond_account(port->bundle->bond, &facet->flow, vlan, n_bytes);
2176 /* If 'rule' is installed in the datapath, uninstalls it. */
2178 facet_uninstall(struct ofproto_dpif *p, struct facet *facet)
2180 if (facet->installed) {
2181 struct odputil_keybuf keybuf;
2182 struct dpif_flow_stats stats;
2185 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
2186 odp_flow_key_from_flow(&key, &facet->flow);
2188 if (!dpif_flow_del(p->dpif, key.data, key.size, &stats)) {
2189 facet_update_stats(p, facet, &stats);
2191 facet->installed = false;
2192 facet->dp_packet_count = 0;
2193 facet->dp_byte_count = 0;
2195 assert(facet->dp_packet_count == 0);
2196 assert(facet->dp_byte_count == 0);
2200 /* Returns true if the only action for 'facet' is to send to the controller.
2201 * (We don't report NetFlow expiration messages for such facets because they
2202 * are just part of the control logic for the network, not real traffic). */
2204 facet_is_controller_flow(struct facet *facet)
2207 && facet->rule->up.n_actions == 1
2208 && action_outputs_to_port(&facet->rule->up.actions[0],
2209 htons(OFPP_CONTROLLER)));
2212 /* Folds all of 'facet''s statistics into its rule. Also updates the
2213 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
2214 * 'facet''s statistics in the datapath should have been zeroed and folded into
2215 * its packet and byte counts before this function is called. */
2217 facet_flush_stats(struct ofproto_dpif *ofproto, struct facet *facet)
2219 assert(!facet->dp_byte_count);
2220 assert(!facet->dp_packet_count);
2222 facet_push_stats(facet);
2223 facet_account(ofproto, facet, 0);
2225 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
2226 struct ofexpired expired;
2227 expired.flow = facet->flow;
2228 expired.packet_count = facet->packet_count;
2229 expired.byte_count = facet->byte_count;
2230 expired.used = facet->used;
2231 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
2234 facet->rule->packet_count += facet->packet_count;
2235 facet->rule->byte_count += facet->byte_count;
2237     /* Reset counters to prevent double counting if 'facet' ever gets reinstalled. */
2239 facet->packet_count = 0;
2240 facet->byte_count = 0;
2241 facet->rs_packet_count = 0;
2242 facet->rs_byte_count = 0;
2243 facet->accounted_bytes = 0;
2245 netflow_flow_clear(&facet->nf_flow);
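/* Illustrative sketch (not part of the original file): facet_flush_stats()
 * folds a facet's counters into its rule and then zeroes them, so the same
 * traffic cannot be counted twice if the facet is later reinstalled.  A
 * hypothetical, generic form of that fold-then-reset step: */
#if 0
#include <stdint.h>

struct example_stats {
    uint64_t packet_count;
    uint64_t byte_count;
};

static void
example_fold_and_reset(struct example_stats *dst, struct example_stats *src)
{
    dst->packet_count += src->packet_count;
    dst->byte_count += src->byte_count;
    src->packet_count = 0;      /* Prevent double counting later. */
    src->byte_count = 0;
}
#endif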
2248 /* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
2249 * Returns it if found, otherwise a null pointer.
2251 * The returned facet might need revalidation; use facet_lookup_valid()
2252 * instead if that is important. */
2253 static struct facet *
2254 facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
2256 struct facet *facet;
2258 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, flow_hash(flow, 0),
2260 if (flow_equal(flow, &facet->flow)) {
2268 /* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
2269 * Returns it if found, otherwise a null pointer.
2271 * The returned facet is guaranteed to be valid. */
2272 static struct facet *
2273 facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow)
2275 struct facet *facet = facet_find(ofproto, flow);
2277 /* The facet we found might not be valid, since we could be in need of
2278 * revalidation. If it is not valid, don't return it. */
2280 && ofproto->need_revalidate
2281 && !facet_revalidate(ofproto, facet)) {
2282 COVERAGE_INC(facet_invalidated);
2289 /* Re-searches 'ofproto''s classifier for a rule matching 'facet':
2291 * - If the rule found is different from 'facet''s current rule, moves
2292 * 'facet' to the new rule and recompiles its actions.
2294 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
2295 * where it is and recompiles its actions anyway.
2297 * - If there is none, destroys 'facet'.
2299 * Returns true if 'facet' still exists, false if it has been destroyed. */
2301 facet_revalidate(struct ofproto_dpif *ofproto, struct facet *facet)
2303 struct action_xlate_ctx ctx;
2304 struct ofpbuf *odp_actions;
2305 struct rule_dpif *new_rule;
2306 bool actions_changed;
2308 COVERAGE_INC(facet_revalidate);
2310 /* Determine the new rule. */
2311 new_rule = rule_dpif_lookup(ofproto, &facet->flow);
2313 /* No new rule, so delete the facet. */
2314 facet_remove(ofproto, facet);
2318 /* Calculate new ODP actions.
2320 * We do not modify any 'facet' state yet, because we might need to, e.g.,
2321 * emit a NetFlow expiration and, if so, we need to have the old state
2322 * around to properly compose it. */
2323 action_xlate_ctx_init(&ctx, ofproto, &facet->flow, NULL);
2324 odp_actions = xlate_actions(&ctx,
2325 new_rule->up.actions, new_rule->up.n_actions);
2326 actions_changed = (facet->actions_len != odp_actions->size
2327 || memcmp(facet->actions, odp_actions->data,
2328 facet->actions_len));
2330 /* If the ODP actions changed or the installability changed, then we need
2331 * to talk to the datapath. */
2332 if (actions_changed || ctx.may_set_up_flow != facet->installed) {
2333 if (ctx.may_set_up_flow) {
2334 struct dpif_flow_stats stats;
2336 facet_put__(ofproto, facet,
2337 odp_actions->data, odp_actions->size, &stats);
2338 facet_update_stats(ofproto, facet, &stats);
2340 facet_uninstall(ofproto, facet);
2343 /* The datapath flow is gone or has zeroed stats, so push stats out of
2344 * 'facet' into 'rule'. */
2345 facet_flush_stats(ofproto, facet);
2348 /* Update 'facet' now that we've taken care of all the old state. */
2349 facet->tags = ctx.tags;
2350 facet->nf_flow.output_iface = ctx.nf_output_iface;
2351 facet->may_install = ctx.may_set_up_flow;
2352 if (actions_changed) {
2353 free(facet->actions);
2354 facet->actions_len = odp_actions->size;
2355 facet->actions = xmemdup(odp_actions->data, odp_actions->size);
2357 if (facet->rule != new_rule) {
2358 COVERAGE_INC(facet_changed_rule);
2359 list_remove(&facet->list_node);
2360 list_push_back(&new_rule->facets, &facet->list_node);
2361 facet->rule = new_rule;
2362 facet->used = new_rule->up.created;
2363 facet->rs_used = facet->used;
2366 ofpbuf_delete(odp_actions);
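/* Illustrative sketch (not part of the original file): the heart of
 * facet_revalidate() is deciding whether the datapath must be touched, which
 * is the case when the translated action bytes changed or the "may install"
 * verdict flipped.  A hypothetical helper expressing just that test: */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool
example_needs_datapath_update(const void *old_actions, size_t old_len,
                              const void *new_actions, size_t new_len,
                              bool was_installed, bool may_install)
{
    bool actions_changed = (old_len != new_len
                            || memcmp(old_actions, new_actions, old_len));

    return actions_changed || may_install != was_installed;
}
#endif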
2371 /* Updates 'facet''s used time. Caller is responsible for calling
2372 * facet_push_stats() to update the flows which 'facet' resubmits into. */
2374 facet_update_time(struct ofproto_dpif *ofproto, struct facet *facet,
2377 if (used > facet->used) {
2379 if (used > facet->rule->used) {
2380 facet->rule->used = used;
2382 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
2386 /* Folds the statistics from 'stats' into the counters in 'facet'.
2388 * Because of the meaning of a facet's counters, it only makes sense to do this
2389 * if 'stats' are not tracked in the datapath, that is, if 'stats' represents a
2390 * packet that was sent by hand or if it represents statistics that have been
2391 * cleared out of the datapath. */
2393 facet_update_stats(struct ofproto_dpif *ofproto, struct facet *facet,
2394 const struct dpif_flow_stats *stats)
2396 if (stats->n_packets || stats->used > facet->used) {
2397 facet_update_time(ofproto, facet, stats->used);
2398 facet->packet_count += stats->n_packets;
2399 facet->byte_count += stats->n_bytes;
2400 facet_push_stats(facet);
2401 netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
2406 facet_push_stats(struct facet *facet)
2408 uint64_t rs_packets, rs_bytes;
2410 assert(facet->packet_count >= facet->rs_packet_count);
2411 assert(facet->byte_count >= facet->rs_byte_count);
2412 assert(facet->used >= facet->rs_used);
2414 rs_packets = facet->packet_count - facet->rs_packet_count;
2415 rs_bytes = facet->byte_count - facet->rs_byte_count;
2417 if (rs_packets || rs_bytes || facet->used > facet->rs_used) {
2418 facet->rs_packet_count = facet->packet_count;
2419 facet->rs_byte_count = facet->byte_count;
2420 facet->rs_used = facet->used;
2422 flow_push_stats(facet->rule, &facet->flow,
2423 rs_packets, rs_bytes, facet->used);
2427 struct ofproto_push {
2428 struct action_xlate_ctx ctx;
2435 push_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
2437 struct ofproto_push *push = CONTAINER_OF(ctx, struct ofproto_push, ctx);
2440 rule->packet_count += push->packets;
2441 rule->byte_count += push->bytes;
2442 rule->used = MAX(push->used, rule->used);
2446 /* Pushes flow statistics to the rules which 'flow' resubmits into given
2447 * 'rule''s actions. */
2449 flow_push_stats(const struct rule_dpif *rule,
2450 struct flow *flow, uint64_t packets, uint64_t bytes,
2453 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2454 struct ofproto_push push;
2456 push.packets = packets;
2460 action_xlate_ctx_init(&push.ctx, ofproto, flow, NULL);
2461 push.ctx.resubmit_hook = push_resubmit;
2462 ofpbuf_delete(xlate_actions(&push.ctx,
2463 rule->up.actions, rule->up.n_actions));
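/* Illustrative sketch (not part of the original file): flow_push_stats()
 * passes per-call state to its resubmit hook by embedding the translation
 * context in a larger struct and recovering the outer struct with
 * CONTAINER_OF.  A stripped-down, hypothetical version of that idiom: */
#if 0
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_CONTAINER_OF(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))

struct example_ctx {
    void (*hook)(struct example_ctx *);
};

struct example_push {
    struct example_ctx ctx;     /* Embedded; must be recoverable. */
    uint64_t packets;
    uint64_t bytes;
};

static void
example_hook(struct example_ctx *ctx)
{
    struct example_push *push =
        EXAMPLE_CONTAINER_OF(ctx, struct example_push, ctx);

    /* 'push->packets' and 'push->bytes' are now available here. */
    (void) push;
}
#endif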
2468 static struct rule_dpif *
2469 rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
2471 return rule_dpif_cast(rule_from_cls_rule(
2472 classifier_lookup(&ofproto->up.tables[0],
2476 static struct rule *
2479 struct rule_dpif *rule = xmalloc(sizeof *rule);
2484 rule_dealloc(struct rule *rule_)
2486 struct rule_dpif *rule = rule_dpif_cast(rule_);
2491 rule_construct(struct rule *rule_)
2493 struct rule_dpif *rule = rule_dpif_cast(rule_);
2494 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2495 struct rule_dpif *old_rule;
2498 error = validate_actions(rule->up.actions, rule->up.n_actions,
2499 &rule->up.cr.flow, ofproto->max_ports);
2504 old_rule = rule_dpif_cast(rule_from_cls_rule(classifier_find_rule_exactly(
2505 &ofproto->up.tables[0],
2508 ofproto_rule_destroy(&old_rule->up);
2511 rule->used = rule->up.created;
2512 rule->packet_count = 0;
2513 rule->byte_count = 0;
2514 list_init(&rule->facets);
2515 classifier_insert(&ofproto->up.tables[0], &rule->up.cr);
2517 ofproto->need_revalidate = true;
2523 rule_destruct(struct rule *rule_)
2525 struct rule_dpif *rule = rule_dpif_cast(rule_);
2526 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2527 struct facet *facet, *next_facet;
2529 classifier_remove(&ofproto->up.tables[0], &rule->up.cr);
2530 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
2531 facet_revalidate(ofproto, facet);
2533 ofproto->need_revalidate = true;
2537 rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
2539 struct rule_dpif *rule = rule_dpif_cast(rule_);
2540 struct facet *facet;
2542 /* Start from historical data for 'rule' itself that are no longer tracked
2543 * in facets. This counts, for example, facets that have expired. */
2544 *packets = rule->packet_count;
2545 *bytes = rule->byte_count;
2547 /* Add any statistics that are tracked by facets. This includes
2548 * statistical data recently updated by ofproto_update_stats() as well as
2549 * stats for packets that were executed "by hand" via dpif_execute(). */
2550 LIST_FOR_EACH (facet, list_node, &rule->facets) {
2551 *packets += facet->packet_count;
2552 *bytes += facet->byte_count;
2557 rule_execute(struct rule *rule_, struct flow *flow, struct ofpbuf *packet)
2559 struct rule_dpif *rule = rule_dpif_cast(rule_);
2560 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2561 struct action_xlate_ctx ctx;
2562 struct ofpbuf *odp_actions;
2563 struct facet *facet;
2566 /* First look for a related facet. If we find one, account it to that. */
2567 facet = facet_lookup_valid(ofproto, flow);
2568 if (facet && facet->rule == rule) {
2569 facet_execute(ofproto, facet, packet);
2573 /* Otherwise, if 'rule' is in fact the correct rule for 'packet', then
2574 * create a new facet for it and use that. */
2575 if (rule_dpif_lookup(ofproto, flow) == rule) {
2576 facet = facet_create(rule, flow, packet);
2577 facet_execute(ofproto, facet, packet);
2578 facet_install(ofproto, facet, true);
2582 /* We can't account anything to a facet. If we were to try, then that
2583 * facet would have a non-matching rule, busting our invariants. */
2584 action_xlate_ctx_init(&ctx, ofproto, flow, packet);
2585 odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
2586 size = packet->size;
2587 if (execute_odp_actions(ofproto, flow, odp_actions->data,
2588 odp_actions->size, packet)) {
2589 rule->used = time_msec();
2590 rule->packet_count++;
2591 rule->byte_count += size;
2592 flow_push_stats(rule, flow, 1, size, rule->used);
2594 ofpbuf_delete(odp_actions);
2600 rule_modify_actions(struct rule *rule_,
2601 const union ofp_action *actions, size_t n_actions)
2603 struct rule_dpif *rule = rule_dpif_cast(rule_);
2604 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2607 error = validate_actions(actions, n_actions, &rule->up.cr.flow,
2608 ofproto->max_ports);
2610 ofproto->need_revalidate = true;
2615 /* Sends 'packet' out of port 'odp_port' within 'ofproto'. If 'vlan_tci' is
2616  * zero, the packet will not have any 802.1Q header; if it is nonzero, then the
2617 * packet will be sent with the VLAN TCI specified by 'vlan_tci & ~VLAN_CFI'.
2619 * Returns 0 if successful, otherwise a positive errno value. */
2621 send_packet(struct ofproto_dpif *ofproto, uint32_t odp_port, uint16_t vlan_tci,
2622 const struct ofpbuf *packet)
2624 struct ofpbuf odp_actions;
2627 ofpbuf_init(&odp_actions, 32);
2628 if (vlan_tci != 0) {
2629 nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_SET_DL_TCI,
2630 ntohs(vlan_tci & ~VLAN_CFI));
2632 nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port);
2633 error = dpif_execute(ofproto->dpif, odp_actions.data, odp_actions.size,
2635 ofpbuf_uninit(&odp_actions);
2638 VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
2639 ofproto->up.name, odp_port, strerror(error));
2644 /* OpenFlow to ODP action translation. */
2646 static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2647 struct action_xlate_ctx *ctx);
2648 static bool xlate_normal(struct action_xlate_ctx *);
2651 add_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
2653 const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
2654 uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
2657 if (ofport->up.opp.config & htonl(OFPPC_NO_FWD)) {
2658 /* Forwarding disabled on port. */
2663 * We don't have an ofport record for this port, but it doesn't hurt to
2664 * allow forwarding to it anyhow. Maybe such a port will appear later
2665 * and we're pre-populating the flow table.
2669 nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port);
2670 ctx->nf_output_iface = ofp_port;
2674 xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2676 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
2677 struct rule_dpif *rule;
2678 uint16_t old_in_port;
2680 /* Look up a flow with 'in_port' as the input port. Then restore the
2681 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2682 * have surprising behavior). */
2683 old_in_port = ctx->flow.in_port;
2684 ctx->flow.in_port = in_port;
2685 rule = rule_dpif_lookup(ctx->ofproto, &ctx->flow);
2686 ctx->flow.in_port = old_in_port;
2688 if (ctx->resubmit_hook) {
2689 ctx->resubmit_hook(ctx, rule);
2694 do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx);
2698 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
2700 VLOG_ERR_RL(&recurse_rl, "NXAST_RESUBMIT recursed over %d times",
2701 MAX_RESUBMIT_RECURSION);
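/* Illustrative sketch (not part of the original file): xlate_table_action()
 * bounds NXAST_RESUBMIT recursion with a depth counter checked against
 * MAX_RESUBMIT_RECURSION.  A hypothetical skeleton of that guard (the lookup
 * and translation steps are placeholders): */
#if 0
#define EXAMPLE_MAX_DEPTH 16

struct example_xlate {
    int recurse;
};

static void
example_resubmit(struct example_xlate *ctx)
{
    if (ctx->recurse < EXAMPLE_MAX_DEPTH) {
        ctx->recurse++;
        /* ... look up the rule for the modified flow and translate its
         * actions, which may call example_resubmit() again ... */
        ctx->recurse--;
    } else {
        /* Refuse to recurse further; the real code logs at a rate limit. */
    }
}
#endif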
2706 flood_packets(struct ofproto_dpif *ofproto,
2707 uint16_t ofp_in_port, ovs_be32 mask,
2708 uint16_t *nf_output_iface, struct ofpbuf *odp_actions)
2710 struct ofport_dpif *ofport;
2712 HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
2713 uint16_t ofp_port = ofport->up.ofp_port;
2714 if (ofp_port != ofp_in_port && !(ofport->up.opp.config & mask)) {
2715 nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT,
2719 *nf_output_iface = NF_OUT_FLOOD;
2723 xlate_output_action__(struct action_xlate_ctx *ctx,
2724 uint16_t port, uint16_t max_len)
2726 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2728 ctx->nf_output_iface = NF_OUT_DROP;
2732 add_output_action(ctx, ctx->flow.in_port);
2735 xlate_table_action(ctx, ctx->flow.in_port);
2741 flood_packets(ctx->ofproto, ctx->flow.in_port, htonl(OFPPC_NO_FLOOD),
2742 &ctx->nf_output_iface, ctx->odp_actions);
2745 flood_packets(ctx->ofproto, ctx->flow.in_port, htonl(0),
2746 &ctx->nf_output_iface, ctx->odp_actions);
2748 case OFPP_CONTROLLER:
2749 nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len);
2752 add_output_action(ctx, OFPP_LOCAL);
2755 if (port != ctx->flow.in_port) {
2756 add_output_action(ctx, port);
2761 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2762 ctx->nf_output_iface = NF_OUT_FLOOD;
2763 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2764 ctx->nf_output_iface = prev_nf_output_iface;
2765 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2766 ctx->nf_output_iface != NF_OUT_FLOOD) {
2767 ctx->nf_output_iface = NF_OUT_MULTI;
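/* Illustrative sketch (not part of the original file): the code above keeps
 * the NetFlow output interface as a tiny state machine: NF_OUT_FLOOD is
 * sticky, a single output records the port, and a second distinct output
 * collapses to NF_OUT_MULTI.  A hypothetical combinator with the same
 * transitions (constants invented): */
#if 0
enum { EX_OUT_DROP = -1, EX_OUT_MULTI = -2, EX_OUT_FLOOD = -3 };

static int
example_merge_output_iface(int prev, int latest)
{
    if (prev == EX_OUT_FLOOD) {
        return EX_OUT_FLOOD;                /* Flood is sticky. */
    } else if (latest == EX_OUT_DROP) {
        return prev;                        /* This action produced nothing. */
    } else if (prev != EX_OUT_DROP && latest != EX_OUT_FLOOD) {
        return EX_OUT_MULTI;                /* More than one output. */
    }
    return latest;
}
#endif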
2772 xlate_output_action(struct action_xlate_ctx *ctx,
2773 const struct ofp_action_output *oao)
2775 xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
2778 /* If the final ODP action in 'ctx' is "pop priority", drop it, as an
2779 * optimization, because we're going to add another action that sets the
2780  * priority immediately after, or because there are no actions following the pop priority. */
2783 remove_pop_action(struct action_xlate_ctx *ctx)
2785 if (ctx->odp_actions->size == ctx->last_pop_priority) {
2786 ctx->odp_actions->size -= NLA_ALIGN(NLA_HDRLEN);
2787 ctx->last_pop_priority = -1;
2792 add_pop_action(struct action_xlate_ctx *ctx)
2794 if (ctx->odp_actions->size != ctx->last_pop_priority) {
2795 nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_POP_PRIORITY);
2796 ctx->last_pop_priority = ctx->odp_actions->size;
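/* Illustrative sketch (not part of the original file): add_pop_action()
 * records the buffer size right after emitting "pop priority", and
 * remove_pop_action() truncates the buffer if nothing has been appended
 * since, so a pop immediately followed by another priority change is elided.
 * A hypothetical version of that bookkeeping: */
#if 0
#include <stddef.h>

struct example_actbuf {
    size_t size;                /* Bytes of encoded actions so far. */
    size_t last_pop_offset;     /* Size right after the last pop, or
                                 * (size_t) -1 if no pop is pending. */
};

static void
example_note_pop(struct example_actbuf *buf, size_t pop_encoded_size)
{
    if (buf->size != buf->last_pop_offset) {
        buf->size += pop_encoded_size;      /* Emit the pop action. */
        buf->last_pop_offset = buf->size;
    }
}

static void
example_elide_trailing_pop(struct example_actbuf *buf, size_t pop_encoded_size)
{
    if (buf->size == buf->last_pop_offset) {
        buf->size -= pop_encoded_size;      /* Drop the trailing pop. */
        buf->last_pop_offset = (size_t) -1;
    }
}
#endif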
2801 xlate_enqueue_action(struct action_xlate_ctx *ctx,
2802 const struct ofp_action_enqueue *oae)
2804 uint16_t ofp_port, odp_port;
2808 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
2811 /* Fall back to ordinary output action. */
2812 xlate_output_action__(ctx, ntohs(oae->port), 0);
2816 /* Figure out ODP output port. */
2817 ofp_port = ntohs(oae->port);
2818 if (ofp_port == OFPP_IN_PORT) {
2819 ofp_port = ctx->flow.in_port;
2821 odp_port = ofp_port_to_odp_port(ofp_port);
2823 /* Add ODP actions. */
2824 remove_pop_action(ctx);
2825 nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority);
2826 add_output_action(ctx, odp_port);
2827 add_pop_action(ctx);
2829 /* Update NetFlow output port. */
2830 if (ctx->nf_output_iface == NF_OUT_DROP) {
2831 ctx->nf_output_iface = odp_port;
2832 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
2833 ctx->nf_output_iface = NF_OUT_MULTI;
2838 xlate_set_queue_action(struct action_xlate_ctx *ctx,
2839 const struct nx_action_set_queue *nasq)
2844 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
2847 /* Couldn't translate queue to a priority, so ignore. A warning
2848 * has already been logged. */
2852 remove_pop_action(ctx);
2853 nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority);
2857 xlate_set_dl_tci(struct action_xlate_ctx *ctx)
2859 ovs_be16 tci = ctx->flow.vlan_tci;
2860 if (!(tci & htons(VLAN_CFI))) {
2861 nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_STRIP_VLAN);
2863 nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_TCI,
2864 tci & ~htons(VLAN_CFI));
2868 struct xlate_reg_state {
2874 save_reg_state(const struct action_xlate_ctx *ctx,
2875 struct xlate_reg_state *state)
2877 state->vlan_tci = ctx->flow.vlan_tci;
2878 state->tun_id = ctx->flow.tun_id;
2882 update_reg_state(struct action_xlate_ctx *ctx,
2883 const struct xlate_reg_state *state)
2885 if (ctx->flow.vlan_tci != state->vlan_tci) {
2886 xlate_set_dl_tci(ctx);
2888 if (ctx->flow.tun_id != state->tun_id) {
2889 nl_msg_put_be64(ctx->odp_actions,
2890 ODP_ACTION_ATTR_SET_TUNNEL, ctx->flow.tun_id);
2895 xlate_autopath(struct action_xlate_ctx *ctx,
2896 const struct nx_action_autopath *naa)
2898 uint16_t ofp_port = ntohl(naa->id);
2899 struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);
2901 if (!port || !port->bundle) {
2902 ofp_port = OFPP_NONE;
2903 } else if (port->bundle->bond) {
2904 /* Autopath does not support VLAN hashing. */
2905 struct ofport_dpif *slave = bond_choose_output_slave(
2906 port->bundle->bond, &ctx->flow, OFP_VLAN_NONE, &ctx->tags);
2908 ofp_port = slave->up.ofp_port;
2911 autopath_execute(naa, &ctx->flow, ofp_port);
2915 xlate_nicira_action(struct action_xlate_ctx *ctx,
2916 const struct nx_action_header *nah)
2918 const struct nx_action_resubmit *nar;
2919 const struct nx_action_set_tunnel *nast;
2920 const struct nx_action_set_queue *nasq;
2921 const struct nx_action_multipath *nam;
2922 const struct nx_action_autopath *naa;
2923 enum nx_action_subtype subtype = ntohs(nah->subtype);
2924 struct xlate_reg_state state;
2927 assert(nah->vendor == htonl(NX_VENDOR_ID));
2929 case NXAST_RESUBMIT:
2930 nar = (const struct nx_action_resubmit *) nah;
2931 xlate_table_action(ctx, ntohs(nar->in_port));
2934 case NXAST_SET_TUNNEL:
2935 nast = (const struct nx_action_set_tunnel *) nah;
2936 tun_id = htonll(ntohl(nast->tun_id));
2937 nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id);
2938 ctx->flow.tun_id = tun_id;
2941 case NXAST_DROP_SPOOFED_ARP:
2942 if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) {
2943 nl_msg_put_flag(ctx->odp_actions,
2944 ODP_ACTION_ATTR_DROP_SPOOFED_ARP);
2948 case NXAST_SET_QUEUE:
2949 nasq = (const struct nx_action_set_queue *) nah;
2950 xlate_set_queue_action(ctx, nasq);
2953 case NXAST_POP_QUEUE:
2954 add_pop_action(ctx);
2957 case NXAST_REG_MOVE:
2958 save_reg_state(ctx, &state);
2959 nxm_execute_reg_move((const struct nx_action_reg_move *) nah,
2961 update_reg_state(ctx, &state);
2964 case NXAST_REG_LOAD:
2965 save_reg_state(ctx, &state);
2966 nxm_execute_reg_load((const struct nx_action_reg_load *) nah,
2968 update_reg_state(ctx, &state);
2972 /* Nothing to do. */
2975 case NXAST_SET_TUNNEL64:
2976 tun_id = ((const struct nx_action_set_tunnel64 *) nah)->tun_id;
2977 nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id);
2978 ctx->flow.tun_id = tun_id;
2981 case NXAST_MULTIPATH:
2982 nam = (const struct nx_action_multipath *) nah;
2983 multipath_execute(nam, &ctx->flow);
2986 case NXAST_AUTOPATH:
2987 naa = (const struct nx_action_autopath *) nah;
2988 xlate_autopath(ctx, naa);
2991 /* If you add a new action here that modifies flow data, don't forget to
2992 * update the flow key in ctx->flow at the same time. */
2994 case NXAST_SNAT__OBSOLETE:
2996 VLOG_DBG_RL(&rl, "unknown Nicira action type %d", (int) subtype);
3002 do_xlate_actions(const union ofp_action *in, size_t n_in,
3003 struct action_xlate_ctx *ctx)
3005 const struct ofport_dpif *port;
3006 struct actions_iterator iter;
3007 const union ofp_action *ia;
3009 port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
3011 && port->up.opp.config & htonl(OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
3012 port->up.opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
3013 ? htonl(OFPPC_NO_RECV_STP)
3014 : htonl(OFPPC_NO_RECV))) {
3015 /* Drop this flow. */
3019 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
3020 enum ofp_action_type type = ntohs(ia->type);
3021 const struct ofp_action_dl_addr *oada;
3025 xlate_output_action(ctx, &ia->output);
3028 case OFPAT_SET_VLAN_VID:
3029 ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
3030 ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
3031 xlate_set_dl_tci(ctx);
3034 case OFPAT_SET_VLAN_PCP:
3035 ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
3036 ctx->flow.vlan_tci |= htons(
3037 (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
3038 xlate_set_dl_tci(ctx);
3041 case OFPAT_STRIP_VLAN:
3042 ctx->flow.vlan_tci = htons(0);
3043 xlate_set_dl_tci(ctx);
3046 case OFPAT_SET_DL_SRC:
3047 oada = ((struct ofp_action_dl_addr *) ia);
3048 nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_SRC,
3049 oada->dl_addr, ETH_ADDR_LEN);
3050 memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN);
3053 case OFPAT_SET_DL_DST:
3054 oada = ((struct ofp_action_dl_addr *) ia);
3055 nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_DST,
3056 oada->dl_addr, ETH_ADDR_LEN);
3057 memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN);
3060 case OFPAT_SET_NW_SRC:
3061 nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_SRC,
3062 ia->nw_addr.nw_addr);
3063 ctx->flow.nw_src = ia->nw_addr.nw_addr;
3066 case OFPAT_SET_NW_DST:
3067 nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_DST,
3068 ia->nw_addr.nw_addr);
3069 ctx->flow.nw_dst = ia->nw_addr.nw_addr;
3072 case OFPAT_SET_NW_TOS:
3073 nl_msg_put_u8(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_TOS,
3075 ctx->flow.nw_tos = ia->nw_tos.nw_tos;
3078 case OFPAT_SET_TP_SRC:
3079 nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_SRC,
3080 ia->tp_port.tp_port);
3081 ctx->flow.tp_src = ia->tp_port.tp_port;
3084 case OFPAT_SET_TP_DST:
3085 nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_DST,
3086 ia->tp_port.tp_port);
3087 ctx->flow.tp_dst = ia->tp_port.tp_port;
3091 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
3095 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
3099 VLOG_DBG_RL(&rl, "unknown action type %d", (int) type);
3106 action_xlate_ctx_init(struct action_xlate_ctx *ctx,
3107 struct ofproto_dpif *ofproto, const struct flow *flow,
3108 const struct ofpbuf *packet)
3110 ctx->ofproto = ofproto;
3112 ctx->packet = packet;
3113 ctx->resubmit_hook = NULL;
3116 static struct ofpbuf *
3117 xlate_actions(struct action_xlate_ctx *ctx,
3118 const union ofp_action *in, size_t n_in)
3120 COVERAGE_INC(ofproto_dpif_xlate);
3122 ctx->odp_actions = ofpbuf_new(512);
3124 ctx->may_set_up_flow = true;
3125 ctx->nf_output_iface = NF_OUT_DROP;
3127 ctx->last_pop_priority = -1;
3129 if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) {
3130 ctx->may_set_up_flow = false;
3132 do_xlate_actions(in, n_in, ctx);
3135 remove_pop_action(ctx);
3137     /* Check with in-band control to see if we're allowed to set up this flow. */
3139 if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
3140 ctx->odp_actions->data,
3141 ctx->odp_actions->size)) {
3142 ctx->may_set_up_flow = false;
3145 return ctx->odp_actions;
3148 /* OFPP_NORMAL implementation. */
3151 struct ofport_dpif *port;
3156 struct dst builtin[32];
3158 size_t n, allocated;
3161 static void dst_set_init(struct dst_set *);
3162 static void dst_set_add(struct dst_set *, const struct dst *);
3163 static void dst_set_free(struct dst_set *);
3165 static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
3168 set_dst(struct action_xlate_ctx *ctx, struct dst *dst,
3169 const struct ofbundle *in_bundle, const struct ofbundle *out_bundle)
3171 dst->vlan = (out_bundle->vlan >= 0 ? OFP_VLAN_NONE
3172 : in_bundle->vlan >= 0 ? in_bundle->vlan
3173 : ctx->flow.vlan_tci == 0 ? OFP_VLAN_NONE
3174 : vlan_tci_to_vid(ctx->flow.vlan_tci));
3176 dst->port = (!out_bundle->bond
3177 ? ofbundle_get_a_port(out_bundle)
3178 : bond_choose_output_slave(out_bundle->bond, &ctx->flow,
3179 dst->vlan, &ctx->tags));
3181 return dst->port != NULL;
3185 mirror_mask_ffs(mirror_mask_t mask)
3187 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
3192 dst_set_init(struct dst_set *set)
3194 set->dsts = set->builtin;
3196 set->allocated = ARRAY_SIZE(set->builtin);
3200 dst_set_add(struct dst_set *set, const struct dst *dst)
3202 if (set->n >= set->allocated) {
3203 size_t new_allocated;
3204 struct dst *new_dsts;
3206 new_allocated = set->allocated * 2;
3207 new_dsts = xmalloc(new_allocated * sizeof *new_dsts);
3208 memcpy(new_dsts, set->dsts, set->n * sizeof *new_dsts);
3212 set->dsts = new_dsts;
3213 set->allocated = new_allocated;
3215 set->dsts[set->n++] = *dst;
3219 dst_set_free(struct dst_set *set)
3221 if (set->dsts != set->builtin) {
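/* Illustrative sketch (not part of the original file): struct dst_set starts
 * with a small array embedded in the struct and moves to the heap only on
 * overflow, doubling the allocation each time.  A generic, hypothetical form
 * of the same pattern (error handling omitted; the real code uses xmalloc(),
 * which aborts on allocation failure): */
#if 0
#include <stdlib.h>
#include <string.h>

struct example_set {
    int builtin[32];            /* Covers the common case without malloc(). */
    int *items;
    size_t n, allocated;
};

static void
example_set_init(struct example_set *set)
{
    set->items = set->builtin;
    set->n = 0;
    set->allocated = sizeof set->builtin / sizeof set->builtin[0];
}

static void
example_set_add(struct example_set *set, int item)
{
    if (set->n >= set->allocated) {
        size_t new_allocated = set->allocated * 2;
        int *new_items = malloc(new_allocated * sizeof *new_items);

        memcpy(new_items, set->items, set->n * sizeof *new_items);
        if (set->items != set->builtin) {
            free(set->items);
        }
        set->items = new_items;
        set->allocated = new_allocated;
    }
    set->items[set->n++] = item;
}

static void
example_set_free(struct example_set *set)
{
    if (set->items != set->builtin) {
        free(set->items);
    }
}
#endif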
3227 dst_is_duplicate(const struct dst_set *set, const struct dst *test)
3230 for (i = 0; i < set->n; i++) {
3231 if (set->dsts[i].vlan == test->vlan
3232 && set->dsts[i].port == test->port) {
3240 ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
3242 return bundle->vlan < 0 && vlan_bitmap_contains(bundle->trunks, vlan);
3246 ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
3248 return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
3251 /* Returns an arbitrary interface within 'bundle'. */
3252 static struct ofport_dpif *
3253 ofbundle_get_a_port(const struct ofbundle *bundle)
3255 return CONTAINER_OF(list_front(&bundle->ports),
3256 struct ofport_dpif, bundle_node);
3260 compose_dsts(struct action_xlate_ctx *ctx, uint16_t vlan,
3261 const struct ofbundle *in_bundle,
3262 const struct ofbundle *out_bundle, struct dst_set *set)
3266 if (out_bundle == OFBUNDLE_FLOOD) {
3267 struct ofbundle *bundle;
3269 HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
3270 if (bundle != in_bundle
3271 && ofbundle_includes_vlan(bundle, vlan)
3272 && bundle->floodable
3273 && !bundle->mirror_out
3274 && set_dst(ctx, &dst, in_bundle, bundle)) {
3275 dst_set_add(set, &dst);
3278 ctx->nf_output_iface = NF_OUT_FLOOD;
3279 } else if (out_bundle && set_dst(ctx, &dst, in_bundle, out_bundle)) {
3280 dst_set_add(set, &dst);
3281 ctx->nf_output_iface = dst.port->odp_port;
3286 vlan_is_mirrored(const struct ofmirror *m, int vlan)
3288 return vlan_bitmap_contains(m->vlans, vlan);
3292 compose_mirror_dsts(struct action_xlate_ctx *ctx,
3293 uint16_t vlan, const struct ofbundle *in_bundle,
3294 struct dst_set *set)
3296 struct ofproto_dpif *ofproto = ctx->ofproto;
3297 mirror_mask_t mirrors;
3301 mirrors = in_bundle->src_mirrors;
3302 for (i = 0; i < set->n; i++) {
3303 mirrors |= set->dsts[i].port->bundle->dst_mirrors;
3310 flow_vlan = vlan_tci_to_vid(ctx->flow.vlan_tci);
3311 if (flow_vlan == 0) {
3312 flow_vlan = OFP_VLAN_NONE;
3316 struct ofmirror *m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
3317 if (vlan_is_mirrored(m, vlan)) {
3321 if (set_dst(ctx, &dst, in_bundle, m->out)
3322 && !dst_is_duplicate(set, &dst)) {
3323 dst_set_add(set, &dst);
3326 struct ofbundle *bundle;
3328 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
3329 if (ofbundle_includes_vlan(bundle, m->out_vlan)
3330 && set_dst(ctx, &dst, in_bundle, bundle))
3332 if (bundle->vlan < 0) {
3333 dst.vlan = m->out_vlan;
3335 if (dst_is_duplicate(set, &dst)) {
3339 /* Use the vlan tag on the original flow instead of
3340 * the one passed in the vlan parameter. This ensures
3341 * that we compare the vlan from before any implicit
3342              * tagging takes place. This is necessary because
3343              * dst->vlan is the final vlan, after removing implicit tags. */
3345 if (bundle == in_bundle && dst.vlan == flow_vlan) {
3346 /* Don't send out input port on same VLAN. */
3349 dst_set_add(set, &dst);
3354 mirrors &= mirrors - 1;
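/* Illustrative sketch (not part of the original file): compose_mirror_dsts()
 * visits each active mirror by taking the lowest set bit of the mask with
 * ffs() and clearing it with "mask &= mask - 1".  A self-contained sketch of
 * that bit-iteration loop: */
#if 0
#include <stdint.h>
#include <strings.h>            /* ffs() */

static void
example_for_each_set_bit(uint32_t mask, void (*cb)(int bit))
{
    while (mask != 0) {
        cb(ffs(mask) - 1);      /* Index of the lowest set bit. */
        mask &= mask - 1;       /* Clear that bit. */
    }
}
#endif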
3359 compose_actions(struct action_xlate_ctx *ctx, uint16_t vlan,
3360 const struct ofbundle *in_bundle,
3361 const struct ofbundle *out_bundle)
3363 uint16_t initial_vlan, cur_vlan;
3364 const struct dst *dst;
3368 compose_dsts(ctx, vlan, in_bundle, out_bundle, &set);
3369 compose_mirror_dsts(ctx, vlan, in_bundle, &set);
3371 /* Output all the packets we can without having to change the VLAN. */
3372 initial_vlan = vlan_tci_to_vid(ctx->flow.vlan_tci);
3373 if (initial_vlan == 0) {
3374 initial_vlan = OFP_VLAN_NONE;
3376 for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
3377 if (dst->vlan != initial_vlan) {
3380 nl_msg_put_u32(ctx->odp_actions,
3381 ODP_ACTION_ATTR_OUTPUT, dst->port->odp_port);
3384 /* Then output the rest. */
3385 cur_vlan = initial_vlan;
3386 for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
3387 if (dst->vlan == initial_vlan) {
3390 if (dst->vlan != cur_vlan) {
3391 if (dst->vlan == OFP_VLAN_NONE) {
3392 nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_STRIP_VLAN);
3395 tci = htons(dst->vlan & VLAN_VID_MASK);
3396 tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
3397 nl_msg_put_be16(ctx->odp_actions,
3398 ODP_ACTION_ATTR_SET_DL_TCI, tci);
3400 cur_vlan = dst->vlan;
3402 nl_msg_put_u32(ctx->odp_actions,
3403 ODP_ACTION_ATTR_OUTPUT, dst->port->odp_port);
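/* Illustrative sketch (not part of the original file): compose_actions()
 * emits outputs in two passes, first the destinations already on the
 * packet's VLAN and then the rest, inserting a VLAN rewrite only when the
 * required VLAN changes between consecutive outputs.  A hypothetical outline
 * of that ordering (types and emit_* helpers are invented stubs): */
#if 0
#include <stddef.h>

struct example_dst { int port; int vlan; };

static void emit_output(int port) { (void) port; /* ...encode output... */ }
static void emit_set_vlan(int vlan) { (void) vlan; /* ...encode rewrite... */ }

static void
example_compose_outputs(const struct example_dst *dsts, size_t n,
                        int initial_vlan)
{
    int cur_vlan = initial_vlan;
    size_t i;

    for (i = 0; i < n; i++) {                   /* Pass 1: no rewrite needed. */
        if (dsts[i].vlan == initial_vlan) {
            emit_output(dsts[i].port);
        }
    }
    for (i = 0; i < n; i++) {                   /* Pass 2: everything else. */
        if (dsts[i].vlan == initial_vlan) {
            continue;
        }
        if (dsts[i].vlan != cur_vlan) {
            emit_set_vlan(dsts[i].vlan);        /* Rewrite only on change. */
            cur_vlan = dsts[i].vlan;
        }
        emit_output(dsts[i].port);
    }
}
#endif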
3409 /* Returns the effective vlan of a packet, taking into account both the
3410 * 802.1Q header and implicitly tagged ports. A value of 0 indicates that
3411 * the packet is untagged and -1 indicates it has an invalid header and
3412 * should be dropped. */
3414 flow_get_vlan(struct ofproto_dpif *ofproto, const struct flow *flow,
3415 struct ofbundle *in_bundle, bool have_packet)
3417 int vlan = vlan_tci_to_vid(flow->vlan_tci);
3418 if (in_bundle->vlan >= 0) {
3421 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3422 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
3423 "packet received on port %s configured with "
3424 "implicit VLAN %"PRIu16,
3425 ofproto->up.name, vlan,
3426 in_bundle->name, in_bundle->vlan);
3430 vlan = in_bundle->vlan;
3432 if (!ofbundle_includes_vlan(in_bundle, vlan)) {
3434 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3435 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
3436 "packet received on port %s not configured for "
3438 ofproto->up.name, vlan, in_bundle->name, vlan);
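/* Illustrative sketch (not part of the original file): flow_get_vlan()
 * reduces a packet to one effective VLAN: on an access port the packet must
 * be untagged and belongs to the port's implicit VLAN, while on a trunk the
 * 802.1Q VID is kept but must be in the allowed set.  A simplified,
 * hypothetical version (-1 means "drop"): */
#if 0
static int
example_effective_vlan(int port_implicit_vlan,      /* -1 for a trunk port. */
                       int packet_vid,              /* 0 if untagged. */
                       int (*trunk_allows)(int vid))
{
    if (port_implicit_vlan >= 0) {
        /* Access port: tagged packets are dropped. */
        return packet_vid == 0 ? port_implicit_vlan : -1;
    }
    /* Trunk port: keep the VID if the trunk carries it. */
    return trunk_allows(packet_vid) ? packet_vid : -1;
}
#endif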
3447 /* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
3448 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
3449 * indicate this; newer upstream kernels use gratuitous ARP requests. */
3451 is_gratuitous_arp(const struct flow *flow)
3453 return (flow->dl_type == htons(ETH_TYPE_ARP)
3454 && eth_addr_is_broadcast(flow->dl_dst)
3455 && (flow->nw_proto == ARP_OP_REPLY
3456 || (flow->nw_proto == ARP_OP_REQUEST
3457 && flow->nw_src == flow->nw_dst)));
3461 update_learning_table(struct ofproto_dpif *ofproto,
3462 const struct flow *flow, int vlan,
3463 struct ofbundle *in_bundle)
3465 struct mac_entry *mac;
3467 if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
3471 mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
3472 if (is_gratuitous_arp(flow)) {
3473 /* We don't want to learn from gratuitous ARP packets that are
3474 * reflected back over bond slaves so we lock the learning table. */
3475 if (!in_bundle->bond) {
3476 mac_entry_set_grat_arp_lock(mac);
3477 } else if (mac_entry_is_grat_arp_locked(mac)) {
3482 if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
3483 /* The log messages here could actually be useful in debugging,
3484 * so keep the rate limit relatively high. */
3485 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
3486 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
3487 "on port %s in VLAN %d",
3488 ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
3489 in_bundle->name, vlan);
3491 mac->port.p = in_bundle;
3492 tag_set_add(&ofproto->revalidate_set,
3493 mac_learning_changed(ofproto->ml, mac));
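/* Illustrative sketch (not part of the original file): update_learning_table()
 * will not move a MAC entry that is protected by a gratuitous-ARP lock, so a
 * gratuitous ARP reflected back over a bond slave cannot steal the entry.  A
 * schematic, hypothetical form of that decision: */
#if 0
#include <stdbool.h>

struct example_mac_entry {
    const void *port;           /* Where the MAC was learned. */
    bool grat_arp_locked;
};

/* Returns true if the entry may be (re)pointed at the ingress port. */
static bool
example_may_learn(struct example_mac_entry *e, bool is_grat_arp,
                  bool in_port_is_bond_slave)
{
    if (is_grat_arp) {
        if (!in_port_is_bond_slave) {
            e->grat_arp_locked = true;      /* Protect against reflections. */
        } else if (e->grat_arp_locked) {
            return false;                   /* Locked: ignore this ARP. */
        }
    }
    return true;
}
#endif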
3497 /* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
3498  * dropped.  Returns true if they may be forwarded, false if they should be dropped.
3501 * If 'have_packet' is true, it indicates that the caller is processing a
3502 * received packet. If 'have_packet' is false, then the caller is just
3503 * revalidating an existing flow because configuration has changed. Either
3504 * way, 'have_packet' only affects logging (there is no point in logging errors
3505 * during revalidation).
3507  * Sets '*in_bundlep' to the input bundle.  This will be a null pointer if
3508 * flow->in_port does not designate a known input port (in which case
3509 * is_admissible() returns false).
3511 * When returning true, sets '*vlanp' to the effective VLAN of the input
3512 * packet, as returned by flow_get_vlan().
3514 * May also add tags to '*tags', although the current implementation only does
3515 * so in one special case.
3518 is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow,
3520 tag_type *tags, int *vlanp, struct ofbundle **in_bundlep)
3522 struct ofport_dpif *in_port;
3523 struct ofbundle *in_bundle;
3526 /* Find the port and bundle for the received packet. */
3527 in_port = get_ofp_port(ofproto, flow->in_port);
3528     *in_bundlep = in_bundle = in_port ? in_port->bundle : NULL;
3529 if (!in_port || !in_bundle) {
3530 /* No interface? Something fishy... */
3532 /* Odd. A few possible reasons here:
3534 * - We deleted a port but there are still a few packets queued up
3537 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
3538 * we don't know about.
3540 * - Packet arrived on the local port but the local port is not
3543 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3545 VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
3547 ofproto->up.name, flow->in_port);
3551 *vlanp = vlan = flow_get_vlan(ofproto, flow, in_bundle, have_packet);
3556 /* Drop frames for reserved multicast addresses. */
3557 if (eth_addr_is_reserved(flow->dl_dst)) {
3561 /* Drop frames on bundles reserved for mirroring. */
3562 if (in_bundle->mirror_out) {
3564 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3565 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
3566 "%s, which is reserved exclusively for mirroring",
3567 ofproto->up.name, in_bundle->name);
3572 if (in_bundle->bond) {
3573 struct mac_entry *mac;
3575 switch (bond_check_admissibility(in_bundle->bond, in_port,
3576 flow->dl_dst, tags)) {
3583 case BV_DROP_IF_MOVED:
3584 mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
3585 if (mac && mac->port.p != in_bundle &&
3586 (!is_gratuitous_arp(flow)
3587 || mac_entry_is_grat_arp_locked(mac))) {
3597 /* If the composed actions may be applied to any packet in the given 'flow',
3598 * returns true. Otherwise, the actions should only be applied to 'packet', or
3599 * not at all, if 'packet' was NULL. */
3601 xlate_normal(struct action_xlate_ctx *ctx)
3603 struct ofbundle *in_bundle;
3604 struct ofbundle *out_bundle;
3605 struct mac_entry *mac;
3608 /* Check whether we should drop packets in this flow. */
3609 if (!is_admissible(ctx->ofproto, &ctx->flow, ctx->packet != NULL,
3610 &ctx->tags, &vlan, &in_bundle)) {
3615 /* Learn source MAC (but don't try to learn from revalidation). */
3617 update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
3620 /* Determine output bundle. */
3621 mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
3624 out_bundle = mac->port.p;
3625 } else if (!ctx->packet && !eth_addr_is_multicast(ctx->flow.dl_dst)) {
3626 /* If we are revalidating but don't have a learning entry then eject
3627 * the flow. Installing a flow that floods packets opens up a window
3628 * of time where we could learn from a packet reflected on a bond and
3629 * blackhole packets before the learning table is updated to reflect
3630 * the correct port. */
3633 out_bundle = OFBUNDLE_FLOOD;
3636 /* Don't send packets out their input bundles. */
3637 if (in_bundle == out_bundle) {
3643 compose_actions(ctx, vlan, in_bundle, out_bundle);
3650 get_drop_frags(struct ofproto *ofproto_)
3652 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3655 dpif_get_drop_frags(ofproto->dpif, &drop_frags);
3660 set_drop_frags(struct ofproto *ofproto_, bool drop_frags)
3662 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3664 dpif_set_drop_frags(ofproto->dpif, drop_frags);
3668 packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
3669 const struct flow *flow,
3670 const union ofp_action *ofp_actions, size_t n_ofp_actions)
3672 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3675 error = validate_actions(ofp_actions, n_ofp_actions, flow,
3676 ofproto->max_ports);
3678 struct action_xlate_ctx ctx;
3679 struct ofpbuf *odp_actions;
3681 action_xlate_ctx_init(&ctx, ofproto, flow, packet);
3682 odp_actions = xlate_actions(&ctx, ofp_actions, n_ofp_actions);
3683 dpif_execute(ofproto->dpif, odp_actions->data, odp_actions->size,
3685 ofpbuf_delete(odp_actions);
3691 get_netflow_ids(const struct ofproto *ofproto_,
3692 uint8_t *engine_type, uint8_t *engine_id)
3694 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3696 dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
3699 static struct ofproto_dpif *
3700 ofproto_dpif_lookup(const char *name)
3702 struct ofproto *ofproto = ofproto_lookup(name);
3703 return (ofproto && ofproto->ofproto_class == &ofproto_dpif_class
3704 ? ofproto_dpif_cast(ofproto)
3709 ofproto_unixctl_fdb_show(struct unixctl_conn *conn,
3710 const char *args, void *aux OVS_UNUSED)
3712 struct ds ds = DS_EMPTY_INITIALIZER;
3713 const struct ofproto_dpif *ofproto;
3714 const struct mac_entry *e;
3716 ofproto = ofproto_dpif_lookup(args);
3718 unixctl_command_reply(conn, 501, "no such bridge");
3722 ds_put_cstr(&ds, " port VLAN MAC Age\n");
3723 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
3724 struct ofbundle *bundle = e->port.p;
3725 ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
3726 ofbundle_get_a_port(bundle)->odp_port,
3727 e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
3729 unixctl_command_reply(conn, 200, ds_cstr(&ds));
3733 struct ofproto_trace {
3734 struct action_xlate_ctx ctx;
3740 trace_format_rule(struct ds *result, int level, const struct rule *rule)
3742 ds_put_char_multiple(result, '\t', level);
3744 ds_put_cstr(result, "No match\n");
3748 ds_put_format(result, "Rule: cookie=%#"PRIx64" ",
3749 ntohll(rule->flow_cookie));
3750 cls_rule_format(&rule->cr, result);
3751 ds_put_char(result, '\n');
3753 ds_put_char_multiple(result, '\t', level);
3754 ds_put_cstr(result, "OpenFlow ");
3755 ofp_print_actions(result, (const struct ofp_action_header *) rule->actions,
3756 rule->n_actions * sizeof *rule->actions);
3757 ds_put_char(result, '\n');
3761 trace_format_flow(struct ds *result, int level, const char *title,
3762 struct ofproto_trace *trace)
3764 ds_put_char_multiple(result, '\t', level);
3765 ds_put_format(result, "%s: ", title);
3766 if (flow_equal(&trace->ctx.flow, &trace->flow)) {
3767 ds_put_cstr(result, "unchanged");
3769 flow_format(result, &trace->ctx.flow);
3770 trace->flow = trace->ctx.flow;
3772 ds_put_char(result, '\n');
3776 trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
3778 struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx);
3779 struct ds *result = trace->result;
3781 ds_put_char(result, '\n');
3782 trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
3783 trace_format_rule(result, ctx->recurse + 1, &rule->up);
3787 ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_,
3788 void *aux OVS_UNUSED)
3790 char *dpname, *in_port_s, *tun_id_s, *packet_s;
3791 char *args = xstrdup(args_);
3792 char *save_ptr = NULL;
3793 struct ofproto_dpif *ofproto;
3794 struct ofpbuf packet;
3795 struct rule_dpif *rule;
3802 ofpbuf_init(&packet, strlen(args) / 2);
3805 dpname = strtok_r(args, " ", &save_ptr);
3806 tun_id_s = strtok_r(NULL, " ", &save_ptr);
3807 in_port_s = strtok_r(NULL, " ", &save_ptr);
3808 packet_s = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. */
3809 if (!dpname || !in_port_s || !packet_s) {
3810 unixctl_command_reply(conn, 501, "Bad command syntax");
3814 ofproto = ofproto_dpif_lookup(dpname);
3816 unixctl_command_reply(conn, 501, "Unknown ofproto (use ofproto/list "
3821 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
3822 in_port = ofp_port_to_odp_port(atoi(in_port_s));
3824 packet_s = ofpbuf_put_hex(&packet, packet_s, NULL);
3825 packet_s += strspn(packet_s, " ");
3826 if (*packet_s != '\0') {
3827 unixctl_command_reply(conn, 501, "Trailing garbage in command");
3830 if (packet.size < ETH_HEADER_LEN) {
3831 unixctl_command_reply(conn, 501, "Packet data too short for Ethernet");
3835 ds_put_cstr(&result, "Packet: ");
3836 s = ofp_packet_to_string(packet.data, packet.size, packet.size);
3837 ds_put_cstr(&result, s);
3840 flow_extract(&packet, tun_id, in_port, &flow);
3841 ds_put_cstr(&result, "Flow: ");
3842 flow_format(&result, &flow);
3843 ds_put_char(&result, '\n');
3845 rule = rule_dpif_lookup(ofproto, &flow);
3846 trace_format_rule(&result, 0, &rule->up);
3848 struct ofproto_trace trace;
3849 struct ofpbuf *odp_actions;
3851 trace.result = &result;
3853 action_xlate_ctx_init(&trace.ctx, ofproto, &flow, &packet);
3854 trace.ctx.resubmit_hook = trace_resubmit;
3855 odp_actions = xlate_actions(&trace.ctx,
3856 rule->up.actions, rule->up.n_actions);
3858 ds_put_char(&result, '\n');
3859 trace_format_flow(&result, 0, "Final flow", &trace);
3860 ds_put_cstr(&result, "Datapath actions: ");
3861 format_odp_actions(&result, odp_actions->data, odp_actions->size);
3862 ofpbuf_delete(odp_actions);
3865 unixctl_command_reply(conn, 200, ds_cstr(&result));
3868 ds_destroy(&result);
3869 ofpbuf_uninit(&packet);
3874 ofproto_dpif_unixctl_init(void)
3876 static bool registered;
3882 unixctl_command_register("ofproto/trace", ofproto_unixctl_trace, NULL);
3883 unixctl_command_register("fdb/show", ofproto_unixctl_fdb_show, NULL);
3886 const struct ofproto_class ofproto_dpif_class = {
3913 port_is_lacp_current,
3920 rule_modify_actions,
3933 is_mirror_output_bundle,