/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "dpif.h"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "csum.h"
#include "dpif-provider.h"
#include "flow.h"
#include "hmap.h"
#include "list.h"
#include "netdev.h"
#include "ofp-print.h"
#include "ofpbuf.h"
#include "packets.h"
#include "poll-loop.h"
#include "queue.h"
#include "timeval.h"
#include "util.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev)

/* Configuration parameters. */
enum { N_QUEUES = 2 };          /* Number of queues for dpif_recv(). */
enum { MAX_QUEUE_LEN = 100 };   /* Maximum number of packets per queue. */
enum { N_GROUPS = 16 };         /* Number of port groups. */
enum { MAX_PORTS = 256 };       /* Maximum number of ports. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */

/* Enough headroom to add a vlan tag, plus an extra 2 bytes to allow IP
 * headers to be aligned on a 4-byte boundary. */
enum { DP_NETDEV_HEADROOM = 2 + VLAN_HEADER_LEN };
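
/* With this headroom, packet data starts at offset 6 from the buffer base:
 * the 14-byte Ethernet header then ends at offset 20, so a following IPv4
 * header starts 4-byte aligned.  Pushing a 4-byte 802.1Q tag moves the data
 * back to offset 2 and keeps the IP header at offset 20, still aligned. */
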
/* Datapath based on the network device interface from netdev.h. */
struct dp_netdev {
    /* Identity. */
    int dp_idx;                 /* Index into 'dp_netdevs'. */
    struct list node;           /* Element in 'dp_netdev_list'. */
    int open_cnt;               /* Number of dpifs referencing this dp. */
    bool destroyed;             /* Free when 'open_cnt' reaches zero? */

    /* Features. */
    bool drop_frags;            /* Drop all IP fragments, if true. */
    struct ovs_queue queues[N_QUEUES]; /* Messages queued for dpif_recv(). */
    struct hmap flow_table;     /* Flow table. */
    struct odp_port_group groups[N_GROUPS];

    /* Statistics. */
    long long int n_frags;      /* Number of dropped IP fragments. */
    long long int n_hit;        /* Number of flow table matches. */
    long long int n_missed;     /* Number of flow table misses. */
    long long int n_lost;       /* Number of misses not passed to client. */

    /* Ports. */
    int n_ports;                /* Number of elements of 'ports' in use. */
    struct dp_netdev_port *ports[MAX_PORTS];
    struct list port_list;      /* All ports, for iteration. */
    unsigned int serial;        /* Bumped on every port add or delete. */
};

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    int port_no;                /* Index into dp_netdev's 'ports'. */
    struct list node;           /* Element in dp_netdev's 'port_list'. */
    struct netdev *netdev;      /* Network device backing this port. */
    bool internal;              /* Internal port (as ODP_PORT_INTERNAL)? */
};

/* A flow in dp_netdev's 'flow_table'. */
struct dp_netdev_flow {
    struct hmap_node node;      /* Element in dp_netdev's 'flow_table'. */
    flow_t key;                 /* The flow itself. */

    /* Statistics. */
    struct timespec used;       /* Last used time. */
    long long int packet_count; /* Number of packets matched. */
    long long int byte_count;   /* Number of bytes matched. */
    uint8_t ip_tos;             /* IP TOS value. */
    uint16_t tcp_ctl;           /* Bitwise-OR of seen tcp_ctl values. */

    /* Actions. */
    union odp_action *actions;  /* Actions executed on a match. */
    unsigned int n_actions;     /* Number of elements in 'actions'. */
};

/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;           /* Parent; dpif_netdev_cast() recovers us. */
    struct dp_netdev *dp;       /* Datapath this dpif is attached to. */
    int listen_mask;            /* Mask of ODPL_* bits for dpif_recv(). */
    unsigned int dp_serial;     /* dp->serial when ports were last polled. */
};

/* All netdev-based datapaths. */
static struct dp_netdev *dp_netdevs[256];
struct list dp_netdev_list = LIST_INITIALIZER(&dp_netdev_list);
enum { N_DP_NETDEVS = ARRAY_SIZE(dp_netdevs) };

/* Maximum port MTU seen so far. */
static int max_mtu = ETH_PAYLOAD_MAX;

static int get_port_by_number(struct dp_netdev *, uint16_t port_no,
                              struct dp_netdev_port **portp);
static int get_port_by_name(struct dp_netdev *, const char *devname,
                            struct dp_netdev_port **portp);
static void dp_netdev_free(struct dp_netdev *);
static void dp_netdev_flow_flush(struct dp_netdev *);
static int do_add_port(struct dp_netdev *, const char *devname, uint16_t flags,
                       uint16_t port_no);
static int do_del_port(struct dp_netdev *, uint16_t port_no);
static int dp_netdev_output_control(struct dp_netdev *, const struct ofpbuf *,
                                    int queue_no, int port_no, uint32_t arg);
static int dp_netdev_execute_actions(struct dp_netdev *,
                                     struct ofpbuf *, const flow_t *,
                                     const union odp_action *, int n);

static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    dpif_assert_class(dpif, &dpif_netdev_class);
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}

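/* Returns the datapath index encoded in a name of the form "dp<N>", or -1 if
 * 'name' does not have that form or N is out of range. */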
static int
name_to_dp_idx(const char *name)
{
    if (!strncmp(name, "dp", 2) && isdigit((unsigned char)name[2])) {
        int dp_idx = atoi(name + 2);
        if (dp_idx >= 0 && dp_idx < N_DP_NETDEVS) {
            return dp_idx;
        }
    }
    return -1;
}

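/* Looks up a datapath first by "dp<N>" name, then by scanning every datapath
 * for a port whose device name matches 'name'.  Returns NULL if neither
 * lookup succeeds. */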
static struct dp_netdev *
find_dp_netdev(const char *name)
{
    int dp_idx;
    size_t i;

    dp_idx = name_to_dp_idx(name);
    if (dp_idx >= 0) {
        return dp_netdevs[dp_idx];
    }

    for (i = 0; i < N_DP_NETDEVS; i++) {
        struct dp_netdev *dp = dp_netdevs[i];
        if (dp) {
            struct dp_netdev_port *port;
            if (!get_port_by_name(dp, name, &port)) {
                return dp;
            }
        }
    }
    return NULL;
}

static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    struct dpif_netdev *dpif;
    char *dpname;

    dp->open_cnt++;

    dpname = xasprintf("dp%d", dp->dp_idx);
    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, &dpif_netdev_class, dpname, dp->dp_idx, dp->dp_idx);
    dpif->dp = dp;
    dpif->listen_mask = 0;
    dpif->dp_serial = dp->serial;
    free(dpname);

    return &dpif->dpif;
}

static int
create_dp_netdev(const char *name, int dp_idx, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;
    int i;

    if (dp_netdevs[dp_idx]) {
        return EBUSY;
    }

    /* Create datapath. */
    dp_netdevs[dp_idx] = dp = xzalloc(sizeof *dp);
    list_push_back(&dp_netdev_list, &dp->node);
    dp->dp_idx = dp_idx;
    dp->open_cnt = 0;
    dp->drop_frags = false;
    for (i = 0; i < N_QUEUES; i++) {
        queue_init(&dp->queues[i]);
    }
    hmap_init(&dp->flow_table);
    for (i = 0; i < N_GROUPS; i++) {
        dp->groups[i].ports = NULL;
        dp->groups[i].n_ports = 0;
        dp->groups[i].group = i;
    }
    list_init(&dp->port_list);
    error = do_add_port(dp, name, ODP_PORT_INTERNAL, ODPP_LOCAL);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    *dpifp = create_dpif_netdev(dp);
    return 0;
}

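/* Implements the dpif "open" operation.  With 'create' true, a name of the
 * form "dp<N>" requests that specific slot and any other name takes the first
 * free slot; without 'create', the datapath must already exist. */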
static int
dpif_netdev_open(const char *name, const char *type OVS_UNUSED, bool create,
                 struct dpif **dpifp)
{
    if (create) {
        if (find_dp_netdev(name)) {
            return EEXIST;
        } else {
            int dp_idx = name_to_dp_idx(name);
            if (dp_idx >= 0) {
                return create_dp_netdev(name, dp_idx, dpifp);
            } else {
                /* Scan for unused dp_idx number. */
                for (dp_idx = 0; dp_idx < N_DP_NETDEVS; dp_idx++) {
                    int error = create_dp_netdev(name, dp_idx, dpifp);
                    if (error != EBUSY) {
                        return error;
                    }
                }

                /* All datapath numbers in use. */
                return ENOBUFS;
            }
        }
    } else {
        struct dp_netdev *dp = find_dp_netdev(name);
        if (dp) {
            *dpifp = create_dpif_netdev(dp);
            return 0;
        } else {
            return ENODEV;
        }
    }
}

static void
dp_netdev_free(struct dp_netdev *dp)
{
    int i;

    dp_netdev_flow_flush(dp);
    while (dp->n_ports > 0) {
        struct dp_netdev_port *port = CONTAINER_OF(
            dp->port_list.next, struct dp_netdev_port, node);
        do_del_port(dp, port->port_no);
    }
    for (i = 0; i < N_QUEUES; i++) {
        queue_destroy(&dp->queues[i]);
    }
    hmap_destroy(&dp->flow_table);
    for (i = 0; i < N_GROUPS; i++) {
        free(dp->groups[i].ports);
    }
    dp_netdevs[dp->dp_idx] = NULL;
    list_remove(&dp->node);
    free(dp);
}

static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    assert(dp->open_cnt > 0);
    if (--dp->open_cnt == 0 && dp->destroyed) {
        dp_netdev_free(dp);
    }
    free(dpif);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->destroyed = true;
    return 0;
}

static int
dpif_netdev_get_stats(const struct dpif *dpif, struct odp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    memset(stats, 0, sizeof *stats);
    stats->n_flows = hmap_count(&dp->flow_table);
    stats->cur_capacity = hmap_capacity(&dp->flow_table);
    stats->max_capacity = MAX_FLOWS;
    stats->n_ports = dp->n_ports;
    stats->max_ports = MAX_PORTS;
    stats->max_groups = N_GROUPS;
    stats->n_frags = dp->n_frags;
    stats->n_hit = dp->n_hit;
    stats->n_missed = dp->n_missed;
    stats->n_lost = dp->n_lost;
    stats->max_miss_queue = MAX_QUEUE_LEN;
    stats->max_action_queue = MAX_QUEUE_LEN;
    return 0;
}

static int
dpif_netdev_get_drop_frags(const struct dpif *dpif, bool *drop_fragsp)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    *drop_fragsp = dp->drop_frags;
    return 0;
}

static int
dpif_netdev_set_drop_frags(struct dpif *dpif, bool drop_frags)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->drop_frags = drop_frags;
    return 0;
}

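/* Adds a port to 'dp'.  An internal port (ODP_PORT_INTERNAL) is backed by a
 * "tap" netdev; other ports open the named system device and put it into
 * promiscuous mode so that every received frame reaches the datapath. */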
static int
do_add_port(struct dp_netdev *dp, const char *devname, uint16_t flags,
            uint16_t port_no)
{
    bool internal = (flags & ODP_PORT_INTERNAL) != 0;
    struct dp_netdev_port *port;
    struct netdev_options netdev_options;
    struct netdev *netdev;
    int mtu;
    int error;

    /* XXX reject devices already in some dp_netdev. */

    /* Open and validate network device. */
    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_ANY;
    if (internal) {
        netdev_options.type = "tap";
    }

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject loopback devices */
    /* XXX reject non-Ethernet devices */

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, false);
    if (error) {
        netdev_close(netdev);
        return error;
    }

    port = xmalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->internal = internal;

    netdev_get_mtu(netdev, &mtu);
    if (mtu > max_mtu) {
        max_mtu = mtu;
    }

    list_push_back(&dp->port_list, &port->node);
    dp->ports[port_no] = port;
    dp->n_ports++;
    dp->serial++;

    return 0;
}

static int
dpif_netdev_port_add(struct dpif *dpif, const char *devname, uint16_t flags,
                     uint16_t *port_nop)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int port_no;

    for (port_no = 0; port_no < MAX_PORTS; port_no++) {
        if (!dp->ports[port_no]) {
            *port_nop = port_no;
            return do_add_port(dp, devname, flags, port_no);
        }
    }
    return EFBIG;
}

static int
dpif_netdev_port_del(struct dpif *dpif, uint16_t port_no)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    return port_no == ODPP_LOCAL ? EINVAL : do_del_port(dp, port_no);
}

static bool
is_valid_port_number(uint16_t port_no)
{
    return port_no < MAX_PORTS;
}

static int
get_port_by_number(struct dp_netdev *dp,
                   uint16_t port_no, struct dp_netdev_port **portp)
{
    if (!is_valid_port_number(port_no)) {
        *portp = NULL;
        return EINVAL;
    } else {
        *portp = dp->ports[port_no];
        return *portp ? 0 : ENOENT;
    }
}

static int
get_port_by_name(struct dp_netdev *dp,
                 const char *devname, struct dp_netdev_port **portp)
{
    struct dp_netdev_port *port;

    LIST_FOR_EACH (port, struct dp_netdev_port, node, &dp->port_list) {
        if (!strcmp(netdev_get_name(port->netdev), devname)) {
            *portp = port;
            return 0;
        }
    }
    return ENOENT;
}

static int
do_del_port(struct dp_netdev *dp, uint16_t port_no)
{
    struct dp_netdev_port *port;
    char *name;
    int error;

    error = get_port_by_number(dp, port_no, &port);
    if (error) {
        return error;
    }

    list_remove(&port->node);
    dp->ports[port->port_no] = NULL;
    dp->n_ports--;
    dp->serial++;

    name = xstrdup(netdev_get_name(port->netdev));
    netdev_close(port->netdev);

    free(name);
    free(port);

    return 0;
}

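/* Fills '*odp_port' with the client-visible description of 'port'. */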
static void
answer_port_query(const struct dp_netdev_port *port, struct odp_port *odp_port)
{
    memset(odp_port, 0, sizeof *odp_port);
    ovs_strlcpy(odp_port->devname, netdev_get_name(port->netdev),
                sizeof odp_port->devname);
    odp_port->port = port->port_no;
    odp_port->flags = port->internal ? ODP_PORT_INTERNAL : 0;
}

static int
dpif_netdev_port_query_by_number(const struct dpif *dpif, uint16_t port_no,
                                 struct odp_port *odp_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    error = get_port_by_number(dp, port_no, &port);
    if (!error) {
        answer_port_query(port, odp_port);
    }
    return error;
}

static int
dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
                               struct odp_port *odp_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    error = get_port_by_name(dp, devname, &port);
    if (!error) {
        answer_port_query(port, odp_port);
    }
    return error;
}

static void
dp_netdev_free_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
{
    hmap_remove(&dp->flow_table, &flow->node);
    free(flow->actions);
    free(flow);
}

static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
    struct dp_netdev_flow *flow, *next;

    HMAP_FOR_EACH_SAFE (flow, next, struct dp_netdev_flow, node,
                        &dp->flow_table) {
        dp_netdev_free_flow(dp, flow);
    }
}

static int
dpif_netdev_flow_flush(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp_netdev_flow_flush(dp);
    return 0;
}

static int
dpif_netdev_port_list(const struct dpif *dpif, struct odp_port *ports, int n)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int i;

    i = 0;
    LIST_FOR_EACH (port, struct dp_netdev_port, node, &dp->port_list) {
        struct odp_port *odp_port = &ports[i];
        if (i >= n) {
            break;
        }
        answer_port_query(port, odp_port);
        i++;
    }
    return dp->n_ports;
}

static int
dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
    if (dpif->dp_serial != dpif->dp->serial) {
        dpif->dp_serial = dpif->dp->serial;
        return ENOBUFS;
    } else {
        return EAGAIN;
    }
}

static void
dpif_netdev_port_poll_wait(const struct dpif *dpif_)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
    if (dpif->dp_serial != dpif->dp->serial) {
        poll_immediate_wake();
    }
}

static int
get_port_group(const struct dpif *dpif, int group_no,
               struct odp_port_group **groupp)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    if (group_no >= 0 && group_no < N_GROUPS) {
        *groupp = &dp->groups[group_no];
        return 0;
    } else {
        *groupp = NULL;
        return EINVAL;
    }
}

static int
dpif_netdev_port_group_get(const struct dpif *dpif, int group_no,
                           uint16_t ports[], int n)
{
    struct odp_port_group *group;
    int error;

    if (n < 0) {
        return -EINVAL;
    }

    error = get_port_group(dpif, group_no, &group);
    if (!error) {
        memcpy(ports, group->ports, MIN(n, group->n_ports) * sizeof *ports);
        return group->n_ports;
    } else {
        return -error;
    }
}

static int
dpif_netdev_port_group_set(struct dpif *dpif, int group_no,
                           const uint16_t ports[], int n)
{
    struct odp_port_group *group;
    int error;

    if (n < 0 || n > MAX_PORTS) {
        return EINVAL;
    }

    error = get_port_group(dpif, group_no, &group);
    if (!error) {
        free(group->ports);
        group->ports = xmemdup(ports, n * sizeof *group->ports);
        group->n_ports = n;
        group->group = group_no;
    }
    return error;
}

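/* Returns the flow in 'dp' that exactly matches 'key', or a null pointer if
 * there is none.  This is an exact-match table: 'key' is hashed whole, so
 * every field (including the zeroed 'reserved' bytes) must be canonical. */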
static struct dp_netdev_flow *
dp_netdev_lookup_flow(const struct dp_netdev *dp, const flow_t *key)
{
    struct dp_netdev_flow *flow;

    assert(!key->reserved[0] && !key->reserved[1] && !key->reserved[2]);
    HMAP_FOR_EACH_WITH_HASH (flow, struct dp_netdev_flow, node,
                             flow_hash(key, 0), &dp->flow_table) {
        if (flow_equal(&flow->key, key)) {
            return flow;
        }
    }
    return NULL;
}

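/* Copies 'flow''s key, statistics, and (if the client provided room) actions
 * into '*odp_flow'.  A null 'flow' reports ENOENT through 'stats.error'
 * rather than through the return path, so per-flow errors can be batched. */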
static void
answer_flow_query(struct dp_netdev_flow *flow, uint32_t query_flags,
                  struct odp_flow *odp_flow)
{
    if (flow) {
        odp_flow->key = flow->key;
        odp_flow->stats.n_packets = flow->packet_count;
        odp_flow->stats.n_bytes = flow->byte_count;
        odp_flow->stats.used_sec = flow->used.tv_sec;
        odp_flow->stats.used_nsec = flow->used.tv_nsec;
        odp_flow->stats.tcp_flags = TCP_FLAGS(flow->tcp_ctl);
        odp_flow->stats.ip_tos = flow->ip_tos;
        odp_flow->stats.error = 0;
        if (odp_flow->n_actions > 0) {
            unsigned int n = MIN(odp_flow->n_actions, flow->n_actions);
            memcpy(odp_flow->actions, flow->actions,
                   n * sizeof *odp_flow->actions);
            odp_flow->n_actions = flow->n_actions;
        }

        if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
            flow->tcp_ctl = 0;
        }
    } else {
        odp_flow->stats.error = ENOENT;
    }
}

static int
dpif_netdev_flow_get(const struct dpif *dpif, struct odp_flow flows[], int n)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int i;

    for (i = 0; i < n; i++) {
        struct odp_flow *odp_flow = &flows[i];
        answer_flow_query(dp_netdev_lookup_flow(dp, &odp_flow->key),
                          odp_flow->flags, odp_flow);
    }
    return 0;
}

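/* Checks that each action in 'actions' is well-formed.  Sets '*mutates' to
 * true if executing the actions could modify the packet or require a private
 * copy of it; dpif_netdev_execute() uses this to decide between a deep and a
 * shallow copy. */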
static int
dpif_netdev_validate_actions(const union odp_action *actions, int n_actions,
                             bool *mutates)
{
    unsigned int i;

    *mutates = false;
    for (i = 0; i < n_actions; i++) {
        const union odp_action *a = &actions[i];
        switch (a->type) {
        case ODPAT_OUTPUT:
            if (a->output.port >= MAX_PORTS) {
                return EINVAL;
            }
            break;

        case ODPAT_OUTPUT_GROUP:
            *mutates = true;
            if (a->output_group.group >= N_GROUPS) {
                return EINVAL;
            }
            break;

        case ODPAT_CONTROLLER:
            break;

        case ODPAT_SET_VLAN_VID:
            *mutates = true;
            if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK)) {
                return EINVAL;
            }
            break;

        case ODPAT_SET_VLAN_PCP:
            *mutates = true;
            if (a->vlan_pcp.vlan_pcp & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) {
                return EINVAL;
            }
            break;

        case ODPAT_SET_NW_TOS:
            *mutates = true;
            if (a->nw_tos.nw_tos & IP_ECN_MASK) {
                return EINVAL;
            }
            break;

        case ODPAT_STRIP_VLAN:
        case ODPAT_SET_DL_SRC:
        case ODPAT_SET_DL_DST:
        case ODPAT_SET_NW_SRC:
        case ODPAT_SET_NW_DST:
        case ODPAT_SET_TP_SRC:
        case ODPAT_SET_TP_DST:
            *mutates = true;
            break;

        default:
            return EOPNOTSUPP;
        }
    }
    return 0;
}

static int
set_flow_actions(struct dp_netdev_flow *flow, struct odp_flow *odp_flow)
{
    size_t n_bytes;
    bool mutates;
    int error;

    if (odp_flow->n_actions >= 4096 / sizeof *odp_flow->actions) {
        return EINVAL;
    }
    error = dpif_netdev_validate_actions(odp_flow->actions,
                                         odp_flow->n_actions, &mutates);
    if (error) {
        return error;
    }

    n_bytes = odp_flow->n_actions * sizeof *flow->actions;
    flow->actions = xrealloc(flow->actions, n_bytes);
    flow->n_actions = odp_flow->n_actions;
    memcpy(flow->actions, odp_flow->actions, n_bytes);
    return 0;
}

static int
add_flow(struct dpif *dpif, struct odp_flow *odp_flow)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *flow;
    int error;

    flow = xzalloc(sizeof *flow);
    flow->key = odp_flow->key;
    memset(flow->key.reserved, 0, sizeof flow->key.reserved);

    error = set_flow_actions(flow, odp_flow);
    if (error) {
        free(flow);
        return error;
    }

    hmap_insert(&dp->flow_table, &flow->node, flow_hash(&flow->key, 0));
    return 0;
}

static void
clear_stats(struct dp_netdev_flow *flow)
{
    flow->used.tv_sec = 0;
    flow->used.tv_nsec = 0;
    flow->packet_count = 0;
    flow->byte_count = 0;
    flow->tcp_ctl = 0;
    flow->ip_tos = 0;
}

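/* Implements flow table add/modify.  ODPPF_CREATE inserts a new flow (up to
 * MAX_FLOWS), ODPPF_MODIFY replaces the actions of an existing one, and
 * ODPPF_ZERO_STATS additionally resets its counters. */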
static int
dpif_netdev_flow_put(struct dpif *dpif, struct odp_flow_put *put)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *flow;

    flow = dp_netdev_lookup_flow(dp, &put->flow.key);
    if (!flow) {
        if (put->flags & ODPPF_CREATE) {
            if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
                return add_flow(dpif, &put->flow);
            } else {
                return EFBIG;
            }
        } else {
            return ENOENT;
        }
    } else {
        if (put->flags & ODPPF_MODIFY) {
            int error = set_flow_actions(flow, &put->flow);
            if (!error && put->flags & ODPPF_ZERO_STATS) {
                clear_stats(flow);
            }
            return error;
        } else {
            return EEXIST;
        }
    }
}

static int
dpif_netdev_flow_del(struct dpif *dpif, struct odp_flow *odp_flow)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *flow;

    flow = dp_netdev_lookup_flow(dp, &odp_flow->key);
    if (flow) {
        answer_flow_query(flow, 0, odp_flow);
        dp_netdev_free_flow(dp, flow);
        return 0;
    } else {
        return ENOENT;
    }
}

static int
dpif_netdev_flow_list(const struct dpif *dpif, struct odp_flow flows[], int n)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *flow;
    int i;

    i = 0;
    HMAP_FOR_EACH (flow, struct dp_netdev_flow, node, &dp->flow_table) {
        if (i >= n) {
            break;
        }
        answer_flow_query(flow, 0, &flows[i++]);
    }
    return hmap_count(&dp->flow_table);
}

static int
dpif_netdev_execute(struct dpif *dpif, uint16_t in_port,
                    const union odp_action actions[], int n_actions,
                    const struct ofpbuf *packet)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct ofpbuf copy;
    bool mutates;
    flow_t flow;
    int error;

    if (packet->size < ETH_HEADER_LEN || packet->size > UINT16_MAX) {
        return EINVAL;
    }

    error = dpif_netdev_validate_actions(actions, n_actions, &mutates);
    if (error) {
        return error;
    }

    if (mutates) {
        /* We need a deep copy of 'packet' since we're going to modify its
         * data. */
        ofpbuf_init(&copy, DP_NETDEV_HEADROOM + packet->size);
        copy.data = (char*)copy.base + DP_NETDEV_HEADROOM;
        ofpbuf_put(&copy, packet->data, packet->size);
    } else {
        /* We still need a shallow copy of 'packet', even though we won't
         * modify its data, because flow_extract() modifies packet->l2, etc.
         * We could probably get away with modifying those but it's more polite
         * if we don't. */
        copy = *packet;
    }
    flow_extract(&copy, 0, in_port, &flow);
    error = dp_netdev_execute_actions(dp, &copy, &flow, actions, n_actions);
    if (mutates) {
        ofpbuf_uninit(&copy);
    }
    return error;
}

static int
dpif_netdev_recv_get_mask(const struct dpif *dpif, int *listen_mask)
{
    struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
    *listen_mask = dpif_netdev->listen_mask;
    return 0;
}

static int
dpif_netdev_recv_set_mask(struct dpif *dpif, int listen_mask)
{
    struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
    if (!(listen_mask & ~ODPL_ALL)) {
        dpif_netdev->listen_mask = listen_mask;
        return 0;
    } else {
        return EINVAL;
    }
}

static struct ovs_queue *
find_nonempty_queue(struct dpif *dpif)
{
    struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int mask = dpif_netdev->listen_mask;
    int i;

    for (i = 0; i < N_QUEUES; i++) {
        struct ovs_queue *q = &dp->queues[i];
        if (q->n && mask & (1u << i)) {
            return q;
        }
    }
    return NULL;
}

static int
dpif_netdev_recv(struct dpif *dpif, struct ofpbuf **bufp)
{
    struct ovs_queue *q = find_nonempty_queue(dpif);
    if (q) {
        *bufp = queue_pop_head(q);
        return 0;
    } else {
        return EAGAIN;
    }
}

static void
dpif_netdev_recv_wait(struct dpif *dpif)
{
    struct ovs_queue *q = find_nonempty_queue(dpif);
    if (q) {
        poll_immediate_wake();
    } else {
        /* No messages ready to be received, and dp_wait() will ensure that we
         * wake up to queue new messages, so there is nothing to do. */
    }
}

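/* Updates 'flow''s statistics for a newly received 'packet': last-used time,
 * packet and byte counters, last-seen IP TOS, and the running OR of the TCP
 * flags. */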
static void
dp_netdev_flow_used(struct dp_netdev_flow *flow, const flow_t *key,
                    const struct ofpbuf *packet)
{
    time_timespec(&flow->used);
    flow->packet_count++;
    flow->byte_count += packet->size;
    if (key->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *nh = packet->l3;
        flow->ip_tos = nh->ip_tos;

        if (key->nw_proto == IPPROTO_TCP) {
            struct tcp_header *th = packet->l4;
            flow->tcp_ctl |= th->tcp_ctl;
        }
    }
}

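/* Per-packet datapath pipeline: extract the flow key, optionally drop IP
 * fragments, then either execute the matching flow's actions or queue the
 * packet to the client as a miss. */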
static void
dp_netdev_port_input(struct dp_netdev *dp, struct dp_netdev_port *port,
                     struct ofpbuf *packet)
{
    struct dp_netdev_flow *flow;
    flow_t key;

    if (flow_extract(packet, 0, port->port_no, &key) && dp->drop_frags) {
        dp->n_frags++;
        return;
    }

    flow = dp_netdev_lookup_flow(dp, &key);
    if (flow) {
        dp_netdev_flow_used(flow, &key, packet);
        dp_netdev_execute_actions(dp, packet, &key,
                                  flow->actions, flow->n_actions);
        dp->n_hit++;
    } else {
        dp->n_missed++;
        dp_netdev_output_control(dp, packet, _ODPL_MISS_NR, port->port_no, 0);
    }
}

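/* Polls every port of every netdev datapath once for received packets.  A
 * single reusable buffer is shared across ports; netdev_recv() is
 * non-blocking, so EAGAIN simply means a port had nothing to deliver. */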
static void
dp_netdev_run(void)
{
    struct ofpbuf packet;
    struct dp_netdev *dp;

    ofpbuf_init(&packet, DP_NETDEV_HEADROOM + max_mtu);
    LIST_FOR_EACH (dp, struct dp_netdev, node, &dp_netdev_list) {
        struct dp_netdev_port *port;

        LIST_FOR_EACH (port, struct dp_netdev_port, node, &dp->port_list) {
            int error;

            /* Reset packet contents. */
            packet.data = (char*)packet.base + DP_NETDEV_HEADROOM;
            packet.size = 0;

            error = netdev_recv(port->netdev, &packet);
            if (!error) {
                dp_netdev_port_input(dp, port, &packet);
            } else if (error != EAGAIN) {
                struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                            netdev_get_name(port->netdev), strerror(error));
            }
        }
    }
    ofpbuf_uninit(&packet);
}

static void
dp_netdev_wait(void)
{
    struct dp_netdev *dp;

    LIST_FOR_EACH (dp, struct dp_netdev, node, &dp_netdev_list) {
        struct dp_netdev_port *port;
        LIST_FOR_EACH (port, struct dp_netdev_port, node, &dp->port_list) {
            netdev_recv_wait(port->netdev);
        }
    }
}

/* Modify the TCI field of 'packet'.  If a VLAN tag is not present, one
 * is added with the TCI field set to 'tci'.  If a VLAN tag is present,
 * then 'mask' bits are cleared before 'tci' is logically OR'd into the
 * existing tag.
 *
 * Note that the function does not ensure that 'tci' does not affect
 * bits outside of 'mask'.
 */
static void
dp_netdev_modify_vlan_tci(struct ofpbuf *packet, const flow_t *key,
                          uint16_t tci, uint16_t mask)
{
    struct vlan_eth_header *veh;

    if (key->dl_vlan != htons(ODP_VLAN_NONE)) {
        /* Clear 'mask' bits, but maintain other TCI bits. */
        veh = packet->l2;
        veh->veth_tci &= ~htons(mask);
        veh->veth_tci |= htons(tci);
    } else {
        /* Insert new 802.1Q header. */
        struct eth_header *eh = packet->l2;
        struct vlan_eth_header tmp;
        memcpy(tmp.veth_dst, eh->eth_dst, ETH_ADDR_LEN);
        memcpy(tmp.veth_src, eh->eth_src, ETH_ADDR_LEN);
        tmp.veth_type = htons(ETH_TYPE_VLAN);
        tmp.veth_tci = htons(tci);
        tmp.veth_next_type = eh->eth_type;

        veh = ofpbuf_push_uninit(packet, VLAN_HEADER_LEN);
        memcpy(veh, &tmp, sizeof tmp);
        packet->l2 = (char*)packet->l2 - VLAN_HEADER_LEN;
    }
}

static void
dp_netdev_strip_vlan(struct ofpbuf *packet)
{
    struct vlan_eth_header *veh = packet->l2;
    if (veh->veth_type == htons(ETH_TYPE_VLAN)) {
        struct eth_header tmp;

        memcpy(tmp.eth_dst, veh->veth_dst, ETH_ADDR_LEN);
        memcpy(tmp.eth_src, veh->veth_src, ETH_ADDR_LEN);
        tmp.eth_type = veh->veth_next_type;

        packet->size -= VLAN_HEADER_LEN;
        packet->data = (char*)packet->data + VLAN_HEADER_LEN;
        packet->l2 = (char*)packet->l2 + VLAN_HEADER_LEN;
        memcpy(packet->data, &tmp, sizeof tmp);
    }
}

static void
dp_netdev_set_dl_src(struct ofpbuf *packet, const uint8_t dl_addr[ETH_ADDR_LEN])
{
    struct eth_header *eh = packet->l2;
    memcpy(eh->eth_src, dl_addr, sizeof eh->eth_src);
}

static void
dp_netdev_set_dl_dst(struct ofpbuf *packet, const uint8_t dl_addr[ETH_ADDR_LEN])
{
    struct eth_header *eh = packet->l2;
    memcpy(eh->eth_dst, dl_addr, sizeof eh->eth_dst);
}

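/* Rewrites the IPv4 source or destination address and incrementally updates
 * the affected checksums (IP header, plus the TCP or UDP pseudo-header) in
 * the style of RFC 1624, rather than recomputing them from scratch.  A UDP
 * checksum of zero means "no checksum", and a computed checksum that comes
 * out zero is stored as 0xffff. */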
static void
dp_netdev_set_nw_addr(struct ofpbuf *packet, const flow_t *key,
                      const struct odp_action_nw_addr *a)
{
    if (key->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *nh = packet->l3;
        uint32_t *field;

        field = a->type == ODPAT_SET_NW_SRC ? &nh->ip_src : &nh->ip_dst;
        if (key->nw_proto == IP_TYPE_TCP) {
            struct tcp_header *th = packet->l4;
            th->tcp_csum = recalc_csum32(th->tcp_csum, *field, a->nw_addr);
        } else if (key->nw_proto == IP_TYPE_UDP) {
            struct udp_header *uh = packet->l4;
            if (uh->udp_csum) {
                uh->udp_csum = recalc_csum32(uh->udp_csum, *field, a->nw_addr);
                if (!uh->udp_csum) {
                    uh->udp_csum = 0xffff;
                }
            }
        }
        nh->ip_csum = recalc_csum32(nh->ip_csum, *field, a->nw_addr);
        *field = a->nw_addr;
    }
}

static void
dp_netdev_set_nw_tos(struct ofpbuf *packet, const flow_t *key,
                     const struct odp_action_nw_tos *a)
{
    if (key->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *nh = packet->l3;
        uint8_t *field = &nh->ip_tos;

        /* Set the DSCP bits and preserve the ECN bits. */
        uint8_t new = a->nw_tos | (nh->ip_tos & IP_ECN_MASK);

        /* Update the checksum with the full new value, including the
         * preserved ECN bits; using 'a->nw_tos' here would corrupt the
         * checksum whenever those bits are nonzero. */
        nh->ip_csum = recalc_csum16(nh->ip_csum, htons((uint16_t)*field),
                                    htons((uint16_t)new));
        *field = new;
    }
}

static void
dp_netdev_set_tp_port(struct ofpbuf *packet, const flow_t *key,
                      const struct odp_action_tp_port *a)
{
    if (key->dl_type == htons(ETH_TYPE_IP)) {
        uint16_t *field;
        if (key->nw_proto == IPPROTO_TCP) {
            struct tcp_header *th = packet->l4;
            field = a->type == ODPAT_SET_TP_SRC ? &th->tcp_src : &th->tcp_dst;
            th->tcp_csum = recalc_csum16(th->tcp_csum, *field, a->tp_port);
            *field = a->tp_port;
        } else if (key->nw_proto == IPPROTO_UDP) {
            struct udp_header *uh = packet->l4;
            field = a->type == ODPAT_SET_TP_SRC ? &uh->udp_src : &uh->udp_dst;
            uh->udp_csum = recalc_csum16(uh->udp_csum, *field, a->tp_port);
            *field = a->tp_port;
        }
    }
}

static void
dp_netdev_output_port(struct dp_netdev *dp, struct ofpbuf *packet,
                      uint16_t out_port)
{
    struct dp_netdev_port *p = dp->ports[out_port];
    if (p) {
        netdev_send(p->netdev, packet);
    }
}

static void
dp_netdev_output_group(struct dp_netdev *dp, uint16_t group, uint16_t in_port,
                       struct ofpbuf *packet)
{
    struct odp_port_group *g = &dp->groups[group];
    int i;

    for (i = 0; i < g->n_ports; i++) {
        uint16_t out_port = g->ports[i];
        if (out_port != in_port) {
            dp_netdev_output_port(dp, packet, out_port);
        }
    }
}

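/* Queues 'packet' to the client on queue 'queue_no' (_ODPL_MISS_NR or
 * _ODPL_ACTION_NR), prefixed by a struct odp_msg header.  If the queue is
 * already full, the packet is counted as lost and ENOBUFS is returned. */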
static int
dp_netdev_output_control(struct dp_netdev *dp, const struct ofpbuf *packet,
                         int queue_no, int port_no, uint32_t arg)
{
    struct ovs_queue *q = &dp->queues[queue_no];
    struct odp_msg *header;
    struct ofpbuf *msg;
    size_t msg_size;

    if (q->n >= MAX_QUEUE_LEN) {
        dp->n_lost++;
        return ENOBUFS;
    }

    msg_size = sizeof *header + packet->size;
    msg = ofpbuf_new(msg_size + DPIF_RECV_MSG_PADDING);
    ofpbuf_reserve(msg, DPIF_RECV_MSG_PADDING);
    header = ofpbuf_put_uninit(msg, sizeof *header);
    header->type = queue_no;
    header->length = msg_size;
    header->port = port_no;
    header->arg = arg;
    ofpbuf_put(msg, packet->data, packet->size);
    queue_push_tail(q, msg);

    return 0;
}

static int
dp_netdev_execute_actions(struct dp_netdev *dp,
                          struct ofpbuf *packet, const flow_t *key,
                          const union odp_action *actions, int n_actions)
{
    int i;
    for (i = 0; i < n_actions; i++) {
        const union odp_action *a = &actions[i];

        switch (a->type) {
        case ODPAT_OUTPUT:
            dp_netdev_output_port(dp, packet, a->output.port);
            break;

        case ODPAT_OUTPUT_GROUP:
            dp_netdev_output_group(dp, a->output_group.group, key->in_port,
                                   packet);
            break;

        case ODPAT_CONTROLLER:
            dp_netdev_output_control(dp, packet, _ODPL_ACTION_NR,
                                     key->in_port, a->controller.arg);
            break;

        case ODPAT_SET_VLAN_VID:
            dp_netdev_modify_vlan_tci(packet, key, ntohs(a->vlan_vid.vlan_vid),
                                      VLAN_VID_MASK);
            break;

        case ODPAT_SET_VLAN_PCP:
            dp_netdev_modify_vlan_tci(
                packet, key, a->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT,
                VLAN_PCP_MASK);
            break;

        case ODPAT_STRIP_VLAN:
            dp_netdev_strip_vlan(packet);
            break;

        case ODPAT_SET_DL_SRC:
            dp_netdev_set_dl_src(packet, a->dl_addr.dl_addr);
            break;

        case ODPAT_SET_DL_DST:
            dp_netdev_set_dl_dst(packet, a->dl_addr.dl_addr);
            break;

        case ODPAT_SET_NW_SRC:
        case ODPAT_SET_NW_DST:
            dp_netdev_set_nw_addr(packet, key, &a->nw_addr);
            break;

        case ODPAT_SET_NW_TOS:
            dp_netdev_set_nw_tos(packet, key, &a->nw_tos);
            break;

        case ODPAT_SET_TP_SRC:
        case ODPAT_SET_TP_DST:
            dp_netdev_set_tp_port(packet, key, &a->tp_port);
            break;
        }
    }
    return 0;
}

const struct dpif_class dpif_netdev_class = {
    "netdev",
    dp_netdev_run,
    dp_netdev_wait,
    NULL,                       /* enumerate */
    dpif_netdev_open,
    dpif_netdev_close,
    NULL,                       /* get_all_names */
    dpif_netdev_destroy,
    dpif_netdev_get_stats,
    dpif_netdev_get_drop_frags,
    dpif_netdev_set_drop_frags,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    dpif_netdev_port_list,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_port_group_get,
    dpif_netdev_port_group_set,
    dpif_netdev_flow_get,
    dpif_netdev_flow_put,
    dpif_netdev_flow_del,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_list,
    dpif_netdev_execute,
    dpif_netdev_recv_get_mask,
    dpif_netdev_recv_set_mask,
    NULL,                       /* get_sflow_probability */
    NULL,                       /* set_sflow_probability */
    NULL,                       /* queue_to_priority */
    dpif_netdev_recv,
    dpif_netdev_recv_wait,
};
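
/* Usage sketch (illustrative, not taken from this file): a client reaches
 * this implementation through the generic dpif layer by naming the "netdev"
 * type, roughly:
 *
 *     struct dpif *dpif;
 *     int error = dpif_create("dp0", "netdev", &dpif);
 *     if (!error) {
 *         uint16_t port_no;
 *         dpif_port_add(dpif, "eth1", 0, &port_no);
 *     }
 *
 * The exact dpif_create()/dpif_port_add() signatures are assumptions drawn
 * from the dpif API of this era, not something this file defines. */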