/*
 * Copyright (c) 2007-2011 Nicira Networks.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
static struct kmem_cache *flow_cache;

static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
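
/* Translates the jiffies timestamp of a flow's last use into wall-clock
 * milliseconds: take the current time in ms and subtract how long the flow
 * has been idle. */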
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
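
/* Number of bytes of struct sw_flow_key up to and including 'field', used
 * below to report how many leading bytes of a key a parser filled in. */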
#define SW_FLOW_KEY_OFFSET(field)				\
	(offsetof(struct sw_flow_key, field) +			\
	 FIELD_SIZEOF(struct sw_flow_key, field))
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	u8 nexthdr;
	__be16 frag_off;
	int err;

	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
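
/* Byte offset of the flags field within the TCP header and a mask for the six
 * standard flag bits (FIN, SYN, RST, PSH, ACK, URG); ovs_flow_used() reads the
 * flags byte directly rather than going through struct tcphdr. */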
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if (flow->key.eth.type == htons(ETH_P_IP) &&
	    flow->key.ip.proto == IPPROTO_TCP) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}
struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
	int actions_len = nla_len(actions);
	struct sw_flow_actions *sfa;

	/* At least DP_MAX_PORTS actions are required to be able to flood a
	 * packet to every port.  Factor of 2 allows for setting VLAN tags,
	 * etc. */
	if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = actions_len;
	memcpy(sfa->actions, nla_data(actions), actions_len);
	return sfa;
}
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	atomic_set(&flow->refcnt, 1);
	flow->sf_acts = NULL;

	return flow;
}
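
/* The bucket for a given flow hash is chosen by mixing the hash with a
 * per-table random seed, so the distribution changes whenever a new table is
 * allocated (see ovs_flow_tbl_rehash() and ovs_flow_tbl_expand() below). */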
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets,
			      (hash & (table->n_buckets - 1)));
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->buckets = alloc_buckets(new_size);

	if (!table->buckets) {
		kfree(table);
		return NULL;
	}
	table->n_buckets = new_size;
	table->count = 0;
	table->node_ver = 0;
	table->keep_flows = false;
	get_random_bytes(&table->hash_seed, sizeof(u32));

	return table;
}
static void flow_free(struct sw_flow *flow)
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			flow_free(flow);
		}
	}

skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	ovs_flow_tbl_destroy(table);
}
void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
	if (!table)
		return;

	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;
	int ver;
	int i;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;
		struct hlist_node *n;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
			ovs_flow_tbl_insert(new, flow);
	}
	old->keep_flows = true;
}
static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
	struct flow_table *new_table;

	new_table = ovs_flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	flow_table_copy_flows(table, new_table);

	return new_table;
}
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}
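
/* ovs_flow_tbl_rehash() rebuilds the table at its current size, while
 * ovs_flow_tbl_expand() doubles the number of buckets.  Either way the new
 * table gets a fresh random hash_seed from ovs_flow_tbl_alloc(), so existing
 * flows are redistributed across buckets as they are copied over. */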
/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	ovs_flow_put(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}
void ovs_flow_hold(struct sw_flow *flow)
{
	atomic_inc(&flow->refcnt);
}
void ovs_flow_put(struct sw_flow *flow)
{
	if (unlikely(!flow))
		return;

	if (atomic_dec_and_test(&flow->refcnt)) {
		kfree((struct sf_flow_acts __force *)flow->sf_acts);
		kmem_cache_free(flow_cache, flow);
	}
}
/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
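
/* Reads the 16-bit type/length field that follows the Ethernet addresses and
 * returns the frame's effective Ethertype: values of 1536 (0x600) and above
 * are Ethernet II Ethertypes; smaller values are 802.3 lengths, in which case
 * the SNAP Ethertype is returned if a well-formed LLC/SNAP header follows and
 * ETH_P_802_2 otherwise. */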
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}
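
/* ICMPv6 is parsed like the other transport protocols, except that neighbour
 * solicitation and advertisement messages also contribute the ND target
 * address and any source/target link-layer address options to the flow key. */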
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order. */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* In order to process neighbor discovery options, we need the
		 * entire packet. */
		if (unlikely(icmp_len < sizeof(*nd)))
		if (unlikely(skb_linearize(skb))) {

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		icmp_len -= sizeof(*nd);
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice. */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
				memcpy(key->ipv6.nd.sll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
				memcpy(key->ipv6.nd.tll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);

	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		     int *key_lenp)
{
	int key_len = SW_FLOW_KEY_OFFSET(eth);

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	key->phy.tun_id = OVS_CB(skb)->tun_id;
	key->phy.in_port = in_port;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area. */
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	__skb_pull(skb, 2 * ETH_ALEN);

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	if (key->eth.type == htons(ETH_P_IP)) {

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;

		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
			key->ip.frag = OVS_FRAG_TYPE_LATER;

		if (nh->frag_off & htons(IP_MF) ||
			 skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;

		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;

		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);

	} else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);

			if (key->ip.proto == ARPOP_REQUEST
					|| key->ip.proto == ARPOP_REPLY) {
				memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
				memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
				memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
				memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
				key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);

	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL)
				skb->transport_header = skb->network_header;

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;

		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;

		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);
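
/* The flow hash covers only the first 'key_len' bytes of the key, i.e. the
 * portion that a parser actually filled in; jhash2() walks it in 32-bit words.
 * Because ovs_flow_extract() and ovs_flow_from_nlattrs() zero the whole key
 * first, any padding inside those key_len bytes compares equal under the
 * memcmp() in ovs_flow_tbl_lookup(). */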
u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
				    struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash;

	hash = ovs_flow_hash(key, key_len);

	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {

		if (flow->hash == hash &&
		    !memcmp(&flow->key, key, key_len)) {
			return flow;
		}
	}
	return NULL;
}
void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
	table->count++;
}
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	table->count--;
	BUG_ON(table->count < 0);
}
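
/* A minimal sketch of the lookup fast path, assuming the caller has already
 * extracted 'key' (illustrative only; readers walk the table under RCU while
 * insertion and removal are serialized by the caller):
 *
 *	rcu_read_lock();
 *	flow = ovs_flow_tbl_lookup(table, &key, key_len);
 *	if (flow)
 *		ovs_flow_used(flow, skb);
 *	rcu_read_unlock();
 */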
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),

	[OVS_KEY_ATTR_TUN_ID] = sizeof(__be64),
};
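
/* A length of -1 marks a variable-length attribute: OVS_KEY_ATTR_ENCAP carries
 * a nested attribute sequence, so parse_flow_nlattrs() skips the nla_len()
 * check for it. */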
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u64 *attrs)
{
	const struct ovs_key_icmp *icmp_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv4.tp.src = tcp_key->tcp_src;
		swkey->ipv4.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv4.tp.src = udp_key->udp_src;
		swkey->ipv4.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
		break;
	}

	return 0;
}
static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u64 *attrs)
{
	const struct ovs_key_icmpv6 *icmpv6_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv6.tp.src = tcp_key->tcp_src;
		swkey->ipv6.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv6.tp.src = udp_key->udp_src;
		swkey->ipv6.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMPV6:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
			const struct ovs_key_nd *nd_key;

			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
				return -EINVAL;
			*attrs &= ~(1 << OVS_KEY_ATTR_ND);

			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
		}
		break;
	}

	return 0;
}
static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = 0;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type))
			return -EINVAL;

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1)
			return -EINVAL;

		attrs |= 1ULL << type;
		a[type] = nla;
	}
	if (rem)
		return -EINVAL;

	*attrsp = attrs;
	return 0;
}
/**
 * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
			  const struct nlattr *attr)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct ovs_key_ethernet *eth_key;

	memset(swkey, 0, sizeof(struct sw_flow_key));
	key_len = SW_FLOW_KEY_OFFSET(eth);

	err = parse_flow_nlattrs(attr, a, &attrs);

	/* Metadata attributes. */
	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);

	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
		if (in_port >= DP_MAX_PORTS)
		swkey->phy.in_port = in_port;
		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
		swkey->phy.in_port = USHRT_MAX;

	if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID)) {
		swkey->phy.tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID);

	/* Data attributes. */
	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

	if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
		const struct nlattr *encap;

		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
			      (1 << OVS_KEY_ATTR_ENCAP)))

		encap = a[OVS_KEY_ATTR_ENCAP];
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (tci & htons(VLAN_TAG_PRESENT)) {
			swkey->eth.tci = tci;

			err = parse_flow_nlattrs(encap, a, &attrs);
			/* Corner case for truncated 802.1Q header. */
			swkey->eth.type = htons(ETH_P_8021Q);
			*key_lenp = key_len;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (ntohs(swkey->eth.type) < 1536)
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
		swkey->eth.type = htons(ETH_P_802_2);

	if (swkey->eth.type == htons(ETH_P_IP)) {
		const struct ovs_key_ipv4 *ipv4_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
		swkey->ip.proto = ipv4_key->ipv4_proto;
		swkey->ip.tos = ipv4_key->ipv4_tos;
		swkey->ip.ttl = ipv4_key->ipv4_ttl;
		swkey->ip.frag = ipv4_key->ipv4_frag;
		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);

	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
		swkey->ipv6.label = ipv6_key->ipv6_label;
		swkey->ip.proto = ipv6_key->ipv6_proto;
		swkey->ip.tos = ipv6_key->ipv6_tclass;
		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
		swkey->ip.frag = ipv6_key->ipv6_frag;
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(swkey->ipv6.addr.dst));

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);

	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		const struct ovs_key_arp *arp_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
		attrs &= ~(1 << OVS_KEY_ATTR_ARP);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		swkey->ipv4.addr.src = arp_key->arp_sip;
		swkey->ipv4.addr.dst = arp_key->arp_tip;
		if (arp_key->arp_op & htons(0xff00))
		swkey->ip.proto = ntohs(arp_key->arp_op);
		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);

	*key_lenp = key_len;
/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @priority: receives the extracted skb priority.
 * @in_port: receives the extracted input port.
 * @tun_id: receives the extracted tunnel ID.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by ovs_flow_from_nlattrs(), but only enough of
 * it to get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
				   const struct nlattr *attr)
{
	const struct nlattr *nla;

	*in_port = USHRT_MAX;

	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
			if (nla_len(nla) != ovs_key_lens[type])

			case OVS_KEY_ATTR_PRIORITY:
				*priority = nla_get_u32(nla);

			case OVS_KEY_ATTR_TUN_ID:
				*tun_id = nla_get_be64(nla);

			case OVS_KEY_ATTR_IN_PORT:
				if (nla_get_u32(nla) >= DP_MAX_PORTS)
				*in_port = nla_get_u32(nla);
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (swkey->phy.priority)
		NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);

	if (swkey->phy.tun_id != cpu_to_be64(0))
		NLA_PUT_BE64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun_id);

	if (swkey->phy.in_port != USHRT_MAX)
		NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
		NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)

	if (swkey->eth.type == htons(ETH_P_802_2))

	NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
		ipv4_key->ipv4_ttl = swkey->ip.ttl;
		ipv4_key->ipv4_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = swkey->ipv6.label;
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tclass = swkey->ip.tos;
		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
		ipv6_key->ipv6_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}

	nla_nest_end(skb, encap);
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}