/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#include <asm/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/inet_ecn.h>
#include <net/ndisc.h>
static struct kmem_cache *flow_cache;
static unsigned int hash_seed __read_mostly;
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}
static inline bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
static inline int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
static inline bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}
static inline bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}
static inline bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
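/* flow_used_time() below converts a "last used" time stamp recorded in
 * jiffies into an absolute time in milliseconds on the monotonic clock: the
 * idle time (the jiffies delta converted with jiffies_to_msecs()) is
 * subtracted from the current ktime_get_ts() reading.  For example, with
 * HZ == 1000, a flow last touched 5000 jiffies ago is reported as
 * "now minus 5000 ms". */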
u64 flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
#define SW_FLOW_KEY_OFFSET(field)				\
	(offsetof(struct sw_flow_key, field) +			\
	 FIELD_SIZEOF(struct sw_flow_key, field))
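/* For example, SW_FLOW_KEY_OFFSET(ipv4.addr) is the offset of the ipv4.addr
 * member plus its size, i.e. the number of leading bytes of struct
 * sw_flow_key that are meaningful once the IPv4 addresses have been filled
 * in.  The parsers below use this to report how much of the key they have
 * actually initialized. */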
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int payload_ofs;
	u8 nexthdr;
	int err;

	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.addr);

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh) & ~INET_ECN_MASK;
	ipv6_addr_copy(&key->ipv6.addr.src, &nh->saddr);
	ipv6_addr_copy(&key->ipv6.addr.dst, &nh->daddr);

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;

	return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
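/* Byte 13 of the TCP header holds the flag bits; its low six bits are FIN,
 * SYN, RST, PSH, ACK and URG, which is exactly what TCP_FLAG_MASK (0x3f)
 * keeps when flow_used() accumulates flags below. */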
void flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if (flow->key.eth.type == htons(ETH_P_IP) &&
	    flow->key.ip.proto == IPPROTO_TCP) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock_bh(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock_bh(&flow->lock);
}
struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
{
	int actions_len = nla_len(actions);
	struct sw_flow_actions *sfa;

	/* At least DP_MAX_PORTS actions are required to be able to flood a
	 * packet to every port.  Factor of 2 allows for setting VLAN tags,
	 * etc. */
	if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = actions_len;
	memcpy(sfa->actions, nla_data(actions), actions_len);
	return sfa;
}
struct sw_flow *flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	atomic_set(&flow->refcnt, 1);
	flow->sf_acts = NULL;

	return flow;
}
static struct hlist_head __rcu *find_bucket(struct flow_table *table, u32 hash)
{
	return flex_array_get(table->buckets,
			      (hash & (table->n_buckets - 1)));
}
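/* The bucket index above is computed as hash & (n_buckets - 1), which is
 * equivalent to a modulo only when n_buckets is a power of two;
 * flow_tbl_expand() preserves that property by doubling the bucket count. */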
static struct flex_array __rcu *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array __rcu *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
struct flow_table *flow_tbl_alloc(int new_size)
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	table->buckets = alloc_buckets(new_size);

	if (!table->buckets) {
	table->n_buckets = new_size;

static void flow_free(struct sw_flow *flow)
void flow_tbl_destroy(struct flow_table *table)
	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node) {
			hlist_del_init_rcu(&flow->hash_node);

	free_buckets(table->buckets);
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	flow_tbl_destroy(table);
}

void flow_tbl_deferred_destroy(struct flow_table *table)
{
	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;

	while (*bucket < table->n_buckets) {
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node) {
struct flow_table *flow_tbl_expand(struct flow_table *table)
{
	struct flow_table *new_table;
	int n_buckets = table->n_buckets * 2;
	int i;

	new_table = flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;
		struct hlist_node *n, *pos;

		head = flex_array_get(table->buckets, i);

		hlist_for_each_entry_safe(flow, n, pos, head, hash_node) {
			hlist_del_init_rcu(&flow->hash_node);
			flow_tbl_insert(new_table, flow);
		}
	}

	return new_table;
}
/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}
void flow_hold(struct sw_flow *flow)
{
	atomic_inc(&flow->refcnt);
}

void flow_put(struct sw_flow *flow)
{
	if (atomic_dec_and_test(&flow->refcnt)) {
		kfree((struct sw_flow_actions __force *)flow->sf_acts);
		kmem_cache_free(flow_cache, flow);
	}
}
/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}
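/* The comparison against 1536 (0x600) above is the usual IEEE 802.3 rule for
 * distinguishing EtherTypes from frame lengths: values of 1536 and above are
 * EtherTypes, anything smaller means an 802.3/802.2 frame, possibly with a
 * SNAP header that carries the real EtherType. */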
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
		if (unlikely(skb_linearize(skb))) {

		nd = (struct nd_msg *)skb_transport_header(skb);
		ipv6_addr_copy(&key->ipv6.nd.target, &nd->target);
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		icmp_len -= sizeof(*nd);

		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
				memcpy(key->ipv6.nd.sll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
				memcpy(key->ipv6.nd.tll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);

	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
/**
 * flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 * @is_frag: set to 1 if @skb contains an IPv4 fragment, or to 0 if @skb does
 * not contain an IPv4 packet or if it is not a fragment.
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
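/*
 * As a concrete illustration of the layout described above: for a
 * VLAN-tagged IPv4/TCP frame, skb->mac_header ends up at offset 0,
 * skb->network_header at offset 18 (the 14-byte Ethernet header plus the
 * 4-byte 802.1Q tag), and skb->transport_header just past the (typically
 * 20-byte) IPv4 header.
 */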
int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		 int *key_lenp, bool *is_frag)
	int key_len = SW_FLOW_KEY_OFFSET(eth);

	memset(key, 0, sizeof(*key));
	key->eth.tun_id = OVS_CB(skb)->tun_id;
	key->eth.in_port = in_port;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
	__skb_pull(skb, 2 * ETH_ALEN);

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	if (key->eth.type == htons(ETH_P_IP)) {
		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;

		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;
		key->ip.tos = nh->tos & ~INET_ECN_MASK;
		key->ip.proto = nh->protocol;

		/* Transport layer. */
		if ((nh->frag_off & htons(IP_MF | IP_OFFSET)) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP))

		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (!*is_frag && tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (!*is_frag && udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (!*is_frag && icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store them
				 * in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
	} else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);

			if (key->ip.proto == ARPOP_REQUEST
					|| key->ip.proto == ARPOP_REPLY) {
				memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
				memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
				memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
				memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
				key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;	/* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL)
				skb->transport_header = skb->network_header;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);
u32 flow_hash(const struct sw_flow_key *key, int key_len)
{
	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
}
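/* The hash covers only the first key_len bytes of the key (rounded up to
 * whole 32-bit words for jhash2), so keys of different protocols hash over
 * different-sized prefixes; hash_seed is filled with random bytes once at
 * module init to make the hash unpredictable. */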
struct sw_flow *flow_tbl_lookup(struct flow_table *table,
				struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash;

	hash = flow_hash(key, key_len);

	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node) {
		if (flow->hash == hash &&
		    !memcmp(&flow->key, key, key_len)) {
			return flow;
		}
	}
	return NULL;
}
void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node, head);
	table->count++;
}
void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	if (!hlist_unhashed(&flow->hash_node)) {
		hlist_del_init_rcu(&flow->hash_node);
		table->count--;
		BUG_ON(table->count < 0);
	}
}
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
static const u32 key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_TUN_ID] = 8,
	[OVS_KEY_ATTR_IN_PORT] = 4,
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_8021Q] = sizeof(struct ovs_key_8021q),
	[OVS_KEY_ATTR_ETHERTYPE] = 2,
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
};
/**
 * flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This state machine accepts the following forms, with [] for optional
 * elements and | for alternatives:
 *
 * [tun_id] [in_port] ethernet [8021q] [ethertype \
 *              [IPv4 [TCP|UDP|ICMP] | IPv6 [TCP|UDP|ICMPv6 [ND]] | ARP]]
 */
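/* For example, the key for a TCP segment in an IPv4 packet received on a
 * VLAN arrives as the attribute sequence
 *
 *     in_port, ethernet, 8021q, ethertype(0x0800), ipv4, tcp
 *
 * which the TRANSITION()-based switch below accepts one attribute at a
 * time. */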
int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
		      const struct nlattr *attr)
	const struct nlattr *nla;

	memset(swkey, 0, sizeof(*swkey));
	swkey->eth.in_port = USHRT_MAX;
	swkey->eth.type = htons(ETH_P_802_2);
	key_len = SW_FLOW_KEY_OFFSET(eth);

	prev_type = OVS_KEY_ATTR_UNSPEC;
	nla_for_each_nested(nla, attr, rem) {
		const struct ovs_key_ethernet *eth_key;
		const struct ovs_key_8021q *q_key;
		const struct ovs_key_ipv4 *ipv4_key;
		const struct ovs_key_ipv6 *ipv6_key;
		const struct ovs_key_tcp *tcp_key;
		const struct ovs_key_udp *udp_key;
		const struct ovs_key_icmp *icmp_key;
		const struct ovs_key_icmpv6 *icmpv6_key;
		const struct ovs_key_arp *arp_key;
		const struct ovs_key_nd *nd_key;

		int type = nla_type(nla);

		if (type > OVS_KEY_ATTR_MAX || nla_len(nla) != key_lens[type])

#define TRANSITION(PREV_TYPE, TYPE) (((PREV_TYPE) << 16) | (TYPE))
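		/* TRANSITION() packs the previous attribute type into the high
		 * 16 bits and the current one into the low 16 bits; e.g.
		 * TRANSITION(OVS_KEY_ATTR_IPV4, OVS_KEY_ATTR_TCP) is
		 * (OVS_KEY_ATTR_IPV4 << 16) | OVS_KEY_ATTR_TCP, so each legal
		 * predecessor/attribute pair gets its own case label below. */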
		switch (TRANSITION(prev_type, type)) {
		case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_TUN_ID):
			swkey->eth.tun_id = nla_get_be64(nla);

		case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_IN_PORT):
		case TRANSITION(OVS_KEY_ATTR_TUN_ID, OVS_KEY_ATTR_IN_PORT):
			if (nla_get_u32(nla) >= DP_MAX_PORTS)
			swkey->eth.in_port = nla_get_u32(nla);

		case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_ETHERNET):
		case TRANSITION(OVS_KEY_ATTR_TUN_ID, OVS_KEY_ATTR_ETHERNET):
		case TRANSITION(OVS_KEY_ATTR_IN_PORT, OVS_KEY_ATTR_ETHERNET):
			eth_key = nla_data(nla);
			memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
			memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

		case TRANSITION(OVS_KEY_ATTR_ETHERNET, OVS_KEY_ATTR_8021Q):
			q_key = nla_data(nla);
			/* Only standard 0x8100 VLANs currently supported. */
			if (q_key->q_tpid != htons(ETH_P_8021Q))
			if (q_key->q_tci & htons(VLAN_TAG_PRESENT))
			swkey->eth.tci = q_key->q_tci | htons(VLAN_TAG_PRESENT);

		case TRANSITION(OVS_KEY_ATTR_8021Q, OVS_KEY_ATTR_ETHERTYPE):
		case TRANSITION(OVS_KEY_ATTR_ETHERNET, OVS_KEY_ATTR_ETHERTYPE):
			swkey->eth.type = nla_get_be16(nla);
			if (ntohs(swkey->eth.type) < 1536)

		case TRANSITION(OVS_KEY_ATTR_ETHERTYPE, OVS_KEY_ATTR_IPV4):
			key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
			if (swkey->eth.type != htons(ETH_P_IP))
			ipv4_key = nla_data(nla);
			swkey->ip.proto = ipv4_key->ipv4_proto;
			swkey->ip.tos = ipv4_key->ipv4_tos;
			swkey->ipv4.addr.src = ipv4_key->ipv4_src;
			swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
			if (swkey->ip.tos & INET_ECN_MASK)

		case TRANSITION(OVS_KEY_ATTR_ETHERTYPE, OVS_KEY_ATTR_IPV6):
			key_len = SW_FLOW_KEY_OFFSET(ipv6.addr);
			if (swkey->eth.type != htons(ETH_P_IPV6))
			ipv6_key = nla_data(nla);
			swkey->ip.proto = ipv6_key->ipv6_proto;
			swkey->ip.tos = ipv6_key->ipv6_tos;
			memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
			       sizeof(swkey->ipv6.addr.src));
			memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
			       sizeof(swkey->ipv6.addr.dst));
			if (swkey->ip.tos & INET_ECN_MASK)

		case TRANSITION(OVS_KEY_ATTR_IPV4, OVS_KEY_ATTR_TCP):
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (swkey->ip.proto != IPPROTO_TCP)
			tcp_key = nla_data(nla);
			swkey->ipv4.tp.src = tcp_key->tcp_src;
			swkey->ipv4.tp.dst = tcp_key->tcp_dst;

		case TRANSITION(OVS_KEY_ATTR_IPV6, OVS_KEY_ATTR_TCP):
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (swkey->ip.proto != IPPROTO_TCP)
			tcp_key = nla_data(nla);
			swkey->ipv6.tp.src = tcp_key->tcp_src;
			swkey->ipv6.tp.dst = tcp_key->tcp_dst;

		case TRANSITION(OVS_KEY_ATTR_IPV4, OVS_KEY_ATTR_UDP):
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (swkey->ip.proto != IPPROTO_UDP)
			udp_key = nla_data(nla);
			swkey->ipv4.tp.src = udp_key->udp_src;
			swkey->ipv4.tp.dst = udp_key->udp_dst;

		case TRANSITION(OVS_KEY_ATTR_IPV6, OVS_KEY_ATTR_UDP):
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (swkey->ip.proto != IPPROTO_UDP)
			udp_key = nla_data(nla);
			swkey->ipv6.tp.src = udp_key->udp_src;
			swkey->ipv6.tp.dst = udp_key->udp_dst;

		case TRANSITION(OVS_KEY_ATTR_IPV4, OVS_KEY_ATTR_ICMP):
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (swkey->ip.proto != IPPROTO_ICMP)
			icmp_key = nla_data(nla);
			swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
			swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);

		case TRANSITION(OVS_KEY_ATTR_IPV6, OVS_KEY_ATTR_ICMPV6):
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (swkey->ip.proto != IPPROTO_ICMPV6)
			icmpv6_key = nla_data(nla);
			swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
			swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

		case TRANSITION(OVS_KEY_ATTR_ETHERTYPE, OVS_KEY_ATTR_ARP):
			key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
			if (swkey->eth.type != htons(ETH_P_ARP))
			arp_key = nla_data(nla);
			swkey->ipv4.addr.src = arp_key->arp_sip;
			swkey->ipv4.addr.dst = arp_key->arp_tip;
			if (arp_key->arp_op & htons(0xff00))
			swkey->ip.proto = ntohs(arp_key->arp_op);
			memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
			memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);

		case TRANSITION(OVS_KEY_ATTR_ICMPV6, OVS_KEY_ATTR_ND):
			key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
			if (swkey->ipv6.tp.src != htons(NDISC_NEIGHBOUR_SOLICITATION)
			    && swkey->ipv6.tp.src != htons(NDISC_NEIGHBOUR_ADVERTISEMENT))
			nd_key = nla_data(nla);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
	case OVS_KEY_ATTR_UNSPEC:

	case OVS_KEY_ATTR_TUN_ID:
	case OVS_KEY_ATTR_IN_PORT:

	case OVS_KEY_ATTR_ETHERNET:
	case OVS_KEY_ATTR_8021Q:

	case OVS_KEY_ATTR_ETHERTYPE:
		if (swkey->eth.type == htons(ETH_P_IP) ||
		    swkey->eth.type == htons(ETH_P_ARP))

	case OVS_KEY_ATTR_IPV4:
		if (swkey->ip.proto == IPPROTO_TCP ||
		    swkey->ip.proto == IPPROTO_UDP ||
		    swkey->ip.proto == IPPROTO_ICMP)

	case OVS_KEY_ATTR_IPV6:
		if (swkey->ip.proto == IPPROTO_TCP ||
		    swkey->ip.proto == IPPROTO_UDP ||
		    swkey->ip.proto == IPPROTO_ICMPV6)

	case OVS_KEY_ATTR_ICMPV6:
		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT))

	case OVS_KEY_ATTR_TCP:
	case OVS_KEY_ATTR_UDP:
	case OVS_KEY_ATTR_ICMP:
	case OVS_KEY_ATTR_ARP:
	case OVS_KEY_ATTR_ND:

	WARN_ON_ONCE(!key_len && !error);
	*key_lenp = key_len;
/**
 * flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @in_port: receives the extracted input port.
 * @tun_id: receives the extracted tunnel ID.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int flow_metadata_from_nlattrs(u16 *in_port, __be64 *tun_id,
			       const struct nlattr *attr)
	const struct nlattr *nla;

	*in_port = USHRT_MAX;

	prev_type = OVS_KEY_ATTR_UNSPEC;
	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type > OVS_KEY_ATTR_MAX || nla_len(nla) != key_lens[type])

		switch (TRANSITION(prev_type, type)) {
		case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_TUN_ID):
			*tun_id = nla_get_be64(nla);

		case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_IN_PORT):
		case TRANSITION(OVS_KEY_ATTR_TUN_ID, OVS_KEY_ATTR_IN_PORT):
			if (nla_get_u32(nla) >= DP_MAX_PORTS)
			*in_port = nla_get_u32(nla);
int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
	struct ovs_key_ethernet *eth_key;

	/* This is an imperfect sanity-check that FLOW_BUFSIZE doesn't need
	 * to be updated, but will at least raise awareness when new
	 * datapath key types are added. */
	BUILD_BUG_ON(__OVS_KEY_ATTR_MAX != 14);

	if (swkey->eth.tun_id != cpu_to_be64(0))
		NLA_PUT_BE64(skb, OVS_KEY_ATTR_TUN_ID, swkey->eth.tun_id);

	if (swkey->eth.in_port != USHRT_MAX)
		NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->eth.in_port);

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

	if (swkey->eth.tci != htons(0)) {
		struct ovs_key_8021q q_key;

		q_key.q_tpid = htons(ETH_P_8021Q);
		q_key.q_tci = swkey->eth.tci & ~htons(VLAN_TAG_PRESENT);
		NLA_PUT(skb, OVS_KEY_ATTR_8021Q, sizeof(q_key), &q_key);

	if (swkey->eth.type == htons(ETH_P_802_2))

	NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		memset(ipv4_key, 0, sizeof(struct ovs_key_ipv4));
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memset(ipv6_key, 0, sizeof(struct ovs_key_ipv6));
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tos = swkey->ip.tos;
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
	if (swkey->eth.type == htons(ETH_P_IP) ||
	    swkey->eth.type == htons(ETH_P_IPV6)) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	get_random_bytes(&hash_seed, sizeof(hash_seed));

	return 0;
}
/* Uninitializes the flow module. */
void flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}