openvswitch: datapath/flow.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include "flow.h"
#include "datapath.h"
#include <asm/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#include "vlan.h"

static struct kmem_cache *flow_cache;
static unsigned int hash_seed __read_mostly;

static inline bool arphdr_ok(struct sk_buff *skb)
{
        return skb->len >= skb_network_offset(skb) + sizeof(struct arp_eth_header);
}

static inline int check_iphdr(struct sk_buff *skb)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int ip_len;

        if (skb->len < nh_ofs + sizeof(struct iphdr))
                return -EINVAL;

        ip_len = ip_hdrlen(skb);
        if (ip_len < sizeof(struct iphdr) || skb->len < nh_ofs + ip_len)
                return -EINVAL;

        /*
         * Pull enough header bytes to account for the IP header plus the
         * longest transport header that we parse, currently 20 bytes for TCP.
         */
        if (!pskb_may_pull(skb, min(nh_ofs + ip_len + 20, skb->len)))
                return -ENOMEM;

        skb_set_transport_header(skb, nh_ofs + ip_len);
        return 0;
}

static inline bool tcphdr_ok(struct sk_buff *skb)
{
        int th_ofs = skb_transport_offset(skb);
        if (skb->len >= th_ofs + sizeof(struct tcphdr)) {
                int tcp_len = tcp_hdrlen(skb);
                return (tcp_len >= sizeof(struct tcphdr)
                        && skb->len >= th_ofs + tcp_len);
        }
        return false;
}

static inline bool udphdr_ok(struct sk_buff *skb)
{
        return skb->len >= skb_transport_offset(skb) + sizeof(struct udphdr);
}

static inline bool icmphdr_ok(struct sk_buff *skb)
{
        return skb->len >= skb_transport_offset(skb) + sizeof(struct icmphdr);
}

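/* Returns the last-used time of a flow, in milliseconds on the kernel's
 * monotonic clock, given the jiffies value recorded when the flow was last
 * used: the idle time in jiffies is converted to milliseconds and subtracted
 * from the current monotonic time. */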
u64 flow_used_time(unsigned long flow_jiffies)
{
        struct timespec cur_ts;
        u64 cur_ms, idle_ms;

        ktime_get_ts(&cur_ts);
        idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
        cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
                 cur_ts.tv_nsec / NSEC_PER_MSEC;

        return cur_ms - idle_ms;
}

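/* Fills in the IPv6 addresses, the traffic class (minus the ECN bits), and
 * the upper-layer protocol that follows any extension headers of 'key'.
 * Returns the total length of the IPv6 header plus extension headers, or a
 * negative errno value on error. */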
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int nh_len;
        int payload_ofs;
        struct ipv6hdr *nh;
        uint8_t nexthdr;

        if (unlikely(skb->len < nh_ofs + sizeof(*nh)))
                return -EINVAL;

        nh = ipv6_hdr(skb);
        nexthdr = nh->nexthdr;
        payload_ofs = (u8 *)(nh + 1) - skb->data;

        ipv6_addr_copy(&key->ipv6_src, &nh->saddr);
        ipv6_addr_copy(&key->ipv6_dst, &nh->daddr);
        key->nw_tos = ipv6_get_dsfield(nh) & ~INET_ECN_MASK;
        key->nw_proto = NEXTHDR_NONE;

        payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr);
        if (unlikely(payload_ofs < 0))
                return -EINVAL;

        nh_len = payload_ofs - nh_ofs;

        /* Pull enough header bytes to account for the IP header plus the
         * longest transport header that we parse, currently 20 bytes for TCP.
         * To dig deeper than the transport header, transport parsers may need
         * to pull more header bytes.
         */
        if (unlikely(!pskb_may_pull(skb, min(nh_ofs + nh_len + 20, skb->len))))
                return -ENOMEM;

        skb_set_transport_header(skb, nh_ofs + nh_len);
        key->nw_proto = nexthdr;
        return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
        return skb->len >= skb_transport_offset(skb) + sizeof(struct icmp6hdr);
}

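/* Offset of the flags byte within the TCP header and a mask covering the six
 * original flag bits (FIN, SYN, RST, PSH, ACK, URG). */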
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

void flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
        u8 tcp_flags = 0;

        if (flow->key.dl_type == htons(ETH_P_IP) &&
            flow->key.nw_proto == IPPROTO_TCP) {
                u8 *tcp = (u8 *)tcp_hdr(skb);
                tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
        }

        spin_lock_bh(&flow->lock);
        flow->used = jiffies;
        flow->packet_count++;
        flow->byte_count += skb->len;
        flow->tcp_flags |= tcp_flags;
        spin_unlock_bh(&flow->lock);
}

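/* Allocates a struct sw_flow_actions large enough to hold a copy of the
 * nested attributes in 'actions' and copies them in.  Returns an ERR_PTR on
 * failure. */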
struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
{
        int actions_len = nla_len(actions);
        struct sw_flow_actions *sfa;

        /* At least DP_MAX_PORTS actions are required to be able to flood a
         * packet to every port.  Factor of 2 allows for setting VLAN tags,
         * etc. */
        if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
                return ERR_PTR(-EINVAL);

        sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
        if (!sfa)
                return ERR_PTR(-ENOMEM);

        sfa->actions_len = actions_len;
        memcpy(sfa->actions, nla_data(actions), actions_len);
        return sfa;
}

struct sw_flow *flow_alloc(void)
{
        struct sw_flow *flow;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&flow->lock);
        atomic_set(&flow->refcnt, 1);
        flow->sf_acts = NULL;
        flow->dead = false;

        return flow;
}

void flow_free_tbl(struct tbl_node *node)
{
        struct sw_flow *flow = flow_cast(node);

        flow->dead = true;
        flow_put(flow);
}

/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow->dead = true;
        flow_put(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
        call_rcu(&flow->rcu, rcu_free_flow_callback);
}

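/* Takes a reference on 'flow'.  Each reference must eventually be released
 * with flow_put(), which frees the flow and its actions once the count drops
 * to zero. */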
void flow_hold(struct sw_flow *flow)
{
        atomic_inc(&flow->refcnt);
}

void flow_put(struct sw_flow *flow)
{
        if (unlikely(!flow))
                return;

        if (atomic_dec_and_test(&flow->refcnt)) {
                kfree((struct sw_flow_actions __force *)flow->sf_acts);
                kmem_cache_free(flow_cache, flow);
        }
}

/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
        struct sw_flow_actions *sf_acts = container_of(rcu,
                        struct sw_flow_actions, rcu);
        kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
        call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

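/* Parses an 802.1Q tag at skb->data (which at this point is just past the
 * Ethernet addresses), stores the TCI with VLAN_TAG_PRESENT set in 'key', and
 * pulls the tag from the skb. */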
static void parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        struct qtag_prefix {
                __be16 eth_type; /* ETH_P_8021Q */
                __be16 tci;
        };
        struct qtag_prefix *qp;

        if (skb->len < sizeof(struct qtag_prefix) + sizeof(__be16))
                return;

        qp = (struct qtag_prefix *) skb->data;
        key->dl_tci = qp->tci | htons(VLAN_TAG_PRESENT);
        __skb_pull(skb, sizeof(struct qtag_prefix));
}

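/* Returns the EtherType of the frame at skb->data and pulls the Ethernet
 * type/length field (and, for 802.2 SNAP frames, the LLC/SNAP header).
 * Values of 1536 (0x600) and above are EtherTypes; smaller values are 802.3
 * lengths, in which case the real EtherType, if any, is carried in an
 * LLC/SNAP header.  Frames without a usable EtherType are reported as
 * ETH_P_802_2. */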
static __be16 parse_ethertype(struct sk_buff *skb)
{
        struct llc_snap_hdr {
                u8  dsap;  /* Always 0xAA */
                u8  ssap;  /* Always 0xAA */
                u8  ctrl;
                u8  oui[3];
                __be16 ethertype;
        };
        struct llc_snap_hdr *llc;
        __be16 proto;

        proto = *(__be16 *) skb->data;
        __skb_pull(skb, sizeof(__be16));

        if (ntohs(proto) >= 1536)
                return proto;

        if (unlikely(skb->len < sizeof(struct llc_snap_hdr)))
                return htons(ETH_P_802_2);

        llc = (struct llc_snap_hdr *) skb->data;
        if (llc->dsap != LLC_SAP_SNAP ||
            llc->ssap != LLC_SAP_SNAP ||
            (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
                return htons(ETH_P_802_2);

        __skb_pull(skb, sizeof(struct llc_snap_hdr));
        return llc->ethertype;
}

static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
                        int nh_len)
{
        struct icmp6hdr *icmp = icmp6_hdr(skb);

        /* The ICMPv6 type and code fields use the 16-bit transport port
         * fields, so we need to store them in 16-bit network byte order.
         */
        key->tp_src = htons(icmp->icmp6_type);
        key->tp_dst = htons(icmp->icmp6_code);

        if (icmp->icmp6_code == 0 &&
            (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
             icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                int icmp_len = skb->len - skb_transport_offset(skb);
                struct nd_msg *nd;
                int offset;

                /* In order to process neighbor discovery options, we need the
                 * entire packet.
                 */
                if (unlikely(icmp_len < sizeof(*nd)))
                        return 0;
                if (unlikely(skb_linearize(skb)))
                        return -ENOMEM;

                nd = (struct nd_msg *)skb_transport_header(skb);
                ipv6_addr_copy(&key->nd_target, &nd->target);

                icmp_len -= sizeof(*nd);
                offset = 0;
                while (icmp_len >= 8) {
                        struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd->opt + offset);
                        int opt_len = nd_opt->nd_opt_len * 8;

                        if (unlikely(!opt_len || opt_len > icmp_len))
                                goto invalid;

                        /* Store the link layer address if the appropriate
                         * option is provided.  It is considered an error if
                         * the same link layer option is specified twice.
                         */
                        if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
                            && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->arp_sha)))
                                        goto invalid;
                                memcpy(key->arp_sha,
                                    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
                                   && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->arp_tha)))
                                        goto invalid;
                                memcpy(key->arp_tha,
                                    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        }

                        icmp_len -= opt_len;
                        offset += opt_len;
                }
        }

        return 0;

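        /* A malformed ND option zeroes the ND-related key fields rather than
         * failing, so the flow still matches on the basic ICMPv6 header. */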
invalid:
        memset(&key->nd_target, 0, sizeof(key->nd_target));
        memset(key->arp_sha, 0, sizeof(key->arp_sha));
        memset(key->arp_tha, 0, sizeof(key->arp_tha));

        return 0;
}

/**
 * flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @is_frag: set to 1 if @skb contains an IPv4 fragment, or to 0 if @skb does
 * not contain an IPv4 packet or if it is not a fragment.
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
                 bool *is_frag)
{
        struct ethhdr *eth;

        memset(key, 0, sizeof(*key));
        key->tun_id = OVS_CB(skb)->tun_id;
        key->in_port = in_port;
        *is_frag = false;

        /*
         * We would really like to pull as many bytes as we could possibly
         * want to parse into the linear data area.  Currently, for IPv4,
         * that is:
         *
         *    14     Ethernet header
         *     4     VLAN header
         *    60     max IP header with options
         *    20     max TCP/UDP/ICMP header (don't care about options)
         *    --
         *    98
         *
         * But Xen only allocates 64 or 72 bytes for the linear data area in
         * netback, which means that we would reallocate and copy the skb's
         * linear data on every packet if we did that.  So instead just pull 64
         * bytes, which is always sufficient without IP options, and then check
         * whether we need to pull more later when we look at the IP header.
         */
        if (!pskb_may_pull(skb, min(skb->len, 64u)))
                return -ENOMEM;

        skb_reset_mac_header(skb);

        /* Link layer. */
        eth = eth_hdr(skb);
        memcpy(key->dl_src, eth->h_source, ETH_ALEN);
        memcpy(key->dl_dst, eth->h_dest, ETH_ALEN);

        /* dl_type, dl_vlan, dl_vlan_pcp. */
        __skb_pull(skb, 2 * ETH_ALEN);

        if (vlan_tx_tag_present(skb))
                key->dl_tci = htons(vlan_get_tci(skb));
        else if (eth->h_proto == htons(ETH_P_8021Q))
                parse_vlan(skb, key);

        key->dl_type = parse_ethertype(skb);
        skb_reset_network_header(skb);
        __skb_push(skb, skb->data - (unsigned char *)eth);

        /* Network layer. */
        if (key->dl_type == htons(ETH_P_IP)) {
                struct iphdr *nh;
                int error;

                error = check_iphdr(skb);
                if (unlikely(error)) {
                        if (error == -EINVAL) {
                                skb->transport_header = skb->network_header;
                                return 0;
                        }
                        return error;
                }

                nh = ip_hdr(skb);
                key->ipv4_src = nh->saddr;
                key->ipv4_dst = nh->daddr;
                key->nw_tos = nh->tos & ~INET_ECN_MASK;
                key->nw_proto = nh->protocol;

                /* Transport layer. */
                if (!(nh->frag_off & htons(IP_MF | IP_OFFSET)) &&
                    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP)) {
                        if (key->nw_proto == IPPROTO_TCP) {
                                if (tcphdr_ok(skb)) {
                                        struct tcphdr *tcp = tcp_hdr(skb);
                                        key->tp_src = tcp->source;
                                        key->tp_dst = tcp->dest;
                                }
                        } else if (key->nw_proto == IPPROTO_UDP) {
                                if (udphdr_ok(skb)) {
                                        struct udphdr *udp = udp_hdr(skb);
                                        key->tp_src = udp->source;
                                        key->tp_dst = udp->dest;
                                }
                        } else if (key->nw_proto == IPPROTO_ICMP) {
                                if (icmphdr_ok(skb)) {
                                        struct icmphdr *icmp = icmp_hdr(skb);
                                        /* The ICMP type and code fields use the 16-bit
                                         * transport port fields, so we need to store them
                                         * in 16-bit network byte order. */
                                        key->tp_src = htons(icmp->type);
                                        key->tp_dst = htons(icmp->code);
                                }
                        }
                } else
                        *is_frag = true;

        } else if (key->dl_type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
                struct arp_eth_header *arp;

                arp = (struct arp_eth_header *)skb_network_header(skb);

                if (arp->ar_hrd == htons(ARPHRD_ETHER)
                                && arp->ar_pro == htons(ETH_P_IP)
                                && arp->ar_hln == ETH_ALEN
                                && arp->ar_pln == 4) {

                        /* We only match on the lower 8 bits of the opcode. */
                        if (ntohs(arp->ar_op) <= 0xff)
                                key->nw_proto = ntohs(arp->ar_op);

                        if (key->nw_proto == ARPOP_REQUEST
                                        || key->nw_proto == ARPOP_REPLY) {
                                memcpy(&key->ipv4_src, arp->ar_sip, sizeof(key->ipv4_src));
                                memcpy(&key->ipv4_dst, arp->ar_tip, sizeof(key->ipv4_dst));
                                memcpy(key->arp_sha, arp->ar_sha, ETH_ALEN);
                                memcpy(key->arp_tha, arp->ar_tha, ETH_ALEN);
                        }
                }
        } else if (key->dl_type == htons(ETH_P_IPV6)) {
                int nh_len;             /* IPv6 Header + Extensions */

                nh_len = parse_ipv6hdr(skb, key);
                if (unlikely(nh_len < 0)) {
                        if (nh_len == -EINVAL) {
                                skb->transport_header = skb->network_header;
                                return 0;
                        }
                        return nh_len;
                }

                /* Transport layer. */
                if (key->nw_proto == NEXTHDR_TCP) {
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->tp_src = tcp->source;
                                key->tp_dst = tcp->dest;
                        }
                } else if (key->nw_proto == NEXTHDR_UDP) {
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->tp_src = udp->source;
                                key->tp_dst = udp->dest;
                        }
                } else if (key->nw_proto == NEXTHDR_ICMP) {
                        if (icmp6hdr_ok(skb)) {
                                int error = parse_icmpv6(skb, key, nh_len);
                                if (error < 0)
                                        return error;
                        }
                }
        }
        return 0;
}

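/*
 * Minimal usage sketch for the extract/lookup helpers above and below
 * (illustration only; the real callers live elsewhere in the datapath):
 *
 *        struct sw_flow_key key;
 *        bool is_frag;
 *
 *        if (!flow_extract(skb, in_port, &key, &is_frag) && !is_frag)
 *                ... look up 'key' in the flow table using flow_hash() and
 *                    flow_cmp() ...
 *
 * jhash2() below hashes the key as an array of u32s, so equal flows must have
 * identical padding bytes; flow_extract() and flow_from_nlattrs() both zero
 * the entire key before filling it in.
 */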
u32 flow_hash(const struct sw_flow_key *key)
{
        return jhash2((u32 *)key, sizeof(*key) / sizeof(u32), hash_seed);
}

int flow_cmp(const struct tbl_node *node, void *key2_)
{
        const struct sw_flow_key *key1 = &flow_cast(node)->key;
        const struct sw_flow_key *key2 = key2_;

        return !memcmp(key1, key2, sizeof(struct sw_flow_key));
}

/**
 * flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @attr: Netlink attribute holding nested %ODP_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This state machine accepts the following forms, with [] for optional
 * elements and | for alternatives:
 *
 * [tun_id] in_port ethernet [8021q] [ethertype \
 *              [IPv4 [TCP|UDP|ICMP] | IPv6 [TCP|UDP|ICMPv6 [ND]] | ARP]]
 */
int flow_from_nlattrs(struct sw_flow_key *swkey, const struct nlattr *attr)
{
        const struct nlattr *nla;
        u16 prev_type;
        int rem;

        memset(swkey, 0, sizeof(*swkey));
        swkey->dl_type = htons(ETH_P_802_2);

        prev_type = ODP_KEY_ATTR_UNSPEC;
        nla_for_each_nested(nla, attr, rem) {
                static const u32 key_lens[ODP_KEY_ATTR_MAX + 1] = {
                        [ODP_KEY_ATTR_TUN_ID] = 8,
                        [ODP_KEY_ATTR_IN_PORT] = 4,
                        [ODP_KEY_ATTR_ETHERNET] = sizeof(struct odp_key_ethernet),
                        [ODP_KEY_ATTR_8021Q] = sizeof(struct odp_key_8021q),
                        [ODP_KEY_ATTR_ETHERTYPE] = 2,
                        [ODP_KEY_ATTR_IPV4] = sizeof(struct odp_key_ipv4),
                        [ODP_KEY_ATTR_IPV6] = sizeof(struct odp_key_ipv6),
                        [ODP_KEY_ATTR_TCP] = sizeof(struct odp_key_tcp),
                        [ODP_KEY_ATTR_UDP] = sizeof(struct odp_key_udp),
                        [ODP_KEY_ATTR_ICMP] = sizeof(struct odp_key_icmp),
                        [ODP_KEY_ATTR_ICMPV6] = sizeof(struct odp_key_icmpv6),
                        [ODP_KEY_ATTR_ARP] = sizeof(struct odp_key_arp),
                        [ODP_KEY_ATTR_ND] = sizeof(struct odp_key_nd),
                };

                const struct odp_key_ethernet *eth_key;
                const struct odp_key_8021q *q_key;
                const struct odp_key_ipv4 *ipv4_key;
                const struct odp_key_ipv6 *ipv6_key;
                const struct odp_key_tcp *tcp_key;
                const struct odp_key_udp *udp_key;
                const struct odp_key_icmp *icmp_key;
                const struct odp_key_icmpv6 *icmpv6_key;
                const struct odp_key_arp *arp_key;
                const struct odp_key_nd *nd_key;

                int type = nla_type(nla);

                if (type > ODP_KEY_ATTR_MAX || nla_len(nla) != key_lens[type])
                        return -EINVAL;

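                /* Encode the (previous attribute, current attribute) pair as
                 * a single value so that the legal attribute sequences can be
                 * checked with one switch statement. */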
#define TRANSITION(PREV_TYPE, TYPE) (((PREV_TYPE) << 16) | (TYPE))
                switch (TRANSITION(prev_type, type)) {
                case TRANSITION(ODP_KEY_ATTR_UNSPEC, ODP_KEY_ATTR_TUN_ID):
                        swkey->tun_id = nla_get_be64(nla);
                        break;

                case TRANSITION(ODP_KEY_ATTR_UNSPEC, ODP_KEY_ATTR_IN_PORT):
                case TRANSITION(ODP_KEY_ATTR_TUN_ID, ODP_KEY_ATTR_IN_PORT):
                        if (nla_get_u32(nla) >= DP_MAX_PORTS)
                                return -EINVAL;
                        swkey->in_port = nla_get_u32(nla);
                        break;

                case TRANSITION(ODP_KEY_ATTR_IN_PORT, ODP_KEY_ATTR_ETHERNET):
                        eth_key = nla_data(nla);
                        memcpy(swkey->dl_src, eth_key->eth_src, ETH_ALEN);
                        memcpy(swkey->dl_dst, eth_key->eth_dst, ETH_ALEN);
                        break;

                case TRANSITION(ODP_KEY_ATTR_ETHERNET, ODP_KEY_ATTR_8021Q):
                        q_key = nla_data(nla);
                        /* Only standard 0x8100 VLANs currently supported. */
                        if (q_key->q_tpid != htons(ETH_P_8021Q))
                                return -EINVAL;
                        if (q_key->q_tci & htons(VLAN_TAG_PRESENT))
                                return -EINVAL;
                        swkey->dl_tci = q_key->q_tci | htons(VLAN_TAG_PRESENT);
                        break;

                case TRANSITION(ODP_KEY_ATTR_8021Q, ODP_KEY_ATTR_ETHERTYPE):
                case TRANSITION(ODP_KEY_ATTR_ETHERNET, ODP_KEY_ATTR_ETHERTYPE):
                        swkey->dl_type = nla_get_be16(nla);
                        if (ntohs(swkey->dl_type) < 1536)
                                return -EINVAL;
                        break;

                case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_IPV4):
                        if (swkey->dl_type != htons(ETH_P_IP))
                                return -EINVAL;
                        ipv4_key = nla_data(nla);
                        swkey->ipv4_src = ipv4_key->ipv4_src;
                        swkey->ipv4_dst = ipv4_key->ipv4_dst;
                        swkey->nw_proto = ipv4_key->ipv4_proto;
                        swkey->nw_tos = ipv4_key->ipv4_tos;
                        if (swkey->nw_tos & INET_ECN_MASK)
                                return -EINVAL;
                        break;

                case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_IPV6):
                        if (swkey->dl_type != htons(ETH_P_IPV6))
                                return -EINVAL;
                        ipv6_key = nla_data(nla);
                        memcpy(&swkey->ipv6_src, ipv6_key->ipv6_src,
                                        sizeof(swkey->ipv6_src));
                        memcpy(&swkey->ipv6_dst, ipv6_key->ipv6_dst,
                                        sizeof(swkey->ipv6_dst));
                        swkey->nw_proto = ipv6_key->ipv6_proto;
                        swkey->nw_tos = ipv6_key->ipv6_tos;
                        if (swkey->nw_tos & INET_ECN_MASK)
                                return -EINVAL;
                        break;

                case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_TCP):
                case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_TCP):
                        if (swkey->nw_proto != IPPROTO_TCP)
                                return -EINVAL;
                        tcp_key = nla_data(nla);
                        swkey->tp_src = tcp_key->tcp_src;
                        swkey->tp_dst = tcp_key->tcp_dst;
                        break;

                case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_UDP):
                case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_UDP):
                        if (swkey->nw_proto != IPPROTO_UDP)
                                return -EINVAL;
                        udp_key = nla_data(nla);
                        swkey->tp_src = udp_key->udp_src;
                        swkey->tp_dst = udp_key->udp_dst;
                        break;

                case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_ICMP):
                        if (swkey->nw_proto != IPPROTO_ICMP)
                                return -EINVAL;
                        icmp_key = nla_data(nla);
                        swkey->tp_src = htons(icmp_key->icmp_type);
                        swkey->tp_dst = htons(icmp_key->icmp_code);
                        break;

                case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_ICMPV6):
                        if (swkey->nw_proto != IPPROTO_ICMPV6)
                                return -EINVAL;
                        icmpv6_key = nla_data(nla);
                        swkey->tp_src = htons(icmpv6_key->icmpv6_type);
                        swkey->tp_dst = htons(icmpv6_key->icmpv6_code);
                        break;

                case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_ARP):
                        if (swkey->dl_type != htons(ETH_P_ARP))
                                return -EINVAL;
                        arp_key = nla_data(nla);
                        swkey->ipv4_src = arp_key->arp_sip;
                        swkey->ipv4_dst = arp_key->arp_tip;
                        if (arp_key->arp_op & htons(0xff00))
                                return -EINVAL;
                        swkey->nw_proto = ntohs(arp_key->arp_op);
                        memcpy(swkey->arp_sha, arp_key->arp_sha, ETH_ALEN);
                        memcpy(swkey->arp_tha, arp_key->arp_tha, ETH_ALEN);
                        break;

                case TRANSITION(ODP_KEY_ATTR_ICMPV6, ODP_KEY_ATTR_ND):
                        if (swkey->tp_src != htons(NDISC_NEIGHBOUR_SOLICITATION)
                            && swkey->tp_src != htons(NDISC_NEIGHBOUR_ADVERTISEMENT))
                                return -EINVAL;
                        nd_key = nla_data(nla);
                        memcpy(&swkey->nd_target, nd_key->nd_target,
                                        sizeof(swkey->nd_target));
                        memcpy(swkey->arp_sha, nd_key->nd_sll, ETH_ALEN);
                        memcpy(swkey->arp_tha, nd_key->nd_tll, ETH_ALEN);
                        break;

                default:
                        return -EINVAL;
                }

                prev_type = type;
        }
        if (rem)
                return -EINVAL;

        switch (prev_type) {
        case ODP_KEY_ATTR_UNSPEC:
                return -EINVAL;

        case ODP_KEY_ATTR_TUN_ID:
        case ODP_KEY_ATTR_IN_PORT:
                return -EINVAL;

        case ODP_KEY_ATTR_ETHERNET:
        case ODP_KEY_ATTR_8021Q:
                return 0;

        case ODP_KEY_ATTR_ETHERTYPE:
                if (swkey->dl_type == htons(ETH_P_IP) ||
                    swkey->dl_type == htons(ETH_P_ARP))
                        return -EINVAL;
                return 0;

        case ODP_KEY_ATTR_IPV4:
                if (swkey->nw_proto == IPPROTO_TCP ||
                    swkey->nw_proto == IPPROTO_UDP ||
                    swkey->nw_proto == IPPROTO_ICMP)
                        return -EINVAL;
                return 0;

        case ODP_KEY_ATTR_IPV6:
                if (swkey->nw_proto == IPPROTO_TCP ||
                    swkey->nw_proto == IPPROTO_UDP ||
                    swkey->nw_proto == IPPROTO_ICMPV6)
                        return -EINVAL;
                return 0;

        case ODP_KEY_ATTR_ICMPV6:
                if (swkey->tp_src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
                    swkey->tp_src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT))
                        return -EINVAL;
                return 0;

        case ODP_KEY_ATTR_TCP:
        case ODP_KEY_ATTR_UDP:
        case ODP_KEY_ATTR_ICMP:
        case ODP_KEY_ATTR_ARP:
        case ODP_KEY_ATTR_ND:
                return 0;
        }

        WARN_ON_ONCE(1);
        return -EINVAL;
}

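/* Appends ODP_KEY_ATTR_* Netlink attributes describing 'swkey' to 'skb', in
 * the same order that flow_from_nlattrs() accepts; for illustration, one
 * valid sequence is in_port, ethernet, ethertype(0x0800),
 * IPv4(proto=IPPROTO_TCP), TCP.  Returns 0, or -EMSGSIZE if 'skb' runs out
 * of tailroom. */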
int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
        struct odp_key_ethernet *eth_key;
        struct nlattr *nla;

        /* This is an imperfect sanity-check that FLOW_BUFSIZE doesn't need
         * to be updated, but will at least raise awareness when new ODP key
         * types are added. */
        BUILD_BUG_ON(__ODP_KEY_ATTR_MAX != 14);

        if (swkey->tun_id != cpu_to_be64(0))
                NLA_PUT_BE64(skb, ODP_KEY_ATTR_TUN_ID, swkey->tun_id);

        NLA_PUT_U32(skb, ODP_KEY_ATTR_IN_PORT, swkey->in_port);

        nla = nla_reserve(skb, ODP_KEY_ATTR_ETHERNET, sizeof(*eth_key));
        if (!nla)
                goto nla_put_failure;
        eth_key = nla_data(nla);
        memcpy(eth_key->eth_src, swkey->dl_src, ETH_ALEN);
        memcpy(eth_key->eth_dst, swkey->dl_dst, ETH_ALEN);

        if (swkey->dl_tci != htons(0)) {
                struct odp_key_8021q q_key;

                q_key.q_tpid = htons(ETH_P_8021Q);
                q_key.q_tci = swkey->dl_tci & ~htons(VLAN_TAG_PRESENT);
                NLA_PUT(skb, ODP_KEY_ATTR_8021Q, sizeof(q_key), &q_key);
        }

        if (swkey->dl_type == htons(ETH_P_802_2))
                return 0;

        NLA_PUT_BE16(skb, ODP_KEY_ATTR_ETHERTYPE, swkey->dl_type);

        if (swkey->dl_type == htons(ETH_P_IP)) {
                struct odp_key_ipv4 *ipv4_key;

                nla = nla_reserve(skb, ODP_KEY_ATTR_IPV4, sizeof(*ipv4_key));
                if (!nla)
                        goto nla_put_failure;
                ipv4_key = nla_data(nla);
                memset(ipv4_key, 0, sizeof(struct odp_key_ipv4));
                ipv4_key->ipv4_src = swkey->ipv4_src;
                ipv4_key->ipv4_dst = swkey->ipv4_dst;
                ipv4_key->ipv4_proto = swkey->nw_proto;
                ipv4_key->ipv4_tos = swkey->nw_tos;
        } else if (swkey->dl_type == htons(ETH_P_IPV6)) {
                struct odp_key_ipv6 *ipv6_key;

                nla = nla_reserve(skb, ODP_KEY_ATTR_IPV6, sizeof(*ipv6_key));
                if (!nla)
                        goto nla_put_failure;
                ipv6_key = nla_data(nla);
                memset(ipv6_key, 0, sizeof(struct odp_key_ipv6));
                memcpy(ipv6_key->ipv6_src, &swkey->ipv6_src,
                                sizeof(ipv6_key->ipv6_src));
                memcpy(ipv6_key->ipv6_dst, &swkey->ipv6_dst,
                                sizeof(ipv6_key->ipv6_dst));
                ipv6_key->ipv6_proto = swkey->nw_proto;
                ipv6_key->ipv6_tos = swkey->nw_tos;
        } else if (swkey->dl_type == htons(ETH_P_ARP)) {
                struct odp_key_arp *arp_key;

                nla = nla_reserve(skb, ODP_KEY_ATTR_ARP, sizeof(*arp_key));
                if (!nla)
                        goto nla_put_failure;
                arp_key = nla_data(nla);
                memset(arp_key, 0, sizeof(struct odp_key_arp));
                arp_key->arp_sip = swkey->ipv4_src;
                arp_key->arp_tip = swkey->ipv4_dst;
                arp_key->arp_op = htons(swkey->nw_proto);
                memcpy(arp_key->arp_sha, swkey->arp_sha, ETH_ALEN);
                memcpy(arp_key->arp_tha, swkey->arp_tha, ETH_ALEN);
        }

        if (swkey->dl_type == htons(ETH_P_IP) ||
            swkey->dl_type == htons(ETH_P_IPV6)) {

                if (swkey->nw_proto == IPPROTO_TCP) {
                        struct odp_key_tcp *tcp_key;

                        nla = nla_reserve(skb, ODP_KEY_ATTR_TCP, sizeof(*tcp_key));
                        if (!nla)
                                goto nla_put_failure;
                        tcp_key = nla_data(nla);
                        tcp_key->tcp_src = swkey->tp_src;
                        tcp_key->tcp_dst = swkey->tp_dst;
                } else if (swkey->nw_proto == IPPROTO_UDP) {
                        struct odp_key_udp *udp_key;

                        nla = nla_reserve(skb, ODP_KEY_ATTR_UDP, sizeof(*udp_key));
                        if (!nla)
                                goto nla_put_failure;
                        udp_key = nla_data(nla);
                        udp_key->udp_src = swkey->tp_src;
                        udp_key->udp_dst = swkey->tp_dst;
                } else if (swkey->dl_type == htons(ETH_P_IP) &&
                           swkey->nw_proto == IPPROTO_ICMP) {
                        struct odp_key_icmp *icmp_key;

                        nla = nla_reserve(skb, ODP_KEY_ATTR_ICMP, sizeof(*icmp_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmp_key = nla_data(nla);
                        icmp_key->icmp_type = ntohs(swkey->tp_src);
                        icmp_key->icmp_code = ntohs(swkey->tp_dst);
                } else if (swkey->dl_type == htons(ETH_P_IPV6) &&
                           swkey->nw_proto == IPPROTO_ICMPV6) {
                        struct odp_key_icmpv6 *icmpv6_key;

                        nla = nla_reserve(skb, ODP_KEY_ATTR_ICMPV6,
                                                sizeof(*icmpv6_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmpv6_key = nla_data(nla);
                        icmpv6_key->icmpv6_type = ntohs(swkey->tp_src);
                        icmpv6_key->icmpv6_code = ntohs(swkey->tp_dst);

                        if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
                            icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
                                struct odp_key_nd *nd_key;

                                nla = nla_reserve(skb, ODP_KEY_ATTR_ND, sizeof(*nd_key));
                                if (!nla)
                                        goto nla_put_failure;
                                nd_key = nla_data(nla);
                                memcpy(nd_key->nd_target, &swkey->nd_target,
                                                        sizeof(nd_key->nd_target));
                                memcpy(nd_key->nd_sll, swkey->arp_sha, ETH_ALEN);
                                memcpy(nd_key->nd_tll, swkey->arp_tha, ETH_ALEN);
                        }
                }
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                        0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        get_random_bytes(&hash_seed, sizeof(hash_seed));

        return 0;
}

/* Uninitializes the flow module. */
void flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}