datapath: Use vlan acceleration for vlan operations.
openvswitch: datapath/flow.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 #include "flow.h"
10 #include "datapath.h"
11 #include <asm/uaccess.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/if_ether.h>
15 #include <linux/if_vlan.h>
16 #include <net/llc_pdu.h>
17 #include <linux/kernel.h>
18 #include <linux/jhash.h>
19 #include <linux/jiffies.h>
20 #include <linux/llc.h>
21 #include <linux/module.h>
22 #include <linux/in.h>
23 #include <linux/rcupdate.h>
24 #include <linux/if_arp.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <linux/tcp.h>
29 #include <linux/udp.h>
30 #include <linux/icmp.h>
31 #include <linux/icmpv6.h>
32 #include <net/inet_ecn.h>
33 #include <net/ip.h>
34 #include <net/ipv6.h>
35 #include <net/ndisc.h>
36
37 #include "vlan.h"
38
39 static struct kmem_cache *flow_cache;
40 static unsigned int hash_seed __read_mostly;
41
42 static inline bool arphdr_ok(struct sk_buff *skb)
43 {
44         return skb->len >= skb_network_offset(skb) + sizeof(struct arp_eth_header);
45 }
46
47 static inline int check_iphdr(struct sk_buff *skb)
48 {
49         unsigned int nh_ofs = skb_network_offset(skb);
50         unsigned int ip_len;
51
52         if (skb->len < nh_ofs + sizeof(struct iphdr))
53                 return -EINVAL;
54
55         ip_len = ip_hdrlen(skb);
56         if (ip_len < sizeof(struct iphdr) || skb->len < nh_ofs + ip_len)
57                 return -EINVAL;
58
59         /*
60          * Pull enough header bytes to account for the IP header plus the
61          * longest transport header that we parse, currently 20 bytes for TCP.
62          */
63         if (!pskb_may_pull(skb, min(nh_ofs + ip_len + 20, skb->len)))
64                 return -ENOMEM;
65
66         skb_set_transport_header(skb, nh_ofs + ip_len);
67         return 0;
68 }
69
70 static inline bool tcphdr_ok(struct sk_buff *skb)
71 {
72         int th_ofs = skb_transport_offset(skb);
73         if (skb->len >= th_ofs + sizeof(struct tcphdr)) {
74                 int tcp_len = tcp_hdrlen(skb);
75                 return (tcp_len >= sizeof(struct tcphdr)
76                         && skb->len >= th_ofs + tcp_len);
77         }
78         return false;
79 }
80
81 static inline bool udphdr_ok(struct sk_buff *skb)
82 {
83         return skb->len >= skb_transport_offset(skb) + sizeof(struct udphdr);
84 }
85
86 static inline bool icmphdr_ok(struct sk_buff *skb)
87 {
88         return skb->len >= skb_transport_offset(skb) + sizeof(struct icmphdr);
89 }
90
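/* flow_used_time - translate a 'last used' jiffies value into wall-clock
 * milliseconds for reporting to userspace.  A rough, purely illustrative
 * example of the calculation below: if the current wall clock reads
 * 1,000,000 ms and the flow was last used 250 jiffies ago on a HZ=250
 * kernel (1,000 ms of idle time), the function reports 999,000 ms.
 */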
91 u64 flow_used_time(unsigned long flow_jiffies)
92 {
93         struct timespec cur_ts;
94         u64 cur_ms, idle_ms;
95
96         ktime_get_ts(&cur_ts);
97         idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
98         cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
99                  cur_ts.tv_nsec / NSEC_PER_MSEC;
100
101         return cur_ms - idle_ms;
102 }
103
104 static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
105 {
106         unsigned int nh_ofs = skb_network_offset(skb);
107         unsigned int nh_len;
108         int payload_ofs;
109         int payload_len;
110         struct ipv6hdr *nh;
111         uint8_t nexthdr;
112
113         if (unlikely(skb->len < nh_ofs + sizeof(*nh)))
114                 return -EINVAL;
115
116         nh = ipv6_hdr(skb);
117         nexthdr = nh->nexthdr;
118         payload_ofs = (u8 *)(nh + 1) - skb->data;
119         payload_len = ntohs(nh->payload_len);
120
121         memcpy(key->ipv6_src, nh->saddr.in6_u.u6_addr8, sizeof(key->ipv6_src));
122         memcpy(key->ipv6_dst, nh->daddr.in6_u.u6_addr8, sizeof(key->ipv6_dst));
123         key->nw_tos = ipv6_get_dsfield(nh) & ~INET_ECN_MASK;
124         key->nw_proto = NEXTHDR_NONE;
125
126         /* We don't process jumbograms. */
127         if (!payload_len)
128                 return -EINVAL;
129
130         if (unlikely(skb->len < nh_ofs + sizeof(*nh) + payload_len))
131                 return -EINVAL;
132
133         payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr);
134         if (payload_ofs < 0) {
135                 return -EINVAL;
136         }
137         nh_len = payload_ofs - nh_ofs;
138
139         /* Ensure that the claimed payload length is large enough to cover
140          * the headers we have already processed. */
141         if (payload_len < nh_len - sizeof(*nh))
142                 return -EINVAL;
143
144         /* Pull enough header bytes to account for the IP header plus the
145          * longest transport header that we parse, currently 20 bytes for TCP.
146          * To dig deeper than the transport header, transport parsers may need
147          * to pull more header bytes.
148          */
149         if (unlikely(!pskb_may_pull(skb, min(nh_ofs + nh_len + 20, skb->len))))
150                 return -ENOMEM;
151
152         skb_set_transport_header(skb, nh_ofs + nh_len);
153         key->nw_proto = nexthdr;
154         return nh_len;
155 }
156
157 static bool icmp6hdr_ok(struct sk_buff *skb)
158 {
159         return skb->len >= skb_transport_offset(skb) + sizeof(struct icmp6hdr);
160 }
161
162 #define TCP_FLAGS_OFFSET 13
163 #define TCP_FLAG_MASK 0x3f
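/* Background for the two constants above (standard TCP layout, not anything
 * defined in this file): the flag byte sits at offset 13 of the TCP header,
 * and masking it with 0x3f keeps the six classic flags (FIN 0x01, SYN 0x02,
 * RST 0x04, PSH 0x08, ACK 0x10, URG 0x20).  For example, a SYN+ACK segment
 * leaves 0x12 in tcp_flags below.
 */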
164
165 void flow_used(struct sw_flow *flow, struct sk_buff *skb)
166 {
167         u8 tcp_flags = 0;
168
169         if (flow->key.dl_type == htons(ETH_P_IP) &&
170             flow->key.nw_proto == IPPROTO_TCP) {
171                 u8 *tcp = (u8 *)tcp_hdr(skb);
172                 tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
173         }
174
175         spin_lock_bh(&flow->lock);
176         flow->used = jiffies;
177         flow->packet_count++;
178         flow->byte_count += skb->len;
179         flow->tcp_flags |= tcp_flags;
180         spin_unlock_bh(&flow->lock);
181 }
182
183 struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
184 {
185         int actions_len = nla_len(actions);
186         struct sw_flow_actions *sfa;
187
188         /* At least DP_MAX_PORTS actions are required to be able to flood a
189          * packet to every port.  Factor of 2 allows for setting VLAN tags,
190          * etc. */
191         if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
192                 return ERR_PTR(-EINVAL);
193
194         sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
195         if (!sfa)
196                 return ERR_PTR(-ENOMEM);
197
198         sfa->actions_len = actions_len;
199         memcpy(sfa->actions, nla_data(actions), actions_len);
200         return sfa;
201 }
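/* For a rough sense of the size cap above: a single output-to-port action is
 * a 4-byte attribute, and nla_total_size(4) rounds the 4-byte attribute
 * header plus payload up to 8 bytes, so the limit works out to 16 bytes per
 * port.  With a hypothetical DP_MAX_PORTS of 1024 that is a 16 kB ceiling;
 * the actual constant lives in datapath.h.
 */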
202
203 struct sw_flow *flow_alloc(void)
204 {
205         struct sw_flow *flow;
206
207         flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
208         if (!flow)
209                 return ERR_PTR(-ENOMEM);
210
211         spin_lock_init(&flow->lock);
212         atomic_set(&flow->refcnt, 1);
213         flow->dead = false;
214
215         return flow;
216 }
217
218 void flow_free_tbl(struct tbl_node *node)
219 {
220         struct sw_flow *flow = flow_cast(node);
221
222         flow->dead = true;
223         flow_put(flow);
224 }
225
226 /* RCU callback used by flow_deferred_free. */
227 static void rcu_free_flow_callback(struct rcu_head *rcu)
228 {
229         struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
230
231         flow->dead = true;
232         flow_put(flow);
233 }
234
235 /* Schedules 'flow' to be freed after the next RCU grace period.
236  * The caller must hold rcu_read_lock for this to be sensible. */
237 void flow_deferred_free(struct sw_flow *flow)
238 {
239         call_rcu(&flow->rcu, rcu_free_flow_callback);
240 }
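/* A rough sketch of the intended use (the removal step is pseudocode; the
 * real table manipulation lives in table.c and datapath.c):
 *
 *	remove_flow_from_table(table, flow);	// new lookups can no longer find it
 *	flow_deferred_free(flow);		// actual free happens after a grace period
 *
 * Readers that looked the flow up earlier, under rcu_read_lock(), may keep
 * using it until their read-side critical sections end.
 */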
241
242 void flow_hold(struct sw_flow *flow)
243 {
244         atomic_inc(&flow->refcnt);
245 }
246
247 void flow_put(struct sw_flow *flow)
248 {
249         if (unlikely(!flow))
250                 return;
251
252         if (atomic_dec_and_test(&flow->refcnt)) {
253                 kfree((struct sw_flow_actions __force *)flow->sf_acts);
254                 kmem_cache_free(flow_cache, flow);
255         }
256 }
257
258 /* RCU callback used by flow_deferred_free_acts. */
259 static void rcu_free_acts_callback(struct rcu_head *rcu)
260 {
261         struct sw_flow_actions *sf_acts = container_of(rcu,
262                         struct sw_flow_actions, rcu);
263         kfree(sf_acts);
264 }
265
266 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
267  * The caller must hold rcu_read_lock for this to be sensible. */
268 void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
269 {
270         call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
271 }
272
273 static void parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
274 {
275         struct qtag_prefix {
276                 __be16 eth_type; /* ETH_P_8021Q */
277                 __be16 tci;
278         };
279         struct qtag_prefix *qp;
280
281         if (skb->len < sizeof(struct qtag_prefix) + sizeof(__be16))
282                 return;
283
284         qp = (struct qtag_prefix *) skb->data;
285         key->dl_tci = qp->tci | htons(VLAN_TAG_PRESENT);
286         __skb_pull(skb, sizeof(struct qtag_prefix));
287 }
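/* The four bytes consumed above are the standard 802.1Q tag:
 *
 *	+----------------+---------------------------------+
 *	| TPID (0x8100)  | TCI: PCP(3) | CFI(1) | VID(12)   |
 *	+----------------+---------------------------------+
 *
 * VLAN_TAG_PRESENT is OR'd into dl_tci so that a frame tagged with TCI 0 can
 * still be told apart from an untagged frame, mirroring how the kernel marks
 * hardware-accelerated tags in skb->vlan_tci.
 */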
288
289 static __be16 parse_ethertype(struct sk_buff *skb)
290 {
291         struct llc_snap_hdr {
292                 u8  dsap;  /* Always 0xAA */
293                 u8  ssap;  /* Always 0xAA */
294                 u8  ctrl;
295                 u8  oui[3];
296                 __be16 ethertype;
297         };
298         struct llc_snap_hdr *llc;
299         __be16 proto;
300
301         proto = *(__be16 *) skb->data;
302         __skb_pull(skb, sizeof(__be16));
303
304         if (ntohs(proto) >= 1536)
305                 return proto;
306
307         if (unlikely(skb->len < sizeof(struct llc_snap_hdr)))
308                 return htons(ETH_P_802_2);
309
310         llc = (struct llc_snap_hdr *) skb->data;
311         if (llc->dsap != LLC_SAP_SNAP ||
312             llc->ssap != LLC_SAP_SNAP ||
313             (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
314                 return htons(ETH_P_802_2);
315
316         __skb_pull(skb, sizeof(struct llc_snap_hdr));
317         return llc->ethertype;
318 }
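/* Illustration of the framing behind the checks above: in Ethernet II the two
 * bytes after the MAC addresses are an EtherType, and all EtherTypes are
 * >= 1536 (0x0600), e.g. 0x0800 for IPv4.  Smaller values are an 802.3
 * length, in which case the payload may begin with an LLC/SNAP header:
 *
 *	DSAP 0xAA | SSAP 0xAA | ctrl | OUI 00:00:00 | EtherType
 *
 * Only that zero-OUI SNAP form carries a usable EtherType; anything else is
 * reported as ETH_P_802_2, i.e. a frame with no parsable type.
 */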
319
320 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
321                 int nh_len)
322 {
323         struct ipv6hdr *nh = ipv6_hdr(skb);
324         int icmp_len = ntohs(nh->payload_len) + sizeof(*nh) - nh_len;
325         struct icmp6hdr *icmp = icmp6_hdr(skb);
326
327         /* The ICMPv6 type and code fields use the 16-bit transport port
328          * fields, so we need to store them in 16-bit network byte order. */
329         key->tp_src = htons(icmp->icmp6_type);
330         key->tp_dst = htons(icmp->icmp6_code);
331
332         if (!icmp->icmp6_code
333                         && ((icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
334                           || (icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT))) {
335                 struct nd_msg *nd;
336                 int offset;
337
338                 /* In order to process neighbor discovery options, we need the
339                  * entire packet. */
340                 if (icmp_len < sizeof(*nd))
341                         goto invalid;
342                 if (!pskb_may_pull(skb, skb_transport_offset(skb) + icmp_len))
343                         return -ENOMEM;
344
345                 nd = (struct nd_msg *)skb_transport_header(skb);
346                 memcpy(key->nd_target, &nd->target, sizeof(key->nd_target));
347
348                 icmp_len -= sizeof(*nd);
349                 offset = 0;
350                 while (icmp_len >= 8) {
351                         struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd->opt + offset);
352                         int opt_len = nd_opt->nd_opt_len * 8;
353
354                         if (!opt_len || (opt_len > icmp_len))
355                                 goto invalid;
356
357                         /* Store the link layer address if the appropriate option is
358                          * provided.  It is considered an error if the same link
359                          * layer option is specified twice. */
360                         if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
361                                         && opt_len == 8) {
362                                 if (!is_zero_ether_addr(key->arp_sha))
363                                         goto invalid;
364                                 memcpy(key->arp_sha,
365                                                 &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
366                         } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
367                                         && opt_len == 8) {
368                                 if (!is_zero_ether_addr(key->arp_tha))
369                                         goto invalid;
370                                 memcpy(key->arp_tha,
371                                                 &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
372                         }
373
374                         icmp_len -= opt_len;
375                         offset += opt_len;
376                 }
377         }
378
379         return 0;
380
381 invalid:
382         memset(key->nd_target, 0, sizeof(key->nd_target));
383         memset(key->arp_sha, 0, sizeof(key->arp_sha));
384         memset(key->arp_tha, 0, sizeof(key->arp_tha));
385
386         return 0;
387 }
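/* Layout assumed by the option walk above (RFC 4861): every neighbour
 * discovery option starts with a 1-byte type and a 1-byte length counted in
 * 8-octet units, hence opt_len = nd_opt_len * 8.  A source/target link-layer
 * address option on Ethernet is exactly one unit:
 *
 *	type (1) | len = 1 (1) | MAC address (6)
 *
 * which is why only options with opt_len == 8 are copied into arp_sha and
 * arp_tha.
 */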
388
389 /**
390  * flow_extract - extracts a flow key from an Ethernet frame.
391  * @skb: sk_buff that contains the frame, with skb->data pointing to the
392  * Ethernet header
393  * @in_port: port number on which @skb was received.
394  * @key: output flow key
395  * @is_frag: set to true if @skb contains an IPv4 fragment, or to false if
396  * @skb does not contain an IPv4 packet or if it is not a fragment.
397  *
398  * The caller must ensure that skb->len >= ETH_HLEN.
399  *
400  * Returns 0 if successful, otherwise a negative errno value.
401  *
402  * Initializes @skb header pointers as follows:
403  *
404  *    - skb->mac_header: the Ethernet header.
405  *
406  *    - skb->network_header: just past the Ethernet header, or just past the
407  *      VLAN header, to the first byte of the Ethernet payload.
408  *
409  *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
410  *      on output, then just past the IP header, if one is present and
411  *      of a correct length, otherwise the same as skb->network_header.
412  *      For other key->dl_type values it is left untouched.
413  */
414 int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
415                  bool *is_frag)
416 {
417         struct ethhdr *eth;
418
419         memset(key, 0, sizeof(*key));
420         key->tun_id = OVS_CB(skb)->tun_id;
421         key->in_port = in_port;
422         *is_frag = false;
423
424         /*
425          * We would really like to pull as many bytes as we could possibly
426          * want to parse into the linear data area.  Currently, for IPv4,
427          * that is:
428          *
429          *    14     Ethernet header
430          *     4     VLAN header
431          *    60     max IP header with options
432          *    20     max TCP/UDP/ICMP header (don't care about options)
433          *    --
434          *    98
435          *
436          * But Xen only allocates 64 or 72 bytes for the linear data area in
437          * netback, which means that we would reallocate and copy the skb's
438          * linear data on every packet if we did that.  So instead just pull 64
439          * bytes, which is always sufficient without IP options, and then check
440          * whether we need to pull more later when we look at the IP header.
441          */
442         if (!pskb_may_pull(skb, min(skb->len, 64u)))
443                 return -ENOMEM;
444
445         skb_reset_mac_header(skb);
446
447         /* Link layer. */
448         eth = eth_hdr(skb);
449         memcpy(key->dl_src, eth->h_source, ETH_ALEN);
450         memcpy(key->dl_dst, eth->h_dest, ETH_ALEN);
451
452         /* dl_type, dl_vlan, dl_vlan_pcp. */
453         __skb_pull(skb, 2 * ETH_ALEN);
454
455         if (vlan_tx_tag_present(skb))
456                 key->dl_tci = htons(vlan_get_tci(skb));
457         else if (eth->h_proto == htons(ETH_P_8021Q))
458                 parse_vlan(skb, key);
459
460         key->dl_type = parse_ethertype(skb);
461         skb_reset_network_header(skb);
462         __skb_push(skb, skb->data - (unsigned char *)eth);
463
464         /* Network layer. */
465         if (key->dl_type == htons(ETH_P_IP)) {
466                 struct iphdr *nh;
467                 int error;
468
469                 error = check_iphdr(skb);
470                 if (unlikely(error)) {
471                         if (error == -EINVAL) {
472                                 skb->transport_header = skb->network_header;
473                                 return 0;
474                         }
475                         return error;
476                 }
477
478                 nh = ip_hdr(skb);
479                 key->ipv4_src = nh->saddr;
480                 key->ipv4_dst = nh->daddr;
481                 key->nw_tos = nh->tos & ~INET_ECN_MASK;
482                 key->nw_proto = nh->protocol;
483
484                 /* Transport layer. */
485                 if (!(nh->frag_off & htons(IP_MF | IP_OFFSET)) &&
486                     !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP)) {
487                         if (key->nw_proto == IPPROTO_TCP) {
488                                 if (tcphdr_ok(skb)) {
489                                         struct tcphdr *tcp = tcp_hdr(skb);
490                                         key->tp_src = tcp->source;
491                                         key->tp_dst = tcp->dest;
492                                 }
493                         } else if (key->nw_proto == IPPROTO_UDP) {
494                                 if (udphdr_ok(skb)) {
495                                         struct udphdr *udp = udp_hdr(skb);
496                                         key->tp_src = udp->source;
497                                         key->tp_dst = udp->dest;
498                                 }
499                         } else if (key->nw_proto == IPPROTO_ICMP) {
500                                 if (icmphdr_ok(skb)) {
501                                         struct icmphdr *icmp = icmp_hdr(skb);
502                                         /* The ICMP type and code fields use the 16-bit
503                                          * transport port fields, so we need to store them
504                                          * in 16-bit network byte order. */
505                                         key->tp_src = htons(icmp->type);
506                                         key->tp_dst = htons(icmp->code);
507                                 }
508                         }
509                 } else
510                         *is_frag = true;
511
512         } else if (key->dl_type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
513                 struct arp_eth_header *arp;
514
515                 arp = (struct arp_eth_header *)skb_network_header(skb);
516
517                 if (arp->ar_hrd == htons(ARPHRD_ETHER)
518                                 && arp->ar_pro == htons(ETH_P_IP)
519                                 && arp->ar_hln == ETH_ALEN
520                                 && arp->ar_pln == 4) {
521
522                         /* We only match on the lower 8 bits of the opcode. */
523                         if (ntohs(arp->ar_op) <= 0xff)
524                                 key->nw_proto = ntohs(arp->ar_op);
525
526                         if (key->nw_proto == ARPOP_REQUEST
527                                         || key->nw_proto == ARPOP_REPLY) {
528                                 memcpy(&key->ipv4_src, arp->ar_sip, sizeof(key->ipv4_src));
529                                 memcpy(&key->ipv4_dst, arp->ar_tip, sizeof(key->ipv4_dst));
530                                 memcpy(key->arp_sha, arp->ar_sha, ETH_ALEN);
531                                 memcpy(key->arp_tha, arp->ar_tha, ETH_ALEN);
532                         }
533                 }
534         } else if (key->dl_type == htons(ETH_P_IPV6)) {
535                 int nh_len;             /* IPv6 Header + Extensions */
536
537                 nh_len = parse_ipv6hdr(skb, key);
538                 if (unlikely(nh_len < 0)) {
539                         if (nh_len == -EINVAL) {
540                                 skb->transport_header = skb->network_header;
541                                 return 0;
542                         }
543                         return nh_len;
544                 }
545
546                 /* Transport layer. */
547                 if (key->nw_proto == NEXTHDR_TCP) {
548                         if (tcphdr_ok(skb)) {
549                                 struct tcphdr *tcp = tcp_hdr(skb);
550                                 key->tp_src = tcp->source;
551                                 key->tp_dst = tcp->dest;
552                         }
553                 } else if (key->nw_proto == NEXTHDR_UDP) {
554                         if (udphdr_ok(skb)) {
555                                 struct udphdr *udp = udp_hdr(skb);
556                                 key->tp_src = udp->source;
557                                 key->tp_dst = udp->dest;
558                         }
559                 } else if (key->nw_proto == NEXTHDR_ICMP) {
560                         if (icmp6hdr_ok(skb)) {
561                                 int error = parse_icmpv6(skb, key, nh_len);
562                                 if (error < 0)
563                                         return error;
564                         }
565                 }
566         }
567         return 0;
568 }
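/* A minimal sketch of how a receive path might use flow_extract() together
 * with flow_hash() and flow_cmp() below; 'in_port' and the lookup helper are
 * hypothetical stand-ins for the real code in datapath.c:
 *
 *	struct sw_flow_key key;
 *	bool is_frag;
 *
 *	if (!flow_extract(skb, in_port, &key, &is_frag) && !is_frag)
 *		node = lookup_flow(table, &key, flow_hash(&key), flow_cmp);
 *
 * The caller can use is_frag to give IPv4 fragments separate treatment before
 * attempting any per-connection matching.
 */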
569
570 u32 flow_hash(const struct sw_flow_key *key)
571 {
572         return jhash2((u32 *)key, sizeof(*key) / sizeof(u32), hash_seed);
573 }
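/* jhash2() walks the key as an array of 32-bit words, and flow_cmp() below
 * compares keys with memcmp(), so every byte of struct sw_flow_key matters,
 * including padding.  That is why flow_extract() and flow_from_nlattrs()
 * both zero the key with memset() before filling it in.
 */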
574
575 int flow_cmp(const struct tbl_node *node, void *key2_)
576 {
577         const struct sw_flow_key *key1 = &flow_cast(node)->key;
578         const struct sw_flow_key *key2 = key2_;
579
580         return !memcmp(key1, key2, sizeof(struct sw_flow_key));
581 }
582
583 /**
584  * flow_from_nlattrs - parses Netlink attributes into a flow key.
585  * @swkey: receives the extracted flow key.
586  * @key: Netlink attribute holding nested %ODP_KEY_ATTR_* Netlink attribute
587  * sequence.
588  *
589  * This state machine accepts the following forms, with [] for optional
590  * elements and | for alternatives:
591  *
592  * [tun_id] in_port ethernet [8021q] [ethertype \
593  *              [IPv4 [TCP|UDP|ICMP] | IPv6 [TCP|UDP|ICMPv6 [ND]] | ARP]]
594  */
595 int flow_from_nlattrs(struct sw_flow_key *swkey, const struct nlattr *attr)
596 {
597         const struct nlattr *nla;
598         u16 prev_type;
599         int rem;
600
601         memset(swkey, 0, sizeof(*swkey));
602         swkey->dl_type = htons(ETH_P_802_2);
603
604         prev_type = ODP_KEY_ATTR_UNSPEC;
605         nla_for_each_nested(nla, attr, rem) {
606                 static const u32 key_lens[ODP_KEY_ATTR_MAX + 1] = {
607                         [ODP_KEY_ATTR_TUN_ID] = 8,
608                         [ODP_KEY_ATTR_IN_PORT] = 4,
609                         [ODP_KEY_ATTR_ETHERNET] = sizeof(struct odp_key_ethernet),
610                         [ODP_KEY_ATTR_8021Q] = sizeof(struct odp_key_8021q),
611                         [ODP_KEY_ATTR_ETHERTYPE] = 2,
612                         [ODP_KEY_ATTR_IPV4] = sizeof(struct odp_key_ipv4),
613                         [ODP_KEY_ATTR_IPV6] = sizeof(struct odp_key_ipv6),
614                         [ODP_KEY_ATTR_TCP] = sizeof(struct odp_key_tcp),
615                         [ODP_KEY_ATTR_UDP] = sizeof(struct odp_key_udp),
616                         [ODP_KEY_ATTR_ICMP] = sizeof(struct odp_key_icmp),
617                         [ODP_KEY_ATTR_ICMPV6] = sizeof(struct odp_key_icmpv6),
618                         [ODP_KEY_ATTR_ARP] = sizeof(struct odp_key_arp),
619                         [ODP_KEY_ATTR_ND] = sizeof(struct odp_key_nd),
620                 };
621
622                 const struct odp_key_ethernet *eth_key;
623                 const struct odp_key_8021q *q_key;
624                 const struct odp_key_ipv4 *ipv4_key;
625                 const struct odp_key_ipv6 *ipv6_key;
626                 const struct odp_key_tcp *tcp_key;
627                 const struct odp_key_udp *udp_key;
628                 const struct odp_key_icmp *icmp_key;
629                 const struct odp_key_icmpv6 *icmpv6_key;
630                 const struct odp_key_arp *arp_key;
631                 const struct odp_key_nd *nd_key;
632
633                 int type = nla_type(nla);
634
635                 if (type > ODP_KEY_ATTR_MAX || nla_len(nla) != key_lens[type])
636                         return -EINVAL;
637
638 #define TRANSITION(PREV_TYPE, TYPE) (((PREV_TYPE) << 16) | (TYPE))
639                 switch (TRANSITION(prev_type, type)) {
640                 case TRANSITION(ODP_KEY_ATTR_UNSPEC, ODP_KEY_ATTR_TUN_ID):
641                         swkey->tun_id = nla_get_be64(nla);
642                         break;
643
644                 case TRANSITION(ODP_KEY_ATTR_UNSPEC, ODP_KEY_ATTR_IN_PORT):
645                 case TRANSITION(ODP_KEY_ATTR_TUN_ID, ODP_KEY_ATTR_IN_PORT):
646                         if (nla_get_u32(nla) >= DP_MAX_PORTS)
647                                 return -EINVAL;
648                         swkey->in_port = nla_get_u32(nla);
649                         break;
650
651                 case TRANSITION(ODP_KEY_ATTR_IN_PORT, ODP_KEY_ATTR_ETHERNET):
652                         eth_key = nla_data(nla);
653                         memcpy(swkey->dl_src, eth_key->eth_src, ETH_ALEN);
654                         memcpy(swkey->dl_dst, eth_key->eth_dst, ETH_ALEN);
655                         break;
656
657                 case TRANSITION(ODP_KEY_ATTR_ETHERNET, ODP_KEY_ATTR_8021Q):
658                         q_key = nla_data(nla);
659                         /* Only standard 0x8100 VLANs currently supported. */
660                         if (q_key->q_tpid != htons(ETH_P_8021Q))
661                                 return -EINVAL;
662                         if (q_key->q_tci & htons(VLAN_TAG_PRESENT))
663                                 return -EINVAL;
664                         swkey->dl_tci = q_key->q_tci | htons(VLAN_TAG_PRESENT);
665                         break;
666
667                 case TRANSITION(ODP_KEY_ATTR_8021Q, ODP_KEY_ATTR_ETHERTYPE):
668                 case TRANSITION(ODP_KEY_ATTR_ETHERNET, ODP_KEY_ATTR_ETHERTYPE):
669                         swkey->dl_type = nla_get_be16(nla);
670                         if (ntohs(swkey->dl_type) < 1536)
671                                 return -EINVAL;
672                         break;
673
674                 case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_IPV4):
675                         if (swkey->dl_type != htons(ETH_P_IP))
676                                 return -EINVAL;
677                         ipv4_key = nla_data(nla);
678                         swkey->ipv4_src = ipv4_key->ipv4_src;
679                         swkey->ipv4_dst = ipv4_key->ipv4_dst;
680                         swkey->nw_proto = ipv4_key->ipv4_proto;
681                         swkey->nw_tos = ipv4_key->ipv4_tos;
682                         if (swkey->nw_tos & INET_ECN_MASK)
683                                 return -EINVAL;
684                         break;
685
686                 case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_IPV6):
687                         if (swkey->dl_type != htons(ETH_P_IPV6))
688                                 return -EINVAL;
689                         ipv6_key = nla_data(nla);
690                         memcpy(swkey->ipv6_src, ipv6_key->ipv6_src,
691                                         sizeof(swkey->ipv6_src));
692                         memcpy(swkey->ipv6_dst, ipv6_key->ipv6_dst,
693                                         sizeof(swkey->ipv6_dst));
694                         swkey->nw_proto = ipv6_key->ipv6_proto;
695                         swkey->nw_tos = ipv6_key->ipv6_tos;
696                         if (swkey->nw_tos & INET_ECN_MASK)
697                                 return -EINVAL;
698                         break;
699
700                 case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_TCP):
701                 case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_TCP):
702                         if (swkey->nw_proto != IPPROTO_TCP)
703                                 return -EINVAL;
704                         tcp_key = nla_data(nla);
705                         swkey->tp_src = tcp_key->tcp_src;
706                         swkey->tp_dst = tcp_key->tcp_dst;
707                         break;
708
709                 case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_UDP):
710                 case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_UDP):
711                         if (swkey->nw_proto != IPPROTO_UDP)
712                                 return -EINVAL;
713                         udp_key = nla_data(nla);
714                         swkey->tp_src = udp_key->udp_src;
715                         swkey->tp_dst = udp_key->udp_dst;
716                         break;
717
718                 case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_ICMP):
719                         if (swkey->nw_proto != IPPROTO_ICMP)
720                                 return -EINVAL;
721                         icmp_key = nla_data(nla);
722                         swkey->tp_src = htons(icmp_key->icmp_type);
723                         swkey->tp_dst = htons(icmp_key->icmp_code);
724                         break;
725
726                 case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_ICMPV6):
727                         if (swkey->nw_proto != IPPROTO_ICMPV6)
728                                 return -EINVAL;
729                         icmpv6_key = nla_data(nla);
730                         swkey->tp_src = htons(icmpv6_key->icmpv6_type);
731                         swkey->tp_dst = htons(icmpv6_key->icmpv6_code);
732                         break;
733
734                 case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_ARP):
735                         if (swkey->dl_type != htons(ETH_P_ARP))
736                                 return -EINVAL;
737                         arp_key = nla_data(nla);
738                         swkey->ipv4_src = arp_key->arp_sip;
739                         swkey->ipv4_dst = arp_key->arp_tip;
740                         if (arp_key->arp_op & htons(0xff00))
741                                 return -EINVAL;
742                         swkey->nw_proto = ntohs(arp_key->arp_op);
743                         memcpy(swkey->arp_sha, arp_key->arp_sha, ETH_ALEN);
744                         memcpy(swkey->arp_tha, arp_key->arp_tha, ETH_ALEN);
745                         break;
746
747                 case TRANSITION(ODP_KEY_ATTR_ICMPV6, ODP_KEY_ATTR_ND):
748                         if (swkey->tp_src != htons(NDISC_NEIGHBOUR_SOLICITATION)
749                                         && swkey->tp_src != htons(NDISC_NEIGHBOUR_ADVERTISEMENT))
750                                 return -EINVAL;
751                         nd_key = nla_data(nla);
752                         memcpy(swkey->nd_target, nd_key->nd_target,
753                                         sizeof(swkey->nd_target));
754                         memcpy(swkey->arp_sha, nd_key->nd_sll, ETH_ALEN);
755                         memcpy(swkey->arp_tha, nd_key->nd_tll, ETH_ALEN);
756                         break;
757
758                 default:
759                         return -EINVAL;
760                 }
761
762                 prev_type = type;
763         }
764         if (rem)
765                 return -EINVAL;
766
767         switch (prev_type) {
768         case ODP_KEY_ATTR_UNSPEC:
769                 return -EINVAL;
770
771         case ODP_KEY_ATTR_TUN_ID:
772         case ODP_KEY_ATTR_IN_PORT:
773                 return -EINVAL;
774
775         case ODP_KEY_ATTR_ETHERNET:
776         case ODP_KEY_ATTR_8021Q:
777                 return 0;
778
779         case ODP_KEY_ATTR_ETHERTYPE:
780                 if (swkey->dl_type == htons(ETH_P_IP) ||
781                     swkey->dl_type == htons(ETH_P_ARP))
782                         return -EINVAL;
783                 return 0;
784
785         case ODP_KEY_ATTR_IPV4:
786                 if (swkey->nw_proto == IPPROTO_TCP ||
787                     swkey->nw_proto == IPPROTO_UDP ||
788                     swkey->nw_proto == IPPROTO_ICMP)
789                         return -EINVAL;
790                 return 0;
791
792         case ODP_KEY_ATTR_IPV6:
793                 if (swkey->nw_proto == IPPROTO_TCP ||
794                     swkey->nw_proto == IPPROTO_UDP ||
795                     swkey->nw_proto == IPPROTO_ICMPV6)
796                         return -EINVAL;
797                 return 0;
798
799         case ODP_KEY_ATTR_ICMPV6:
800                 if (swkey->tp_src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
801                     swkey->tp_src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT))
802                         return -EINVAL;
803                 return 0;
804
805         case ODP_KEY_ATTR_TCP:
806         case ODP_KEY_ATTR_UDP:
807         case ODP_KEY_ATTR_ICMP:
808         case ODP_KEY_ATTR_ARP:
809         case ODP_KEY_ATTR_ND:
810                 return 0;
811         }
812
813         WARN_ON_ONCE(1);
814         return -EINVAL;
815 }
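/* One concrete sequence accepted by the state machine above, for a TCP/IPv4
 * flow arriving untagged on port 1 (attribute payloads shown informally):
 *
 *	ODP_KEY_ATTR_IN_PORT    1
 *	ODP_KEY_ATTR_ETHERNET   { eth_src, eth_dst }
 *	ODP_KEY_ATTR_ETHERTYPE  0x0800
 *	ODP_KEY_ATTR_IPV4       { ipv4_src, ipv4_dst, proto = IPPROTO_TCP, tos }
 *	ODP_KEY_ATTR_TCP        { tcp_src, tcp_dst }
 *
 * Each line corresponds to one TRANSITION() arm in the switch; omitting the
 * optional tun_id and 8021q attributes is allowed by the grammar in the
 * comment above flow_from_nlattrs().
 */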
816
817 int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
818 {
819         struct odp_key_ethernet *eth_key;
820         struct nlattr *nla;
821
822         if (swkey->tun_id != cpu_to_be64(0))
823                 NLA_PUT_BE64(skb, ODP_KEY_ATTR_TUN_ID, swkey->tun_id);
824
825         NLA_PUT_U32(skb, ODP_KEY_ATTR_IN_PORT, swkey->in_port);
826
827         nla = nla_reserve(skb, ODP_KEY_ATTR_ETHERNET, sizeof(*eth_key));
828         if (!nla)
829                 goto nla_put_failure;
830         eth_key = nla_data(nla);
831         memcpy(eth_key->eth_src, swkey->dl_src, ETH_ALEN);
832         memcpy(eth_key->eth_dst, swkey->dl_dst, ETH_ALEN);
833
834         if (swkey->dl_tci != htons(0)) {
835                 struct odp_key_8021q q_key;
836
837                 q_key.q_tpid = htons(ETH_P_8021Q);
838                 q_key.q_tci = swkey->dl_tci & ~htons(VLAN_TAG_PRESENT);
839                 NLA_PUT(skb, ODP_KEY_ATTR_8021Q, sizeof(q_key), &q_key);
840         }
841
842         if (swkey->dl_type == htons(ETH_P_802_2))
843                 return 0;
844
845         NLA_PUT_BE16(skb, ODP_KEY_ATTR_ETHERTYPE, swkey->dl_type);
846
847         if (swkey->dl_type == htons(ETH_P_IP)) {
848                 struct odp_key_ipv4 *ipv4_key;
849
850                 nla = nla_reserve(skb, ODP_KEY_ATTR_IPV4, sizeof(*ipv4_key));
851                 if (!nla)
852                         goto nla_put_failure;
853                 ipv4_key = nla_data(nla);
854                 memset(ipv4_key, 0, sizeof(struct odp_key_ipv4));
855                 ipv4_key->ipv4_src = swkey->ipv4_src;
856                 ipv4_key->ipv4_dst = swkey->ipv4_dst;
857                 ipv4_key->ipv4_proto = swkey->nw_proto;
858                 ipv4_key->ipv4_tos = swkey->nw_tos;
859         } else if (swkey->dl_type == htons(ETH_P_IPV6)) {
860                 struct odp_key_ipv6 *ipv6_key;
861
862                 nla = nla_reserve(skb, ODP_KEY_ATTR_IPV6, sizeof(*ipv6_key));
863                 if (!nla)
864                         goto nla_put_failure;
865                 ipv6_key = nla_data(nla);
866                 memset(ipv6_key, 0, sizeof(struct odp_key_ipv6));
867                 memcpy(ipv6_key->ipv6_src, swkey->ipv6_src,
868                                 sizeof(ipv6_key->ipv6_src));
869                 memcpy(ipv6_key->ipv6_dst, swkey->ipv6_dst,
870                                 sizeof(ipv6_key->ipv6_dst));
871                 ipv6_key->ipv6_proto = swkey->nw_proto;
872                 ipv6_key->ipv6_tos = swkey->nw_tos;
873         } else if (swkey->dl_type == htons(ETH_P_ARP)) {
874                 struct odp_key_arp *arp_key;
875
876                 nla = nla_reserve(skb, ODP_KEY_ATTR_ARP, sizeof(*arp_key));
877                 if (!nla)
878                         goto nla_put_failure;
879                 arp_key = nla_data(nla);
880                 memset(arp_key, 0, sizeof(struct odp_key_arp));
881                 arp_key->arp_sip = swkey->ipv4_src;
882                 arp_key->arp_tip = swkey->ipv4_dst;
883                 arp_key->arp_op = htons(swkey->nw_proto);
884                 memcpy(arp_key->arp_sha, swkey->arp_sha, ETH_ALEN);
885                 memcpy(arp_key->arp_tha, swkey->arp_tha, ETH_ALEN);
886         }
887
888         if (swkey->dl_type == htons(ETH_P_IP)
889                         || swkey->dl_type == htons(ETH_P_IPV6)) {
890
891                 if (swkey->nw_proto == IPPROTO_TCP) {
892                         struct odp_key_tcp *tcp_key;
893
894                         nla = nla_reserve(skb, ODP_KEY_ATTR_TCP, sizeof(*tcp_key));
895                         if (!nla)
896                                 goto nla_put_failure;
897                         tcp_key = nla_data(nla);
898                         tcp_key->tcp_src = swkey->tp_src;
899                         tcp_key->tcp_dst = swkey->tp_dst;
900                 } else if (swkey->nw_proto == IPPROTO_UDP) {
901                         struct odp_key_udp *udp_key;
902
903                         nla = nla_reserve(skb, ODP_KEY_ATTR_UDP, sizeof(*udp_key));
904                         if (!nla)
905                                 goto nla_put_failure;
906                         udp_key = nla_data(nla);
907                         udp_key->udp_src = swkey->tp_src;
908                         udp_key->udp_dst = swkey->tp_dst;
909                 } else if (swkey->dl_type == htons(ETH_P_IP)
910                                 && swkey->nw_proto == IPPROTO_ICMP) {
911                         struct odp_key_icmp *icmp_key;
912
913                         nla = nla_reserve(skb, ODP_KEY_ATTR_ICMP, sizeof(*icmp_key));
914                         if (!nla)
915                                 goto nla_put_failure;
916                         icmp_key = nla_data(nla);
917                         icmp_key->icmp_type = ntohs(swkey->tp_src);
918                         icmp_key->icmp_code = ntohs(swkey->tp_dst);
919                 } else if (swkey->dl_type == htons(ETH_P_IPV6)
920                                 && swkey->nw_proto == IPPROTO_ICMPV6) {
921                         struct odp_key_icmpv6 *icmpv6_key;
922
923                         nla = nla_reserve(skb, ODP_KEY_ATTR_ICMPV6, sizeof(*icmpv6_key));
924                         if (!nla)
925                                 goto nla_put_failure;
926                         icmpv6_key = nla_data(nla);
927                         icmpv6_key->icmpv6_type = ntohs(swkey->tp_src);
928                         icmpv6_key->icmpv6_code = ntohs(swkey->tp_dst);
929
930                         if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION
931                                         || icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
932                                 struct odp_key_nd *nd_key;
933
934                                 nla = nla_reserve(skb, ODP_KEY_ATTR_ND, sizeof(*nd_key));
935                                 if (!nla)
936                                         goto nla_put_failure;
937                                 nd_key = nla_data(nla);
938                                 memcpy(nd_key->nd_target, swkey->nd_target,
939                                                         sizeof(nd_key->nd_target));
940                                 memcpy(nd_key->nd_sll, swkey->arp_sha, ETH_ALEN);
941                                 memcpy(nd_key->nd_tll, swkey->arp_tha, ETH_ALEN);
942                         }
943                 }
944         }
945
946         return 0;
947
948 nla_put_failure:
949         return -EMSGSIZE;
950 }
951
952 /* Initializes the flow module.
953  * Returns zero if successful or a negative error code. */
954 int flow_init(void)
955 {
956         flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
957                                         0, NULL);
958         if (flow_cache == NULL)
959                 return -ENOMEM;
960
961         get_random_bytes(&hash_seed, sizeof(hash_seed));
962
963         return 0;
964 }
965
966 /* Uninitializes the flow module. */
967 void flow_exit(void)
968 {
969         kmem_cache_destroy(flow_cache);
970 }