/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include "flow.h"
#include "datapath.h"
#include <asm/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>
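
/* Cache for struct sw_flow allocations, and a boot-time random seed that
 * perturbs flow_hash() so the flow table's bucket distribution is not
 * predictable from outside the kernel. */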
static struct kmem_cache *flow_cache;
static unsigned int hash_seed __read_mostly;

static inline bool arphdr_ok(struct sk_buff *skb)
{
	return skb->len >= skb_network_offset(skb) + sizeof(struct arp_eth_header);
}

static inline int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;

	if (skb->len < nh_ofs + sizeof(struct iphdr))
		return -EINVAL;

	ip_len = ip_hdrlen(skb);
	if (ip_len < sizeof(struct iphdr) || skb->len < nh_ofs + ip_len)
		return -EINVAL;

	/*
	 * Pull enough header bytes to account for the IP header plus the
	 * longest transport header that we parse, currently 20 bytes for TCP.
	 */
	if (!pskb_may_pull(skb, min(nh_ofs + ip_len + 20, skb->len)))
		return -ENOMEM;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static inline bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);

	if (skb->len >= th_ofs + sizeof(struct tcphdr)) {
		int tcp_len = tcp_hdrlen(skb);

		return (tcp_len >= sizeof(struct tcphdr)
			&& skb->len >= th_ofs + tcp_len);
	}
	return false;
}

static inline bool udphdr_ok(struct sk_buff *skb)
{
	return skb->len >= skb_transport_offset(skb) + sizeof(struct udphdr);
}

static inline bool icmphdr_ok(struct sk_buff *skb)
{
	return skb->len >= skb_transport_offset(skb) + sizeof(struct icmphdr);
}
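
/* Returns the time that 'flow_jiffies' (a flow's last-used time, in jiffies)
 * corresponds to, expressed in milliseconds on the clock returned by
 * ktime_get_ts(): the current time minus how long the flow has been idle. */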
u64 flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
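
/* Parses the IPv6 header and walks any extension headers, filling in the
 * addresses, traffic class, and final next-header protocol in 'key'.
 * Returns the total length of the IPv6 header chain on success, or a
 * negative errno value if the packet is malformed or too short. */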
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	int payload_len;
	struct ipv6hdr *nh;
	uint8_t nexthdr;

	if (unlikely(skb->len < nh_ofs + sizeof(*nh)))
		return -EINVAL;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;
	payload_len = ntohs(nh->payload_len);

	memcpy(key->ipv6_src, nh->saddr.in6_u.u6_addr8, sizeof(key->ipv6_src));
	memcpy(key->ipv6_dst, nh->daddr.in6_u.u6_addr8, sizeof(key->ipv6_dst));
	key->nw_tos = ipv6_get_dsfield(nh) & ~INET_ECN_MASK;
	key->nw_proto = NEXTHDR_NONE;

	/* We don't process jumbograms. */
	if (!payload_len)
		return -EINVAL;

	if (unlikely(skb->len < nh_ofs + sizeof(*nh) + payload_len))
		return -EINVAL;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr);
	if (payload_ofs < 0)
		return -EINVAL;
	nh_len = payload_ofs - nh_ofs;

	/* Ensure that the payload length claimed is at least large enough
	 * for the headers we've already processed. */
	if (payload_len < nh_len - sizeof(*nh))
		return -EINVAL;

	/* Pull enough header bytes to account for the IP header plus the
	 * longest transport header that we parse, currently 20 bytes for TCP.
	 * To dig deeper than the transport header, transport parsers may need
	 * to pull more header bytes.
	 */
	if (unlikely(!pskb_may_pull(skb, min(nh_ofs + nh_len + 20, skb->len))))
		return -ENOMEM;

	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->nw_proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return skb->len >= skb_transport_offset(skb) + sizeof(struct icmp6hdr);
}
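
/* Byte 13 of the TCP header holds the flag bits; the mask keeps the six
 * classic flags (FIN, SYN, RST, PSH, ACK, URG) and discards the ECN bits. */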
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
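
/* Updates the statistics kept in 'flow' to account for 'skb': the last-used
 * time, the packet and byte counts, and the union of TCP flags seen. */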
void flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if (flow->key.dl_type == htons(ETH_P_IP) &&
	    flow->key.nw_proto == IPPROTO_TCP) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock_bh(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock_bh(&flow->lock);
}

struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
{
	int actions_len = nla_len(actions);
	struct sw_flow_actions *sfa;

	/* At least DP_MAX_PORTS actions are required to be able to flood a
	 * packet to every port.  Factor of 2 allows for setting VLAN tags,
	 * etc. */
	if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = actions_len;
	memcpy(sfa->actions, nla_data(actions), actions_len);
	return sfa;
}

struct sw_flow *flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	atomic_set(&flow->refcnt, 1);
	flow->sf_acts = NULL;

	return flow;
}

void flow_free_tbl(struct tbl_node *node)
{
	struct sw_flow *flow = flow_cast(node);

	flow_put(flow);
}

/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_put(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}

void flow_hold(struct sw_flow *flow)
{
	atomic_inc(&flow->refcnt);
}

void flow_put(struct sw_flow *flow)
{
	if (unlikely(!flow))
		return;

	if (atomic_dec_and_test(&flow->refcnt)) {
		kfree((struct sw_flow_actions __force *)flow->sf_acts);
		kmem_cache_free(flow_cache, flow);
	}
}

/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

static void parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type;	/* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (skb->len < sizeof(struct qtag_prefix) + sizeof(__be16))
		return;

	qp = (struct qtag_prefix *) skb->data;
	key->dl_tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));
}
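
/* Returns the EtherType of the frame at skb->data, pulling the bytes that
 * encode it.  Values of 1536 (0x600) or greater are Ethernet II EtherTypes
 * and are returned as-is; smaller values are 802.3 length fields, in which
 * case the EtherType is taken from an 802.2 LLC/SNAP header if one is
 * present, and otherwise reported as ETH_P_802_2. */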
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;	/* Always 0xAA */
		u8  ssap;	/* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)
		return proto;

	if (unlikely(skb->len < sizeof(struct llc_snap_hdr)))
		return htons(ETH_P_802_2);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}

/**
 * flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @is_frag: set to 1 if @skb contains an IPv4 fragment, or to 0 if @skb does
 * not contain an IPv4 packet or if it is not a fragment.
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		 bool *is_frag)
{
	struct ethhdr *eth;
	int error = 0;

	memset(key, 0, sizeof(*key));
	key->tun_id = OVS_CB(skb)->tun_id;
	key->in_port = in_port;
	*is_frag = false;

	/*
	 * We would really like to pull as many bytes as we could possibly
	 * want to parse into the linear data area.  Currently, for IPv4,
	 * that is:
	 *
	 *    14     Ethernet header
	 *     4     VLAN header
	 *    60     max IP header with options
	 *    20     max TCP/UDP/ICMP header (don't care about options)
	 *    ----
	 *    98
	 *
	 * But Xen only allocates 64 or 72 bytes for the linear data area in
	 * netback, which means that we would reallocate and copy the skb's
	 * linear data on every packet if we did that.  So instead just pull 64
	 * bytes, which is always sufficient without IP options, and then check
	 * whether we need to pull more later when we look at the IP header.
	 */
	if (!pskb_may_pull(skb, min(skb->len, 64u)))
		return -ENOMEM;

	skb_reset_mac_header(skb);

	/* Link layer. */
	eth = eth_hdr(skb);
	memcpy(key->dl_src, eth->h_source, ETH_ALEN);
	memcpy(key->dl_dst, eth->h_dest, ETH_ALEN);

	/* dl_type, dl_vlan, dl_vlan_pcp. */
	__skb_pull(skb, 2 * ETH_ALEN);
	if (eth->h_proto == htons(ETH_P_8021Q))
		parse_vlan(skb, key);
	key->dl_type = parse_ethertype(skb);
	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - (unsigned char *)eth);

	/* Network layer. */
	if (key->dl_type == htons(ETH_P_IP)) {
		struct iphdr *nh;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			goto out;
		}

		nh = ip_hdr(skb);
		key->ipv4_src = nh->saddr;
		key->ipv4_dst = nh->daddr;
		key->nw_tos = nh->tos & ~INET_ECN_MASK;
		key->nw_proto = nh->protocol;

		/* Transport layer. */
		if (!(nh->frag_off & htons(IP_MF | IP_OFFSET)) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP)) {
			if (key->nw_proto == IPPROTO_TCP) {
				if (tcphdr_ok(skb)) {
					struct tcphdr *tcp = tcp_hdr(skb);
					key->tp_src = tcp->source;
					key->tp_dst = tcp->dest;
				}
			} else if (key->nw_proto == IPPROTO_UDP) {
				if (udphdr_ok(skb)) {
					struct udphdr *udp = udp_hdr(skb);
					key->tp_src = udp->source;
					key->tp_dst = udp->dest;
				}
			} else if (key->nw_proto == IPPROTO_ICMP) {
				if (icmphdr_ok(skb)) {
					struct icmphdr *icmp = icmp_hdr(skb);
					/* The ICMP type and code fields use the 16-bit
					 * transport port fields, so we need to store them
					 * in 16-bit network byte order. */
					key->tp_src = htons(icmp->type);
					key->tp_dst = htons(icmp->code);
				}
			}
		} else {
			*is_frag = true;
		}

	} else if (key->dl_type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->nw_proto = ntohs(arp->ar_op);

			if (key->nw_proto == ARPOP_REQUEST
					|| key->nw_proto == ARPOP_REPLY) {
				memcpy(&key->ipv4_src, arp->ar_sip, sizeof(key->ipv4_src));
				memcpy(&key->ipv4_dst, arp->ar_tip, sizeof(key->ipv4_dst));
				memcpy(key->arp_sha, arp->ar_sha, ETH_ALEN);
				memcpy(key->arp_tha, arp->ar_tha, ETH_ALEN);
			}
		}
	} else if (key->dl_type == htons(ETH_P_IPV6)) {
		int nh_len;	/* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			goto out;
		}

		/* Transport layer. */
		if (key->nw_proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp_src = tcp->source;
				key->tp_dst = tcp->dest;
			}
		} else if (key->nw_proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp_src = udp->source;
				key->tp_dst = udp->dest;
			}
		} else if (key->nw_proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				struct icmp6hdr *icmp = icmp6_hdr(skb);
				/* The ICMPv6 type and code fields use the 16-bit
				 * transport port fields, so we need to store them
				 * in 16-bit network byte order. */
				key->tp_src = htons(icmp->icmp6_type);
				key->tp_dst = htons(icmp->icmp6_code);
			}
		}
	}

out:
	return error;
}
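
/* Hashes the flow key as an array of u32 words, mixing in the random
 * hash_seed chosen when the flow module is initialized, so that bucket
 * placement in the flow table is not externally predictable.  flow_cmp()
 * is the comparison the hash table uses to resolve collisions. */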
u32 flow_hash(const struct sw_flow_key *key)
{
	return jhash2((u32 *)key, sizeof(*key) / sizeof(u32), hash_seed);
}

int flow_cmp(const struct tbl_node *node, void *key2_)
{
	const struct sw_flow_key *key1 = &flow_cast(node)->key;
	const struct sw_flow_key *key2 = key2_;

	return !memcmp(key1, key2, sizeof(struct sw_flow_key));
}

/**
 * flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key: Netlink attribute holding nested %ODP_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This state machine accepts the following forms, with [] for optional
 * elements and | for alternatives:
 *
 * [tun_id] in_port ethernet [8021q] [ethertype \
 *              [IPv4 [TCP|UDP|ICMP] | IPv6 [TCP|UDP|ICMPv6] | ARP]]
 */
int flow_from_nlattrs(struct sw_flow_key *swkey, const struct nlattr *attr)
{
	const struct nlattr *nla;
	u16 prev_type;
	int rem;

	memset(swkey, 0, sizeof(*swkey));
	swkey->dl_type = htons(ETH_P_802_2);

	prev_type = ODP_KEY_ATTR_UNSPEC;
	nla_for_each_nested(nla, attr, rem) {
		static const u32 key_lens[ODP_KEY_ATTR_MAX + 1] = {
			[ODP_KEY_ATTR_TUN_ID] = 8,
			[ODP_KEY_ATTR_IN_PORT] = 4,
			[ODP_KEY_ATTR_ETHERNET] = sizeof(struct odp_key_ethernet),
			[ODP_KEY_ATTR_8021Q] = sizeof(struct odp_key_8021q),
			[ODP_KEY_ATTR_ETHERTYPE] = 2,
			[ODP_KEY_ATTR_IPV4] = sizeof(struct odp_key_ipv4),
			[ODP_KEY_ATTR_IPV6] = sizeof(struct odp_key_ipv6),
			[ODP_KEY_ATTR_TCP] = sizeof(struct odp_key_tcp),
			[ODP_KEY_ATTR_UDP] = sizeof(struct odp_key_udp),
			[ODP_KEY_ATTR_ICMP] = sizeof(struct odp_key_icmp),
			[ODP_KEY_ATTR_ICMPV6] = sizeof(struct odp_key_icmpv6),
			[ODP_KEY_ATTR_ARP] = sizeof(struct odp_key_arp),
		};

		const struct odp_key_ethernet *eth_key;
		const struct odp_key_8021q *q_key;
		const struct odp_key_ipv4 *ipv4_key;
		const struct odp_key_ipv6 *ipv6_key;
		const struct odp_key_tcp *tcp_key;
		const struct odp_key_udp *udp_key;
		const struct odp_key_icmp *icmp_key;
		const struct odp_key_icmpv6 *icmpv6_key;
		const struct odp_key_arp *arp_key;

		int type = nla_type(nla);

		if (type > ODP_KEY_ATTR_MAX || nla_len(nla) != key_lens[type])
			goto invalid;

#define TRANSITION(PREV_TYPE, TYPE) (((PREV_TYPE) << 16) | (TYPE))
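
		/* Encode the previous and current attribute types into a single
		 * value so that one switch statement can validate both the
		 * ordering of the attributes and their contents. */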
		switch (TRANSITION(prev_type, type)) {
		case TRANSITION(ODP_KEY_ATTR_UNSPEC, ODP_KEY_ATTR_TUN_ID):
			swkey->tun_id = nla_get_be64(nla);
			break;

		case TRANSITION(ODP_KEY_ATTR_UNSPEC, ODP_KEY_ATTR_IN_PORT):
		case TRANSITION(ODP_KEY_ATTR_TUN_ID, ODP_KEY_ATTR_IN_PORT):
			if (nla_get_u32(nla) >= DP_MAX_PORTS)
				goto invalid;
			swkey->in_port = nla_get_u32(nla);
			break;

		case TRANSITION(ODP_KEY_ATTR_IN_PORT, ODP_KEY_ATTR_ETHERNET):
			eth_key = nla_data(nla);
			memcpy(swkey->dl_src, eth_key->eth_src, ETH_ALEN);
			memcpy(swkey->dl_dst, eth_key->eth_dst, ETH_ALEN);
			break;

		case TRANSITION(ODP_KEY_ATTR_ETHERNET, ODP_KEY_ATTR_8021Q):
			q_key = nla_data(nla);
			/* Only standard 0x8100 VLANs currently supported. */
			if (q_key->q_tpid != htons(ETH_P_8021Q))
				goto invalid;
			if (q_key->q_tci & htons(VLAN_TAG_PRESENT))
				goto invalid;
			swkey->dl_tci = q_key->q_tci | htons(VLAN_TAG_PRESENT);
			break;

		case TRANSITION(ODP_KEY_ATTR_8021Q, ODP_KEY_ATTR_ETHERTYPE):
		case TRANSITION(ODP_KEY_ATTR_ETHERNET, ODP_KEY_ATTR_ETHERTYPE):
			swkey->dl_type = nla_get_be16(nla);
			if (ntohs(swkey->dl_type) < 1536)
				goto invalid;
			break;

		case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_IPV4):
			if (swkey->dl_type != htons(ETH_P_IP))
				goto invalid;
			ipv4_key = nla_data(nla);
			swkey->ipv4_src = ipv4_key->ipv4_src;
			swkey->ipv4_dst = ipv4_key->ipv4_dst;
			swkey->nw_proto = ipv4_key->ipv4_proto;
			swkey->nw_tos = ipv4_key->ipv4_tos;
			if (swkey->nw_tos & INET_ECN_MASK)
				goto invalid;
			break;

		case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_IPV6):
			if (swkey->dl_type != htons(ETH_P_IPV6))
				goto invalid;
			ipv6_key = nla_data(nla);
			memcpy(swkey->ipv6_src, ipv6_key->ipv6_src,
					sizeof(swkey->ipv6_src));
			memcpy(swkey->ipv6_dst, ipv6_key->ipv6_dst,
					sizeof(swkey->ipv6_dst));
			swkey->nw_proto = ipv6_key->ipv6_proto;
			swkey->nw_tos = ipv6_key->ipv6_tos;
			if (swkey->nw_tos & INET_ECN_MASK)
				goto invalid;
			break;

		case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_TCP):
		case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_TCP):
			if (swkey->nw_proto != IPPROTO_TCP)
				goto invalid;
			tcp_key = nla_data(nla);
			swkey->tp_src = tcp_key->tcp_src;
			swkey->tp_dst = tcp_key->tcp_dst;
			break;

		case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_UDP):
		case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_UDP):
			if (swkey->nw_proto != IPPROTO_UDP)
				goto invalid;
			udp_key = nla_data(nla);
			swkey->tp_src = udp_key->udp_src;
			swkey->tp_dst = udp_key->udp_dst;
			break;

		case TRANSITION(ODP_KEY_ATTR_IPV4, ODP_KEY_ATTR_ICMP):
			if (swkey->nw_proto != IPPROTO_ICMP)
				goto invalid;
			icmp_key = nla_data(nla);
			swkey->tp_src = htons(icmp_key->icmp_type);
			swkey->tp_dst = htons(icmp_key->icmp_code);
			break;

		case TRANSITION(ODP_KEY_ATTR_IPV6, ODP_KEY_ATTR_ICMPV6):
			if (swkey->nw_proto != IPPROTO_ICMPV6)
				goto invalid;
			icmpv6_key = nla_data(nla);
			swkey->tp_src = htons(icmpv6_key->icmpv6_type);
			swkey->tp_dst = htons(icmpv6_key->icmpv6_code);
			break;

		case TRANSITION(ODP_KEY_ATTR_ETHERTYPE, ODP_KEY_ATTR_ARP):
			if (swkey->dl_type != htons(ETH_P_ARP))
				goto invalid;
			arp_key = nla_data(nla);
			swkey->ipv4_src = arp_key->arp_sip;
			swkey->ipv4_dst = arp_key->arp_tip;
			if (arp_key->arp_op & htons(0xff00))
				goto invalid;
			swkey->nw_proto = ntohs(arp_key->arp_op);
			memcpy(swkey->arp_sha, arp_key->arp_sha, ETH_ALEN);
			memcpy(swkey->arp_tha, arp_key->arp_tha, ETH_ALEN);
			break;

		default:
			goto invalid;
		}

		prev_type = type;
	}
	if (rem)
		goto invalid;

	/* The attribute sequence ended; check that it stopped at a point
	 * that forms a complete key. */
	switch (prev_type) {
	case ODP_KEY_ATTR_UNSPEC:
		goto invalid;

	case ODP_KEY_ATTR_TUN_ID:
	case ODP_KEY_ATTR_IN_PORT:
		goto invalid;

	case ODP_KEY_ATTR_ETHERNET:
	case ODP_KEY_ATTR_8021Q:
		goto ok;

	case ODP_KEY_ATTR_ETHERTYPE:
		if (swkey->dl_type == htons(ETH_P_IP) ||
		    swkey->dl_type == htons(ETH_P_ARP))
			goto invalid;
		goto ok;

	case ODP_KEY_ATTR_IPV4:
		if (swkey->nw_proto == IPPROTO_TCP ||
		    swkey->nw_proto == IPPROTO_UDP ||
		    swkey->nw_proto == IPPROTO_ICMP)
			goto invalid;
		goto ok;

	case ODP_KEY_ATTR_IPV6:
		if (swkey->nw_proto == IPPROTO_TCP ||
		    swkey->nw_proto == IPPROTO_UDP ||
		    swkey->nw_proto == IPPROTO_ICMPV6)
			goto invalid;
		goto ok;

	case ODP_KEY_ATTR_TCP:
	case ODP_KEY_ATTR_UDP:
	case ODP_KEY_ATTR_ICMP:
	case ODP_KEY_ATTR_ICMPV6:
	case ODP_KEY_ATTR_ARP:
		goto ok;
	}

invalid:
	return -EINVAL;

ok:
	return 0;
}
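
/* flow_to_nlattrs - encodes 'swkey' as a nested sequence of ODP_KEY_ATTR_*
 * Netlink attributes appended to 'skb', the inverse of flow_from_nlattrs().
 * Returns 0 if successful, otherwise a negative error code if the skb runs
 * out of tailroom. */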
int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct odp_key_ethernet *eth_key;
	struct nlattr *nla;

	if (swkey->tun_id != cpu_to_be64(0))
		NLA_PUT_BE64(skb, ODP_KEY_ATTR_TUN_ID, swkey->tun_id);

	NLA_PUT_U32(skb, ODP_KEY_ATTR_IN_PORT, swkey->in_port);

	nla = nla_reserve(skb, ODP_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->dl_src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->dl_dst, ETH_ALEN);

	if (swkey->dl_tci != htons(0)) {
		struct odp_key_8021q q_key;

		q_key.q_tpid = htons(ETH_P_8021Q);
		q_key.q_tci = swkey->dl_tci & ~htons(VLAN_TAG_PRESENT);
		NLA_PUT(skb, ODP_KEY_ATTR_8021Q, sizeof(q_key), &q_key);
	}

	if (swkey->dl_type == htons(ETH_P_802_2))
		return 0;

	NLA_PUT_BE16(skb, ODP_KEY_ATTR_ETHERTYPE, swkey->dl_type);

	if (swkey->dl_type == htons(ETH_P_IP)) {
		struct odp_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, ODP_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4_src;
		ipv4_key->ipv4_dst = swkey->ipv4_dst;
		ipv4_key->ipv4_proto = swkey->nw_proto;
		ipv4_key->ipv4_tos = swkey->nw_tos;
	} else if (swkey->dl_type == htons(ETH_P_IPV6)) {
		struct odp_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, ODP_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, swkey->ipv6_src,
				sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, swkey->ipv6_dst,
				sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_proto = swkey->nw_proto;
		ipv6_key->ipv6_tos = swkey->nw_tos;
	} else if (swkey->dl_type == htons(ETH_P_ARP)) {
		struct odp_key_arp *arp_key;

		nla = nla_reserve(skb, ODP_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		arp_key->arp_sip = swkey->ipv4_src;
		arp_key->arp_tip = swkey->ipv4_dst;
		arp_key->arp_op = htons(swkey->nw_proto);
		memcpy(arp_key->arp_sha, swkey->arp_sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->arp_tha, ETH_ALEN);
	}

	if (swkey->dl_type == htons(ETH_P_IP)
			|| swkey->dl_type == htons(ETH_P_IPV6)) {

		if (swkey->nw_proto == IPPROTO_TCP) {
			struct odp_key_tcp *tcp_key;

			nla = nla_reserve(skb, ODP_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			tcp_key->tcp_src = swkey->tp_src;
			tcp_key->tcp_dst = swkey->tp_dst;
		} else if (swkey->nw_proto == IPPROTO_UDP) {
			struct odp_key_udp *udp_key;

			nla = nla_reserve(skb, ODP_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			udp_key->udp_src = swkey->tp_src;
			udp_key->udp_dst = swkey->tp_dst;
		} else if (swkey->dl_type == htons(ETH_P_IP)
				&& swkey->nw_proto == IPPROTO_ICMP) {
			struct odp_key_icmp *icmp_key;

			nla = nla_reserve(skb, ODP_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(swkey->tp_src);
			icmp_key->icmp_code = ntohs(swkey->tp_dst);
		} else if (swkey->dl_type == htons(ETH_P_IPV6)
				&& swkey->nw_proto == IPPROTO_ICMPV6) {
			struct odp_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, ODP_KEY_ATTR_ICMPV6, sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->tp_src);
			icmpv6_key->icmpv6_code = ntohs(swkey->tp_dst);
		}
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
					0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	get_random_bytes(&hash_seed, sizeof(hash_seed));

	return 0;
}

/* Uninitializes the flow module. */
void flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}