/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include "flow.h"
#include "datapath.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/random.h>
#include <net/inet_ecn.h>
#include <net/ip.h>

struct kmem_cache *flow_cache;
static unsigned int hash_seed;

static inline bool arphdr_ok(struct sk_buff *skb)
{
	return skb->len >= skb_network_offset(skb) + sizeof(struct arp_eth_header);
}

static inline int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;

	if (skb->len < nh_ofs + sizeof(struct iphdr))
		return -EINVAL;

	ip_len = ip_hdrlen(skb);
	if (ip_len < sizeof(struct iphdr) || skb->len < nh_ofs + ip_len)
		return -EINVAL;

	/*
	 * Pull enough header bytes to account for the IP header plus the
	 * longest transport header that we parse, currently 20 bytes for TCP.
	 */
	if (!pskb_may_pull(skb, min(nh_ofs + ip_len + 20, skb->len)))
		return -ENOMEM;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static inline bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	if (skb->len >= th_ofs + sizeof(struct tcphdr)) {
		int tcp_len = tcp_hdrlen(skb);
		return (tcp_len >= sizeof(struct tcphdr)
			&& skb->len >= th_ofs + tcp_len);
	}
	return false;
}

static inline bool udphdr_ok(struct sk_buff *skb)
{
	return skb->len >= skb_transport_offset(skb) + sizeof(struct udphdr);
}

static inline bool icmphdr_ok(struct sk_buff *skb)
{
	return skb->len >= skb_transport_offset(skb) + sizeof(struct icmphdr);
}

#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
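
/*
 * Note (added comment): offset 13 is the byte of the TCP header that
 * holds the flag bits, and 0x3f keeps the six classic flags while
 * discarding the two ECN bits above them:
 *
 *	bit:   7    6    5    4    3    2    1    0
 *	     +----+----+----+----+----+----+----+----+
 *	     |CWR |ECE |URG |ACK |PSH |RST |SYN |FIN |
 *	     +----+----+----+----+----+----+----+----+
 *	               \_________ 0x3f ____________/
 */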

void flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if (flow->key.dl_type == htons(ETH_P_IP) &&
	    flow->key.nw_proto == IPPROTO_TCP) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock_bh(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock_bh(&flow->lock);
}
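
/*
 * Illustrative sketch (an assumption, not part of this file): on a
 * flow-table hit the datapath would account for the packet before
 * executing the flow's actions, along the lines of:
 *
 *	flow_used(flow, skb);
 *	acts = rcu_dereference(flow->sf_acts);
 *	execute_actions(dp, skb, &flow->key, acts->actions, acts->n_actions);
 *
 * execute_actions() stands in for the datapath's action-execution path;
 * its exact signature here is a guess.
 */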

struct sw_flow_actions *flow_actions_alloc(size_t n_actions)
{
	struct sw_flow_actions *sfa;

	/* At least DP_MAX_PORTS actions are required to be able to flood a
	 * packet to every port.  Factor of 2 allows for setting VLAN tags,
	 * etc. */
	if (n_actions > 2 * DP_MAX_PORTS)
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof *sfa + n_actions * sizeof(union odp_action),
		      GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->n_actions = n_actions;
	return sfa;
}
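
/*
 * Illustrative usage sketch (an assumption, not part of this file):
 * both failure paths above return ERR_PTR(), so callers should test
 * the result with IS_ERR() rather than comparing against NULL:
 *
 *	struct sw_flow_actions *acts = flow_actions_alloc(n_actions);
 *	if (IS_ERR(acts))
 *		return PTR_ERR(acts);
 */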

/* Frees 'flow' immediately. */
static void flow_free(struct sw_flow *flow)
{
	kfree(flow->sf_acts);
	kmem_cache_free(flow_cache, flow);
}

void flow_free_tbl(struct tbl_node *node)
{
	struct sw_flow *flow = flow_cast(node);
	flow_free(flow);
}

/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
	flow_free(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}
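
/*
 * Illustrative sketch (an assumption about the calling convention, not
 * part of this file): the deferred free exists so that a reader that
 * found the flow under rcu_read_lock() can keep dereferencing it even
 * if a writer deletes it concurrently:
 *
 *	rcu_read_lock();
 *	flow = flow_cast(tbl_lookup(...));
 *	if (flow)
 *		acts = rcu_dereference(flow->sf_acts);
 *	...
 *	rcu_read_unlock();
 *
 * call_rcu() defers the actual kfree() until every such reader has left
 * its read-side critical section.
 */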

/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

static void parse_vlan(struct sk_buff *skb, struct odp_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type;	/* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (skb->len < sizeof(struct qtag_prefix) + sizeof(__be16))
		return;

	qp = (struct qtag_prefix *) skb->data;
	key->dl_vlan = qp->tci & htons(VLAN_VID_MASK);
	key->dl_vlan_pcp = (ntohs(qp->tci) & VLAN_PCP_MASK) >> VLAN_PCP_SHIFT;
	__skb_pull(skb, sizeof(struct qtag_prefix));
}
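
/*
 * Note (added comment): the 802.1Q TCI is laid out as PCP (3 bits) |
 * CFI (1 bit) | VID (12 bits), so the VID can be masked out while still
 * in network byte order, but the PCP must be converted to host order
 * before it is shifted down.
 */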

static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8 dsap;	/* Always 0xAA */
		u8 ssap;	/* Always 0xAA */
		u8 ctrl;
		u8 oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= ODP_DL_TYPE_ETH2_CUTOFF)
		return proto;

	if (unlikely(skb->len < sizeof(struct llc_snap_hdr)))
		return htons(ODP_DL_TYPE_NOT_ETH_TYPE);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ODP_DL_TYPE_NOT_ETH_TYPE);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}
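
/*
 * Note (added comment): by Ethernet convention, a length/type field of
 * 0x0600 or greater is an Ethernet II EtherType, while smaller values
 * are 802.3 frame lengths, in which case the real protocol must be
 * recovered from the LLC/SNAP header that follows, as done above.
 */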

/**
 * flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP on output, then just
 *      past the IPv4 header, if one is present and of a correct length,
 *      otherwise the same as skb->network_header.  For other key->dl_type
 *      values it is left untouched.
 *
 * Sets OVS_CB(skb)->is_frag to %true if @skb is an IPv4 fragment, otherwise to
 * %false.
 */
int flow_extract(struct sk_buff *skb, u16 in_port, struct odp_flow_key *key)
{
	struct ethhdr *eth;

	memset(key, 0, sizeof *key);
	key->tun_id = OVS_CB(skb)->tun_id;
	key->in_port = in_port;
	key->dl_vlan = htons(ODP_VLAN_NONE);
	OVS_CB(skb)->is_frag = false;

	/*
	 * We would really like to pull as many bytes as we could possibly
	 * want to parse into the linear data area.  Currently that is:
	 *
	 *    14     Ethernet header
	 *     4     VLAN header
	 *    60     max IP header with options
	 *    20     max TCP/UDP/ICMP header (don't care about options)
	 *    ---
	 *    98
	 *
	 * But Xen only allocates 64 or 72 bytes for the linear data area in
	 * netback, which means that we would reallocate and copy the skb's
	 * linear data on every packet if we did that.  So instead just pull 64
	 * bytes, which is always sufficient without IP options, and then check
	 * whether we need to pull more later when we look at the IP header.
	 */
	if (!pskb_may_pull(skb, min(skb->len, 64u)))
		return -ENOMEM;

	skb_reset_mac_header(skb);

	/* Link layer. */
	eth = eth_hdr(skb);
	memcpy(key->dl_src, eth->h_source, ETH_ALEN);
	memcpy(key->dl_dst, eth->h_dest, ETH_ALEN);

	/* dl_type, dl_vlan, dl_vlan_pcp. */
	__skb_pull(skb, 2 * ETH_ALEN);
	if (eth->h_proto == htons(ETH_P_8021Q))
		parse_vlan(skb, key);
	key->dl_type = parse_ethertype(skb);
	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - (unsigned char *)eth);

	/* Network layer. */
	if (key->dl_type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		int error;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				return 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->nw_src = nh->saddr;
		key->nw_dst = nh->daddr;
		key->nw_tos = nh->tos & ~INET_ECN_MASK;
		key->nw_proto = nh->protocol;

		/* Transport layer. */
		if (!(nh->frag_off & htons(IP_MF | IP_OFFSET))) {
			if (key->nw_proto == IPPROTO_TCP) {
				if (tcphdr_ok(skb)) {
					struct tcphdr *tcp = tcp_hdr(skb);
					key->tp_src = tcp->source;
					key->tp_dst = tcp->dest;
				}
			} else if (key->nw_proto == IPPROTO_UDP) {
				if (udphdr_ok(skb)) {
					struct udphdr *udp = udp_hdr(skb);
					key->tp_src = udp->source;
					key->tp_dst = udp->dest;
				}
			} else if (key->nw_proto == IPPROTO_ICMP) {
				if (icmphdr_ok(skb)) {
					struct icmphdr *icmp = icmp_hdr(skb);
					/* The ICMP type and code fields use the
					 * 16-bit transport port fields, so we
					 * need to store them in 16-bit network
					 * byte order. */
					key->tp_src = htons(icmp->type);
					key->tp_dst = htons(icmp->code);
				}
			}
		} else {
			OVS_CB(skb)->is_frag = true;
		}
	} else if (key->dl_type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
		    && arp->ar_pro == htons(ETH_P_IP)
		    && arp->ar_hln == ETH_ALEN
		    && arp->ar_pln == 4) {
			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->nw_proto = ntohs(arp->ar_op);

			if (key->nw_proto == ARPOP_REQUEST
			    || key->nw_proto == ARPOP_REPLY) {
				memcpy(&key->nw_src, arp->ar_sip, sizeof(key->nw_src));
				memcpy(&key->nw_dst, arp->ar_tip, sizeof(key->nw_dst));
			}
		}
	}
	return 0;
}
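
/*
 * Illustrative usage sketch (an assumption, not part of this file): a
 * receive path would extract the key and then consult the flow table,
 * typically treating IPv4 fragments specially:
 *
 *	struct odp_flow_key key;
 *	struct tbl_node *node;
 *
 *	if (flow_extract(skb, in_port, &key))
 *		goto drop;
 *	node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
 *
 * tbl_lookup() stands in for the datapath's flow-table lookup; its
 * exact signature here is a guess.
 */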

u32 flow_hash(const struct odp_flow_key *key)
{
	return jhash2((u32 *)key, sizeof *key / sizeof(u32), hash_seed);
}
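
/*
 * Note (added comment): jhash2() treats the key as an array of u32, so
 * struct odp_flow_key must be a multiple of 4 bytes long and any
 * padding must be zeroed.  flow_extract() guarantees the latter by
 * memset()ing the entire key before filling it in, which also lets
 * flow_cmp() below compare keys with a simple memcmp().
 */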

int flow_cmp(const struct tbl_node *node, void *key2_)
{
	const struct odp_flow_key *key1 = &flow_cast(node)->key;
	const struct odp_flow_key *key2 = key2_;

	return !memcmp(key1, key2, sizeof(struct odp_flow_key));
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	get_random_bytes(&hash_seed, sizeof hash_seed);

	return 0;
}

/* Uninitializes the flow module. */
void flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}