cfm: Log sequence number of incoming CCMs.
[openvswitch] / datapath / actions.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 /* Functions for executing flow actions. */
10
11 #include <linux/skbuff.h>
12 #include <linux/in.h>
13 #include <linux/ip.h>
14 #include <linux/tcp.h>
15 #include <linux/udp.h>
16 #include <linux/in6.h>
17 #include <linux/if_arp.h>
18 #include <linux/if_vlan.h>
19 #include <net/inet_ecn.h>
20 #include <net/ip.h>
21 #include <net/checksum.h>
22
23 #include "actions.h"
24 #include "checksum.h"
25 #include "datapath.h"
26 #include "loop_counter.h"
27 #include "openvswitch/datapath-protocol.h"
28 #include "vlan.h"
29 #include "vport.h"
30
31 static int do_execute_actions(struct datapath *, struct sk_buff *,
32                               struct sw_flow_actions *acts);
33
/*
 * make_writable - obtain an skb that is safe to modify in place.
 *
 * @skb: packet to make writable; ownership is taken in all cases.
 * @min_headroom: minimum headroom the caller needs in the returned skb.
 *
 * If 'skb' is cloned (its data shared with another skb), replaces it with a
 * private copy that has at least 'min_headroom' bytes of headroom and the
 * original tailroom, carrying over the checksum state.  Otherwise just
 * ensures the Ethernet/IP/TCP-sized header region is linear via
 * pskb_may_pull().
 *
 * NOTE(review): 'min_headroom' is only honored on the clone path; the
 * non-cloned path returns the skb with whatever headroom it already has —
 * presumably callers that need headroom always hit the clone path; verify.
 *
 * Returns the writable skb on success.  On failure the original skb is
 * freed and NULL is returned.
 */
static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
{
        if (skb_cloned(skb)) {
                struct sk_buff *nskb;
                unsigned headroom = max(min_headroom, skb_headroom(skb));

                nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), GFP_ATOMIC);
                if (nskb) {
                        /* Preserve OVS checksum bookkeeping across the copy. */
                        set_skb_csum_bits(skb, nskb);
                        kfree_skb(skb);
                        return nskb;
                }
        } else {
                /* Pull enough to cover the largest L4 header we modify
                 * (TCP), capped at the actual packet length. */
                unsigned int hdr_len = (skb_transport_offset(skb)
                                        + sizeof(struct tcphdr));
                if (pskb_may_pull(skb, min(hdr_len, skb->len)))
                        return skb;
        }
        kfree_skb(skb);
        return NULL;
}
55
/*
 * strip_vlan - remove the 802.1Q VLAN tag from 'skb', if any.
 *
 * Handles both tagging styles: a hardware-accelerated tag carried out of
 * band (cleared by zeroing the TCI) and an in-band tag in the packet data
 * (removed by shifting the Ethernet addresses over it).
 *
 * Takes ownership of 'skb'.  Returns the (possibly replaced) skb, the
 * unmodified skb if it is not VLAN-tagged or is too short, or NULL if
 * making the skb writable failed (in which case it has been freed).
 */
static struct sk_buff *strip_vlan(struct sk_buff *skb)
{
        struct ethhdr *eh;

        /* Out-of-band (hw-accel) tag: just clear the TCI, no data moves. */
        if (vlan_tx_tag_present(skb)) {
                vlan_set_tci(skb, 0);
                return skb;
        }

        /* Not an in-band 802.1Q frame (or truncated): nothing to strip. */
        if (unlikely(vlan_eth_hdr(skb)->h_vlan_proto != htons(ETH_P_8021Q) ||
            skb->len < VLAN_ETH_HLEN))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        /* CHECKSUM_COMPLETE covers the bytes we are about to remove, so
         * subtract the 4-byte VLAN header's contribution first. */
        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));

        /* Slide dest+src MAC addresses forward over the VLAN header... */
        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);

        /* ...then drop the now-dead leading 4 bytes. */
        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}
86
/*
 * modify_vlan_tci - set the 802.1Q TCI of 'skb' to 'tci' (network order).
 *
 * If the skb already carries an out-of-band tag, or is not an in-band
 * 802.1Q frame, the tag is (re)attached out of band via
 * __vlan_hwaccel_put_tag().  Otherwise the in-band TCI field is rewritten
 * and any CHECKSUM_COMPLETE value is patched to match.
 *
 * Takes ownership of 'skb'.  Returns the (possibly replaced) skb, or NULL
 * if making it writable failed (the skb has then been freed).  A frame
 * shorter than a VLAN Ethernet header is returned unmodified.
 */
static struct sk_buff *modify_vlan_tci(struct sk_buff *skb, __be16 tci)
{
        struct vlan_ethhdr *vh;
        __be16 old_tci;

        if (vlan_tx_tag_present(skb) || skb->protocol != htons(ETH_P_8021Q))
                /* __vlan_hwaccel_put_tag() wants host byte order. */
                return __vlan_hwaccel_put_tag(skb, ntohs(tci));

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        if (unlikely(skb->len < VLAN_ETH_HLEN))
                return skb;

        vh = vlan_eth_hdr(skb);

        old_tci = vh->h_vlan_TCI;
        vh->h_vlan_TCI = tci;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
                /* Fold out the old TCI and fold in the new one: summing the
                 * one's-complement of the old value cancels it from csum. */
                __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
                skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
        }

        return skb;
}
114
115 static bool is_ip(struct sk_buff *skb)
116 {
117         return (OVS_CB(skb)->flow->key.eth.type == htons(ETH_P_IP) &&
118                 skb->transport_header > skb->network_header);
119 }
120
121 static __sum16 *get_l4_checksum(struct sk_buff *skb)
122 {
123         u8 nw_proto = OVS_CB(skb)->flow->key.ip.nw_proto;
124         int transport_len = skb->len - skb_transport_offset(skb);
125         if (nw_proto == IPPROTO_TCP) {
126                 if (likely(transport_len >= sizeof(struct tcphdr)))
127                         return &tcp_hdr(skb)->check;
128         } else if (nw_proto == IPPROTO_UDP) {
129                 if (likely(transport_len >= sizeof(struct udphdr)))
130                         return &udp_hdr(skb)->check;
131         }
132         return NULL;
133 }
134
/*
 * set_nw_addr - rewrite the IPv4 source or destination address of 'skb'.
 *
 * @a: ODP_ACTION_ATTR_SET_NW_SRC or ODP_ACTION_ATTR_SET_NW_DST attribute
 *     carrying the new address as a __be32.
 *
 * Updates both the IP header checksum and (for TCP/UDP) the L4 checksum,
 * which covers the address via the pseudo-header.  Takes ownership of
 * 'skb'; returns it (possibly replaced), unchanged for non-IP packets, or
 * NULL if making it writable failed (the skb has then been freed).
 */
static struct sk_buff *set_nw_addr(struct sk_buff *skb, const struct nlattr *a)
{
        __be32 new_nwaddr = nla_get_be32(a);
        struct iphdr *nh;
        __sum16 *check;
        __be32 *nwaddr;

        if (unlikely(!is_ip(skb)))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        /* Must follow make_writable() since that can move the skb data. */
        nh = ip_hdr(skb);
        nwaddr = nla_type(a) == ODP_ACTION_ATTR_SET_NW_SRC ? &nh->saddr : &nh->daddr;

        /* Both checksum updates read the OLD address, so they must happen
         * before *nwaddr is overwritten below. */
        check = get_l4_checksum(skb);
        if (likely(check))
                inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
        csum_replace4(&nh->check, *nwaddr, new_nwaddr);

        /* Addresses changed, so any cached flow hash is stale. */
        skb_clear_rxhash(skb);

        *nwaddr = new_nwaddr;

        return skb;
}
163
164 static struct sk_buff *set_nw_tos(struct sk_buff *skb, u8 nw_tos)
165 {
166         if (unlikely(!is_ip(skb)))
167                 return skb;
168
169         skb = make_writable(skb, 0);
170         if (skb) {
171                 struct iphdr *nh = ip_hdr(skb);
172                 u8 *f = &nh->tos;
173                 u8 old = *f;
174                 u8 new;
175
176                 /* Set the DSCP bits and preserve the ECN bits. */
177                 new = nw_tos | (nh->tos & INET_ECN_MASK);
178                 csum_replace4(&nh->check, (__force __be32)old,
179                                           (__force __be32)new);
180                 *f = new;
181         }
182         return skb;
183 }
184
/*
 * set_tp_port - rewrite the TCP/UDP source or destination port of 'skb'.
 *
 * @a: ODP_ACTION_ATTR_SET_TP_SRC or ODP_ACTION_ATTR_SET_TP_DST attribute
 *     carrying the new port as a __be16.
 *
 * Takes ownership of 'skb'; returns it (possibly replaced), unchanged for
 * non-IP or non-TCP/UDP packets, or NULL if making it writable failed
 * (the skb has then been freed).
 *
 * NOTE(review): a UDP datagram with checksum 0 (checksum disabled) is not
 * special-cased here — inet_proto_csum_replace2() is applied regardless;
 * confirm whether that matters for this datapath.
 */
static struct sk_buff *set_tp_port(struct sk_buff *skb, const struct nlattr *a)
{
        struct udphdr *th;
        __sum16 *check;
        __be16 *port;

        if (unlikely(!is_ip(skb)))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        /* Must follow make_writable() since that can move the skb data. */
        check = get_l4_checksum(skb);
        if (unlikely(!check))
                return skb;

        /*
         * Update port and checksum.
         *
         * This is OK because source and destination port numbers are at the
         * same offsets in both UDP and TCP headers, and get_l4_checksum() only
         * supports those protocols.
         */
        th = udp_hdr(skb);
        port = nla_type(a) == ODP_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
        inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
        *port = nla_get_be16(a);
        /* Ports changed, so any cached flow hash is stale. */
        skb_clear_rxhash(skb);

        return skb;
}
218
219 /**
220  * is_spoofed_arp - check for invalid ARP packet
221  *
222  * @skb: skbuff containing an Ethernet packet, with network header pointing
223  * just past the Ethernet and optional 802.1Q header.
224  *
225  * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy
226  * or truncated header fields or one whose inner and outer Ethernet address
227  * differ.
228  */
229 static bool is_spoofed_arp(struct sk_buff *skb)
230 {
231         struct arp_eth_header *arp;
232
233         if (OVS_CB(skb)->flow->key.eth.type != htons(ETH_P_ARP))
234                 return false;
235
236         if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
237                 return true;
238
239         arp = (struct arp_eth_header *)skb_network_header(skb);
240         return (arp->ar_hrd != htons(ARPHRD_ETHER) ||
241                 arp->ar_pro != htons(ETH_P_IP) ||
242                 arp->ar_hln != ETH_ALEN ||
243                 arp->ar_pln != 4 ||
244                 compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source));
245 }
246
247 static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
248 {
249         struct vport *p;
250
251         if (!skb)
252                 goto error;
253
254         p = rcu_dereference(dp->ports[out_port]);
255         if (!p)
256                 goto error;
257
258         vport_send(p, skb);
259         return;
260
261 error:
262         kfree_skb(skb);
263 }
264
265 static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg)
266 {
267         struct dp_upcall_info upcall;
268
269         skb = skb_clone(skb, GFP_ATOMIC);
270         if (!skb)
271                 return -ENOMEM;
272
273         upcall.cmd = ODP_PACKET_CMD_ACTION;
274         upcall.key = &OVS_CB(skb)->flow->key;
275         upcall.userdata = arg;
276         upcall.sample_pool = 0;
277         upcall.actions = NULL;
278         upcall.actions_len = 0;
279         return dp_upcall(dp, skb, &upcall);
280 }
281
/*
 * Execute a list of actions against 'skb'.
 *
 * Consumes 'skb': it is either transmitted by the final output action or
 * freed.  Returns 0 on success or a negative errno if an upcall failed or
 * an action helper ran out of memory (the skb is freed in those cases too).
 */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_actions *acts)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so that doing a clone and
         * then freeing the original skbuff is wasteful.  So the following code
         * is slightly obscure just to avoid that. */
        int prev_port = -1;
        /* Saved so ODP_ACTION_ATTR_POP_PRIORITY can restore the original. */
        u32 priority = skb->priority;
        const struct nlattr *a;
        int rem, err;

        for (a = acts->actions, rem = acts->actions_len; rem > 0;
             a = nla_next(a, &rem)) {
                /* A pending output from the previous iteration means 'skb'
                 * is still needed afterwards, so output a clone of it. */
                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case ODP_ACTION_ATTR_OUTPUT:
                        /* Defer the output: if this is the last action the
                         * original skb can be sent without cloning. */
                        prev_port = nla_get_u32(a);
                        break;

                case ODP_ACTION_ATTR_CONTROLLER:
                        err = output_control(dp, skb, nla_get_u64(a));
                        if (err) {
                                kfree_skb(skb);
                                return err;
                        }
                        break;

                case ODP_ACTION_ATTR_SET_TUNNEL:
                        OVS_CB(skb)->tun_id = nla_get_be64(a);
                        break;

                case ODP_ACTION_ATTR_SET_DL_TCI:
                        skb = modify_vlan_tci(skb, nla_get_be16(a));
                        break;

                case ODP_ACTION_ATTR_STRIP_VLAN:
                        skb = strip_vlan(skb);
                        break;

                case ODP_ACTION_ATTR_SET_DL_SRC:
                        skb = make_writable(skb, 0);
                        if (!skb)
                                return -ENOMEM;
                        memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
                        break;

                case ODP_ACTION_ATTR_SET_DL_DST:
                        skb = make_writable(skb, 0);
                        if (!skb)
                                return -ENOMEM;
                        memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
                        break;

                case ODP_ACTION_ATTR_SET_NW_SRC:
                case ODP_ACTION_ATTR_SET_NW_DST:
                        skb = set_nw_addr(skb, a);
                        break;

                case ODP_ACTION_ATTR_SET_NW_TOS:
                        skb = set_nw_tos(skb, nla_get_u8(a));
                        break;

                case ODP_ACTION_ATTR_SET_TP_SRC:
                case ODP_ACTION_ATTR_SET_TP_DST:
                        skb = set_tp_port(skb, a);
                        break;

                case ODP_ACTION_ATTR_SET_PRIORITY:
                        skb->priority = nla_get_u32(a);
                        break;

                case ODP_ACTION_ATTR_POP_PRIORITY:
                        skb->priority = priority;
                        break;

                case ODP_ACTION_ATTR_DROP_SPOOFED_ARP:
                        /* Spoofed ARP: stop executing actions; the exit path
                         * below frees the skb (no output is pending yet for
                         * this iteration). */
                        if (unlikely(is_spoofed_arp(skb)))
                                goto exit;
                        break;
                }
                /* The modify helpers above free the skb and return NULL on
                 * allocation failure. */
                if (!skb)
                        return -ENOMEM;
        }
exit:
        /* Last deferred output gets the original skb; otherwise nothing
         * consumed it, so free it here. */
        if (prev_port != -1)
                do_output(dp, skb, prev_port);
        else
                kfree_skb(skb);
        return 0;
}
378
379 static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
380                          struct sw_flow_actions *acts)
381 {
382         struct sk_buff *nskb;
383         struct vport *p = OVS_CB(skb)->vport;
384         struct dp_upcall_info upcall;
385
386         if (unlikely(!p))
387                 return;
388
389         atomic_inc(&p->sflow_pool);
390         if (net_random() >= dp->sflow_probability)
391                 return;
392
393         nskb = skb_clone(skb, GFP_ATOMIC);
394         if (unlikely(!nskb))
395                 return;
396
397         upcall.cmd = ODP_PACKET_CMD_SAMPLE;
398         upcall.key = &OVS_CB(skb)->flow->key;
399         upcall.userdata = 0;
400         upcall.sample_pool = atomic_read(&p->sflow_pool);
401         upcall.actions = acts->actions;
402         upcall.actions_len = acts->actions_len;
403         dp_upcall(dp, nskb, &upcall);
404 }
405
/*
 * Execute a list of actions against 'skb'.
 *
 * Entry point for action execution.  Uses a per-CPU loop counter to bound
 * recursion through the datapath: once the nesting depth exceeds MAX_LOOPS
 * the 'looping' flag is set and further packets at this depth are
 * suppressed instead of executed.  Consumes 'skb' in all cases.
 * Returns 0 on success or a negative errno.
 */
int execute_actions(struct datapath *dp, struct sk_buff *skb)
{
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
        struct loop_counter *loop;
        int error;

        /* Check whether we've looped too much. */
        loop = loop_get_counter();
        if (unlikely(++loop->count > MAX_LOOPS))
                loop->looping = true;
        if (unlikely(loop->looping)) {
                error = loop_suppress(dp, acts);
                kfree_skb(skb);
                goto out_loop;
        }

        /* Really execute actions. */
        if (dp->sflow_probability)
                sflow_sample(dp, skb, acts);
        /* Fresh packet: clear any tunnel ID before actions may set one. */
        OVS_CB(skb)->tun_id = 0;
        error = do_execute_actions(dp, skb, acts);

        /* Check whether sub-actions looped too much.  Nested calls (e.g.
         * re-entry through an output port) share this counter, so 'looping'
         * may have been set while do_execute_actions() ran. */
        if (unlikely(loop->looping))
                error = loop_suppress(dp, acts);

out_loop:
        /* Decrement loop counter.  Only the outermost frame (count back to
         * zero) clears the 'looping' flag. */
        if (!--loop->count)
                loop->looping = false;
        loop_put_counter();

        return error;
}