2 * Copyright (c) 2007-2012 Nicira, Inc.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/skbuff.h>
24 #include <linux/if_tunnel.h>
25 #include <linux/if_vlan.h>
30 #include <net/protocol.h>
35 #include "vport-generic.h"
38 * The GRE header is composed of a series of sections: a base and then a variable
41 #define GRE_HEADER_SECTION 4
/*
 * get_gre_param - select the outgoing GRE parameters for a packet.
 *
 * Fills the output parameters @flags, @tunnel_type and @out_key.  When the
 * per-flow tunnel key carries a destination (tun_key->ipv4_dst non-zero) the
 * flow's own OVS_FLOW_TNL_F_* flags and tun_id are used; otherwise the
 * port's mutable configuration supplies the values.
 *
 * NOTE(review): several source lines of this function (braces, returns,
 * error paths) are missing from this chunk; comments describe only the
 * visible logic.
 */
48 static int get_gre_param(const struct tnl_mutable_config *mutable,
49 const struct ovs_key_ipv4_tunnel *tun_key,
50 u32 *flags, u32 *tunnel_type, __be64 *out_key)
/* Flow-specified tunnel: map OVS_FLOW_TNL_F_* flags to TNL_F_*. */
52 if (tun_key->ipv4_dst) {
55 if (tun_key->tun_flags & OVS_FLOW_TNL_F_KEY)
56 *flags = TNL_F_OUT_KEY_ACTION;
57 if (tun_key->tun_flags & OVS_FLOW_TNL_F_CSUM)
59 *tunnel_type = TNL_T_PROTO_GRE;
60 *out_key = tun_key->tun_id;
/* Port-configured tunnel: take parameters from the mutable config. */
62 *flags = mutable->flags;
63 *tunnel_type = mutable->key.tunnel_type;
/* OUT_KEY_ACTION means the key comes from the flow, not the port. */
64 if (mutable->flags & TNL_F_OUT_KEY_ACTION) {
65 if (likely(tun_key->tun_flags & OVS_FLOW_TNL_F_KEY)) {
66 *out_key = tun_key->tun_id;
/* Fixed out_key configured on the port itself. */
72 *out_key = mutable->out_key;
/*
 * gre_hdr_len - compute the GRE header length, in bytes, that
 * gre_build_header() will emit for this mutable config / flow key pair.
 *
 * The length is a multiple of GRE_HEADER_SECTION (4 bytes): one section for
 * the base header, plus one for the checksum, one for the key, and (GRE64
 * only) one for the sequence number, as applicable.
 *
 * NOTE(review): some lines (declarations, error-return path, part of the
 * key condition) are missing from this chunk.
 */
78 static int gre_hdr_len(const struct tnl_mutable_config *mutable,
79 const struct ovs_key_ipv4_tunnel *tun_key)
87 err = get_gre_param(mutable, tun_key, &flags, &tunnel_type, &out_key);
/* Base GRE header is always present. */
91 len = GRE_HEADER_SECTION;
93 if (flags & TNL_F_CSUM)
94 len += GRE_HEADER_SECTION;
96 /* Set key for GRE64 tunnels, even when the key is zero. */
98 tunnel_type & TNL_T_PROTO_GRE64 ||
99 flags & TNL_F_OUT_KEY_ACTION) {
101 len += GRE_HEADER_SECTION;
/* GRE64 additionally carries a sequence-number section. */
102 if (tunnel_type & TNL_T_PROTO_GRE64)
103 len += GRE_HEADER_SECTION;
109 /* Returns the least-significant 32 bits of a __be64. */
110 static __be32 be64_get_low32(__be64 x)
/*
 * NOTE(review): the endianness preprocessor conditionals appear to be
 * missing from this chunk; the two returns below look like the big-endian
 * and little-endian variants respectively — confirm against the full file.
 */
113 return (__force __be32)x;
115 return (__force __be32)((__force u64)x >> 32);
/* Returns the most-significant 32 bits of a __be64. */
119 static __be32 be64_get_high32(__be64 x)
/*
 * NOTE(review): as with be64_get_low32(), the endianness #ifdef lines are
 * not visible here; the two returns are the per-endianness variants.
 */
122 return (__force __be32)((__force u64)x >> 32);
124 return (__force __be32)x;
/*
 * gre_build_header - write the GRE header into a pre-sized buffer.
 *
 * @header points at space already reserved for the header (sized by
 * gre_hdr_len()).  Emits the base header with protocol ETH_P_TEB
 * (transparent Ethernet bridging), then the optional checksum, key and
 * (GRE64) sequence-number sections.  For GRE64 the 64-bit key is split:
 * low 32 bits into the GRE key field, high 32 bits into the sequence field.
 *
 * NOTE(review): lines including some option writes and the checksum field
 * initialization are missing from this chunk.
 */
128 static void gre_build_header(const struct vport *vport,
129 const struct tnl_mutable_config *mutable,
130 const struct ovs_key_ipv4_tunnel *tun_key,
133 struct gre_base_hdr *greh = header;
/* Options immediately follow the 4-byte base header. */
134 __be32 *options = (__be32 *)(greh + 1);
139 get_gre_param(mutable, tun_key, &flags, &tunnel_type, &out_key);
141 greh->protocol = htons(ETH_P_TEB);
144 if (flags & TNL_F_CSUM) {
145 greh->flags |= GRE_CSUM;
/*
 * Key chosen per-flow at transmit time: only advertise the KEY (and,
 * for GRE64, SEQ) bits here; the values are filled in later by
 * gre_update_header().
 */
150 if (flags & TNL_F_OUT_KEY_ACTION) {
151 greh->flags |= GRE_KEY;
152 if (tunnel_type & TNL_T_PROTO_GRE64)
153 greh->flags |= GRE_SEQ;
/* Static key: write it now.  GRE64 always carries a key section. */
155 } else if (out_key ||
156 tunnel_type & TNL_T_PROTO_GRE64) {
157 greh->flags |= GRE_KEY;
158 *options = be64_get_low32(out_key);
159 if (tunnel_type & TNL_T_PROTO_GRE64) {
/* High 32 bits of the 64-bit key ride in the sequence field. */
161 *options = be64_get_high32(out_key);
162 greh->flags |= GRE_SEQ;
/*
 * gre_update_header - finalize the GRE header on a fully built packet.
 *
 * Fills in the per-packet fields that gre_build_header() left blank: the
 * flow-selected key (and GRE64 sequence) and the GRE checksum.  @options is
 * positioned at the LAST option section and the code works backwards so the
 * checksum, which covers everything after it, is written last.  Also forces
 * IP ID selection so fragmentation of the outer packet works even when the
 * inner packet had DF set.
 *
 * NOTE(review): variable declarations, error handling and some option
 * writes are missing from this chunk; comments cover the visible logic.
 */
167 static struct sk_buff *gre_update_header(const struct vport *vport,
168 const struct tnl_mutable_config *mutable,
169 struct dst_entry *dst,
176 const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
/* Point at the final 4-byte option section of the GRE header. */
177 __be32 *options = (__be32 *)(skb_network_header(skb) + tunnel_hlen
178 - GRE_HEADER_SECTION);
180 if (get_gre_param(mutable, tun_key, &flags, &tunnel_type, &out_key)) {
185 /* Work backwards over the options so the checksum is last. */
186 if (flags & TNL_F_OUT_KEY_ACTION) {
187 if (tunnel_type & TNL_T_PROTO_GRE64) {
188 /* Set higher 32 bits to seq. */
189 *options = be64_get_high32(out_key);
192 *options = be64_get_low32(out_key);
194 } else if (out_key || tunnel_type & TNL_T_PROTO_GRE64) {
196 if (tunnel_type & TNL_T_PROTO_GRE64)
/* Checksum covers the GRE header and payload from the transport offset. */
200 if (flags & TNL_F_CSUM)
201 *(__sum16 *)options = csum_fold(skb_checksum(skb,
202 skb_transport_offset(skb),
203 skb->len - skb_transport_offset(skb),
/*
206 * Allow our local IP stack to fragment the outer packet even if the
207 * DF bit is set as a last resort. We also need to force selection of
208 * an IP ID here because Linux will otherwise leave it at 0 if the
209 * packet originally had DF set.
 */
212 __ip_select_ident(ip_hdr(skb), dst, 0);
/*
 * key_to_tunnel_id - combine the GRE key and sequence fields into the
 * 64-bit OVS tunnel ID (GRE64: seq carries the high 32 bits).
 *
 * NOTE(review): the endianness #ifdef selecting between the two returns is
 * not visible in this chunk.
 */
217 static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
220 return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
222 return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
/*
 * parse_header - validate and parse a received GRE header.
 *
 * On success returns the GRE header length in bytes and fills @flags with
 * the raw GRE flag bits, @tun_id with the 64-bit tunnel ID (key + seq for
 * GRE64) and @tunnel_type with TNL_T_PROTO_GRE or TNL_T_PROTO_GRE64.
 * Packets with GRE_VERSION or GRE_ROUTING set, or a protocol other than
 * ETH_P_TEB, are rejected.  Returns a negative value on error (error
 * returns not visible in this chunk).
 *
 * NOTE(review): several lines (option parsing, key/seq reads, final
 * return) are missing from this chunk.
 */
226 static int parse_header(struct iphdr *iph, __be16 *flags, __be64 *tun_id,
229 /* IP and ICMP protocol handlers check that the IHL is valid. */
230 struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
231 __be32 *options = (__be32 *)(greh + 1);
234 *flags = greh->flags;
/* We handle neither GRE versions > 0 nor source routing. */
236 if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
239 if (unlikely(greh->protocol != htons(ETH_P_TEB)))
242 hdr_len = GRE_HEADER_SECTION;
244 if (greh->flags & GRE_CSUM) {
245 hdr_len += GRE_HEADER_SECTION;
249 if (greh->flags & GRE_KEY) {
254 hdr_len += GRE_HEADER_SECTION;
/* KEY + SEQ together identify a GRE64 tunnel (64-bit tunnel ID). */
257 if (greh->flags & GRE_SEQ) {
259 *tunnel_type = TNL_T_PROTO_GRE64;
262 *tunnel_type = TNL_T_PROTO_GRE;
264 *tun_id = key_to_tunnel_id(gre_key, seq);
267 /* Ignore GRE seq if there is no key present. */
268 *tunnel_type = TNL_T_PROTO_GRE;
271 if (greh->flags & GRE_SEQ)
272 hdr_len += GRE_HEADER_SECTION;
277 /* Called with rcu_read_lock and BH disabled. */
/*
 * gre_err - ICMP error handler for GRE-encapsulated packets we sent.
 *
 * Handles only ICMP_DEST_UNREACH/ICMP_FRAG_NEEDED (path-MTU discovery).
 * Parses the returned copy of our own tunneled packet, locates the sending
 * vport, sanity-checks the reported MTU against the inner IPv4/IPv6
 * minimums, and forwards a "fragmentation needed" notification to the
 * tunnel layer via ovs_tnl_frag_needed().  Header offsets are saved and
 * restored so the skb is left as we found it.
 *
 * NOTE(review): many lines (returns, closing braces, some declarations)
 * are missing from this chunk; comments cover the visible flow only.
 */
278 static void gre_err(struct sk_buff *skb, u32 info)
281 const struct tnl_mutable_config *mutable;
282 const int type = icmp_hdr(skb)->type;
283 const int code = icmp_hdr(skb)->code;
284 int mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
290 int tunnel_hdr_len, tot_hdr_len;
291 unsigned int orig_mac_header;
292 unsigned int orig_nw_header;
/* Only path-MTU discovery errors are interesting here. */
294 if (type != ICMP_DEST_UNREACH || code != ICMP_FRAG_NEEDED)
298 * The minimum size packet that we would actually be able to process:
299 * encapsulating IP header, minimum GRE header, Ethernet header,
302 if (!pskb_may_pull(skb, sizeof(struct iphdr) + GRE_HEADER_SECTION +
303 ETH_HLEN + sizeof(struct iphdr)))
306 iph = (struct iphdr *)skb->data;
307 if (ipv4_is_multicast(iph->daddr))
310 tunnel_hdr_len = parse_header(iph, &flags, &key, &tunnel_type);
311 if (tunnel_hdr_len < 0)
/*
 * The ICMP payload is a packet WE sent, so src/dst are swapped relative
 * to the receive-side lookup.
 */
314 vport = ovs_tnl_find_port(dev_net(skb->dev), iph->saddr, iph->daddr, key,
315 tunnel_type, &mutable);
/*
320 * Packets received by this function were previously sent by us, so
321 * any comparisons should be to the output values, not the input.
322 * However, it's not really worth it to have a hash table based on
323 * output keys (especially since ICMP error handling of tunneled packets
324 * isn't that reliable anyways). Therefore, we do a lookup based on the
325 * out key as if it were the in key and then check to see if the input
326 * and output keys are the same.
 */
328 if (mutable->key.in_key != mutable->out_key)
331 if (!!(mutable->flags & TNL_F_IN_KEY_MATCH) !=
332 !!(mutable->flags & TNL_F_OUT_KEY_ACTION))
/* If we enabled checksums, the reflected packet must carry one too. */
335 if ((mutable->flags & TNL_F_CSUM) && !(flags & GRE_CSUM))
/* Full outer header = GRE header + encapsulating IP header. */
338 tunnel_hdr_len += iph->ihl << 2;
/* Save header offsets so the skb can be restored before returning. */
340 orig_mac_header = skb_mac_header(skb) - skb->data;
341 orig_nw_header = skb_network_header(skb) - skb->data;
342 skb_set_mac_header(skb, tunnel_hdr_len);
344 tot_hdr_len = tunnel_hdr_len + ETH_HLEN;
346 skb->protocol = eth_hdr(skb)->h_proto;
/* Account for an inner VLAN tag, if present. */
347 if (skb->protocol == htons(ETH_P_8021Q)) {
348 tot_hdr_len += VLAN_HLEN;
349 skb->protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
352 skb_set_network_header(skb, tot_hdr_len);
355 if (skb->protocol == htons(ETH_P_IP))
356 tot_hdr_len += sizeof(struct iphdr);
357 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
358 else if (skb->protocol == htons(ETH_P_IPV6))
359 tot_hdr_len += sizeof(struct ipv6hdr);
364 if (!pskb_may_pull(skb, tot_hdr_len))
/*
 * Sanity-check the advertised MTU: some routers report bogus values
 * below the protocol minimum; only trust them for tiny inner packets.
 */
367 if (skb->protocol == htons(ETH_P_IP)) {
368 if (mtu < IP_MIN_MTU) {
369 if (ntohs(ip_hdr(skb)->tot_len) >= IP_MIN_MTU)
376 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
377 else if (skb->protocol == htons(ETH_P_IPV6)) {
378 if (mtu < IPV6_MIN_MTU) {
379 unsigned int packet_length = sizeof(struct ipv6hdr) +
380 ntohs(ipv6_hdr(skb)->payload_len);
382 if (packet_length >= IPV6_MIN_MTU
383 || ntohs(ipv6_hdr(skb)->payload_len) == 0)
/* Expose the inner packet to the tunnel layer, then restore the skb. */
391 __skb_pull(skb, tunnel_hdr_len);
392 ovs_tnl_frag_needed(vport, mutable, skb, mtu);
393 __skb_push(skb, tunnel_hdr_len);
396 skb_set_mac_header(skb, orig_mac_header);
397 skb_set_network_header(skb, orig_nw_header);
398 skb->protocol = htons(ETH_P_IP);
/*
 * check_checksum - verify the GRE checksum on a received packet, if the
 * GRE_CSUM flag is set.  Uses hardware-computed CHECKSUM_COMPLETE when
 * available, otherwise falls back to a software checksum.
 *
 * NOTE(review): the remaining switch cases, the final comparison and the
 * return statement are missing from this chunk.
 */
401 static bool check_checksum(struct sk_buff *skb)
403 struct iphdr *iph = ip_hdr(skb);
404 struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
407 if (greh->flags & GRE_CSUM) {
408 switch (skb->ip_summed) {
409 case CHECKSUM_COMPLETE:
/* Hardware already summed the packet; just fold it. */
410 csum = csum_fold(skb->csum);
/* Software fallback; cache the result for later consumers. */
418 csum = __skb_checksum_complete(skb);
419 skb->ip_summed = CHECKSUM_COMPLETE;
/*
 * gre_flags_to_tunnel_flags - translate raw GRE header flag bits into
 * OVS_FLOW_TNL_F_* flags for the flow key.  The GRE key is surfaced as
 * OVS_FLOW_TNL_F_KEY when the port matches on incoming keys
 * (TNL_F_IN_KEY_MATCH) or is a flow-based port (no fixed daddr).
 */
427 static u32 gre_flags_to_tunnel_flags(const struct tnl_mutable_config *mutable,
430 u32 tunnel_flags = 0;
432 if (gre_flags & GRE_KEY) {
433 if (mutable->key.daddr && (mutable->flags & TNL_F_IN_KEY_MATCH))
434 tunnel_flags = OVS_FLOW_TNL_F_KEY;
435 else if (!mutable->key.daddr)
436 tunnel_flags = OVS_FLOW_TNL_F_KEY;
439 if (gre_flags & GRE_CSUM)
440 tunnel_flags |= OVS_FLOW_TNL_F_CSUM;
445 /* Called with rcu_read_lock and BH disabled. */
/*
 * gre_rcv - receive handler for IPPROTO_GRE packets.
 *
 * Validates the checksum, parses the GRE header, looks up the destination
 * vport by (daddr, saddr, key, tunnel_type), builds the per-packet tunnel
 * key, strips the GRE header and hands the inner frame to ovs_tnl_rcv().
 * Sends ICMP port-unreachable when no vport matches.
 *
 * NOTE(review): some declarations, returns and the iph assignment are
 * missing from this chunk.
 */
446 static int gre_rcv(struct sk_buff *skb)
449 const struct tnl_mutable_config *mutable;
452 struct ovs_key_ipv4_tunnel tun_key;
/* Need at least the base GRE header plus an inner Ethernet header. */
457 if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
459 if (unlikely(!check_checksum(skb)))
462 hdr_len = parse_header(ip_hdr(skb), &flags, &key, &tunnel_type);
463 if (unlikely(hdr_len < 0))
/* Re-check linearity for the full (possibly keyed/checksummed) header. */
466 if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
470 vport = ovs_tnl_find_port(dev_net(skb->dev), iph->daddr, iph->saddr, key,
471 tunnel_type, &mutable);
472 if (unlikely(!vport)) {
/* No tunnel configured for this packet: reject like a closed port. */
473 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
477 tnl_tun_key_init(&tun_key, iph, key, gre_flags_to_tunnel_flags(mutable, flags));
478 OVS_CB(skb)->tun_key = &tun_key;
/* Strip the GRE header and fix up the receive checksum accordingly. */
480 __skb_pull(skb, hdr_len);
481 skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);
483 ovs_tnl_rcv(vport, skb);
/* Tunnel operations for classic (32-bit key) GRE ports. */
491 static const struct tnl_ops gre_tnl_ops = {
492 .tunnel_type = TNL_T_PROTO_GRE,
493 .ipproto = IPPROTO_GRE,
494 .hdr_len = gre_hdr_len,
495 .build_header = gre_build_header,
496 .update_header = gre_update_header,
/* vport_ops.create callback: instantiate a classic GRE tunnel vport. */
499 static struct vport *gre_create(const struct vport_parms *parms)
501 return ovs_tnl_create(parms, &ovs_gre_vport_ops, &gre_tnl_ops);
/* Tunnel operations for GRE64 ports (64-bit key in key + seq fields). */
504 static const struct tnl_ops gre64_tnl_ops = {
505 .tunnel_type = TNL_T_PROTO_GRE64,
506 .ipproto = IPPROTO_GRE,
507 .hdr_len = gre_hdr_len,
508 .build_header = gre_build_header,
509 .update_header = gre_update_header,
/* vport_ops.create callback: instantiate a GRE64 tunnel vport. */
512 static struct vport *gre_create64(const struct vport_parms *parms)
514 return ovs_tnl_create(parms, &ovs_gre64_vport_ops, &gre64_tnl_ops);
/*
 * IPPROTO_GRE protocol hooks registered with the IP stack.
 * NOTE(review): the .handler member (presumably gre_rcv) is not visible in
 * this chunk.
 */
517 static const struct net_protocol gre_protocol_handlers = {
519 .err_handler = gre_err,
520 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
/*
 * gre_init - register the GRE protocol handler with the IP stack.
 * NOTE(review): the error-code check and return paths are missing from
 * this chunk.
 */
527 static int gre_init(void)
535 err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
537 pr_warn("cannot register gre protocol handler\n");
/* gre_exit - unregister the GRE protocol handler on module teardown. */
542 static void gre_exit(void)
549 inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
/* Public vport operations for the classic GRE port type. */
552 const struct vport_ops ovs_gre_vport_ops = {
553 .type = OVS_VPORT_TYPE_GRE,
554 .flags = VPORT_F_TUN_ID,
557 .create = gre_create,
558 .destroy = ovs_tnl_destroy,
559 .set_addr = ovs_tnl_set_addr,
560 .get_name = ovs_tnl_get_name,
561 .get_addr = ovs_tnl_get_addr,
562 .get_options = ovs_tnl_get_options,
563 .set_options = ovs_tnl_set_options,
564 .get_dev_flags = ovs_vport_gen_get_dev_flags,
565 .is_running = ovs_vport_gen_is_running,
566 .get_operstate = ovs_vport_gen_get_operstate,
567 .send = ovs_tnl_send,
/*
 * Public vport operations for the GRE64 port type; identical to the GRE
 * ops except for the type and create callback.
 */
570 const struct vport_ops ovs_gre64_vport_ops = {
571 .type = OVS_VPORT_TYPE_GRE64,
572 .flags = VPORT_F_TUN_ID,
575 .create = gre_create64,
576 .destroy = ovs_tnl_destroy,
577 .set_addr = ovs_tnl_set_addr,
578 .get_name = ovs_tnl_get_name,
579 .get_addr = ovs_tnl_get_addr,
580 .get_options = ovs_tnl_get_options,
581 .set_options = ovs_tnl_set_options,
582 .get_dev_flags = ovs_vport_gen_get_dev_flags,
583 .is_running = ovs_vport_gen_is_running,
584 .get_operstate = ovs_vport_gen_get_operstate,
585 .send = ovs_tnl_send,