2 * Copyright (c) 2010, 2011 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
12 #include <linux/version.h>
15 #include "openvswitch/tunnel.h"
19 * The absolute minimum fragment size. Note that there are many other
20 * definitions of the minimum MTU.
25 * One of these goes in struct tnl_ops and in tnl_find_port().
26 * These values are in the same namespace as other TNL_T_* values, so
27 * only the least significant 10 bits are available to define protocol
/* Tunnel protocol types.  Per the (truncated) comment above, these share a
 * namespace with the other TNL_T_* values and only the least significant
 * 10 bits identify the protocol. */
30 #define TNL_T_PROTO_GRE 0
31 #define TNL_T_PROTO_CAPWAP 1
33 /* These flags are only needed when calling tnl_find_port(). */
34 #define TNL_T_KEY_EXACT (1 << 10)
35 #define TNL_T_KEY_MATCH (1 << 11)
37 /* Private flags not exposed to userspace in this form. */
38 #define TNL_F_IN_KEY_MATCH (1 << 16) /* Store the key in tun_id to
39 * match in flow table. */
40 #define TNL_F_OUT_KEY_ACTION (1 << 17) /* Get the key from a SET_TUNNEL
 * action (remainder of this comment lost in truncation --
 * confirm wording against the full file). */
43 /* All public tunnel flags. */
/* NOTE(review): the individual TNL_F_* public flag definitions below are not
 * visible in this chunk; presumably they come from the userspace-shared
 * header included above (openvswitch/tunnel.h) -- confirm. */
44 #define TNL_F_PUBLIC (TNL_F_CSUM | TNL_F_TOS_INHERIT | TNL_F_TTL_INHERIT | \
45 TNL_F_DF_INHERIT | TNL_F_DF_DEFAULT | TNL_F_PMTUD | \
46 TNL_F_HDR_CACHE | TNL_F_IPSEC)
/**
49 * struct port_lookup_key - Tunnel port key, used as hash table key.
50 * @in_key: Key to match on input, 0 for wildcard.
51 * @saddr: IPv4 source address to match, 0 to accept any source address.
52 * @daddr: IPv4 destination of tunnel.
53 * @tunnel_type: Set of TNL_T_* flags that define lookup.
 */
55 struct port_lookup_key {
/* NOTE(review): the struct's members and closing brace are missing from this
 * truncated copy.  PORT_KEY_LEN below hashes everything up to and including
 * tunnel_type, i.e. it assumes tunnel_type is the last member that
 * participates in the lookup -- confirm against the full file. */
62 #define PORT_KEY_LEN (offsetof(struct port_lookup_key, tunnel_type) + \
63 FIELD_SIZEOF(struct port_lookup_key, tunnel_type))
/**
66 * struct tnl_mutable_config - modifiable configuration for a tunnel.
67 * @key: Used as key for tunnel port. Configured via OVS_TUNNEL_ATTR_*
 *       attributes (continuation of this line lost in truncation).
69 * @rcu: RCU callback head for deferred destruction.
70 * @seq: Sequence number for distinguishing configuration versions.
71 * @tunnel_hlen: Tunnel header length.
72 * @eth_addr: Source address for packets generated by tunnel itself
73 * (e.g. ICMP fragmentation needed messages).
74 * @out_key: Key to use on output, 0 if this tunnel has no fixed output key.
75 * @flags: TNL_F_* flags.
76 * @tos: IPv4 TOS value to use for tunnel, 0 if no fixed TOS.
77 * @ttl: IPv4 TTL value to use for tunnel, 0 if no fixed TTL.
 */
79 struct tnl_mutable_config {
80 struct port_lookup_key key;
/* NOTE(review): the members documented above (@rcu, @seq, @tunnel_hlen,
 * @out_key, @flags, @tos, @ttl) and the struct's closing brace are missing
 * from this truncated copy. */
87 unsigned char eth_addr[ETH_ALEN];
89 /* Configured via OVS_TUNNEL_ATTR_* attributes. */
95 /* Multicast configuration. */
/* NOTE(review): the opening declaration of the struct containing the members
 * below -- apparently struct tnl_ops, given the "One of these goes in struct
 * tnl_ops" comment near the top of the file -- is missing from this
 * truncated copy. */
100 u32 tunnel_type; /* Put the TNL_T_PROTO_* type in here. */
101 u8 ipproto; /* The IP protocol for the tunnel. */
/*
104 * Returns the length of the tunnel header that will be added in
105 * build_header() (i.e. excludes the IP header). Returns a negative
106 * error code if the configuration is invalid.
 */
108 int (*hdr_len)(const struct tnl_mutable_config *);
/*
111 * Builds the static portion of the tunnel header, which is stored in
112 * the header cache. In general the performance of this function is
113 * not too important as we try to only call it when building the cache
114 * so it is preferable to shift as much work as possible here. However,
115 * in some circumstances caching is disabled and this function will be
116 * called for every packet, so try not to make it too slow.
 */
118 void (*build_header)(const struct vport *,
119 const struct tnl_mutable_config *, void *header);
/*
122 * Updates the cached header of a packet to match the actual packet
123 * data. Typical things that might need to be updated are length,
124 * checksum, etc. The IP header will have already been updated and this
125 * is the final step before transmission. Returns a linked list of
126 * completed SKBs (multiple packets may be generated in the event
 * ... remainder of this comment lost in truncation).
 */
129 struct sk_buff *(*update_header)(const struct vport *,
130 const struct tnl_mutable_config *,
131 struct dst_entry *, struct sk_buff *);
134 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
/*
136 * On these kernels we have a fast mechanism to tell if the ARP cache for a
137 * particular destination has changed.
 *
 * NOTE(review): the #define this guard presumably introduces (HAVE_HH_SEQ,
 * judging by the !defined(HAVE_HH_SEQ) test below) and the matching #endif
 * are missing from this truncated copy -- confirm against the full file.
 */
141 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
/*
143 * On these kernels we have a fast mechanism to tell if the routing table
 * ... (remainder of this comment lost in truncation).
 */
146 #define HAVE_RT_GENID
/* NOTE(review): #endif for the 2.6.27 guard not visible in this copy. */
148 #if !defined(HAVE_HH_SEQ) || !defined(HAVE_RT_GENID)
149 /* If we can't detect all system changes directly we need to use a timeout. */
150 #define NEED_CACHE_TIMEOUT
/* NOTE(review): matching #endif not visible in this copy. */
/* NOTE(review): the opening declaration of the struct containing the members
 * below -- apparently the tunnel header cache, struct tnl_cache, referenced
 * later in this file as "struct tnl_cache __rcu *cache" -- is missing from
 * this truncated copy. */
155 int len; /* Length of data to be memcpy'd from cache. */
156 int hh_len; /* Hardware hdr length, cached from hh_cache. */
158 /* Sequence number of mutable->seq from which this cache was
 * created (remainder of comment lost in truncation). */
160 unsigned mutable_seq;
/*
164 * The sequence number from the seqlock protecting the hardware header
165 * cache (in the ARP cache). Since every write increments the counter
166 * this gives us an easy way to tell if it has changed.
 *
 * NOTE(review): the member this documents (likely guarded by
 * #ifdef HAVE_HH_SEQ) is missing from this copy.
 */
171 #ifdef NEED_CACHE_TIMEOUT
/*
173 * If we don't have direct mechanisms to detect all important changes in
174 * the system fall back to an expiration time. This expiration time
175 * can be relatively short since at high rates there will be millions of
176 * packets per second, so we'll still get plenty of benefit from the
177 * cache. Note that if something changes we may blackhole packets
178 * until the expiration time (depending on what changed and the kernel
179 * version we may be able to detect the change sooner). Expiration is
180 * expressed as a time in jiffies.
 */
182 unsigned long expiration;
/* NOTE(review): #endif for NEED_CACHE_TIMEOUT not visible in this copy. */
/*
186 * The routing table entry that is the result of looking up the tunnel
187 * endpoints. It also contains a sequence number (called a generation
188 * ID) that can be compared to a global sequence to tell if the routing
189 * table has changed (and therefore there is a potential that this
190 * cached route has been invalidated).
 *
 * NOTE(review): the member this documents (a cached route) is missing from
 * this truncated copy.
 */
/*
195 * If the output device for tunnel traffic is an OVS internal device,
196 * the flow of that datapath. Since all tunnel traffic will have the
197 * same headers this allows us to cache the flow lookup. NULL if the
198 * output device is not OVS or if there is no flow installed.
 */
200 struct sw_flow *flow;
202 /* The cached header follows after padding for alignment. */
/* NOTE(review): the struct's closing brace is not visible in this copy. */
/* NOTE(review): the opening declaration of the struct containing the members
 * below -- apparently struct tnl_vport, the per-port private data returned
 * by tnl_vport_priv() at the end of this file -- is missing from this
 * truncated copy. */
207 struct hlist_node hash_node;
210 const struct tnl_ops *tnl_ops;
212 struct tnl_mutable_config __rcu *mutable;
/*
215 * ID of last fragment sent (for tunnel protocols with direct support
216 * fragmentation). If the protocol relies on IP fragmentation then
217 * this is not needed.
 *
 * NOTE(review): the member this documents (a fragment-ID counter) is
 * missing from this copy.
 */
221 spinlock_t cache_lock;
222 struct tnl_cache __rcu *cache; /* Protected by RCU/cache_lock. */
224 #ifdef NEED_CACHE_TIMEOUT
/*
226 * If we must rely on expiration time to invalidate the cache, this is
227 * the interval. It is randomized within a range (defined by
228 * MAX_CACHE_EXP in tunnel.c) to avoid synchronized expirations caused
229 * by creation of a large number of tunnels at a one time.
 */
231 unsigned long cache_exp_interval;
/* NOTE(review): #endif and the struct's closing brace not visible here. */
/* Lifecycle: create/destroy a tunnel vport around protocol-specific ops. */
235 struct vport *tnl_create(const struct vport_parms *, const struct vport_ops *,
236 const struct tnl_ops *);
237 void tnl_destroy(struct vport *);
/* Netlink-attribute-driven configuration set/get. */
239 int tnl_set_options(struct vport *, struct nlattr *);
240 int tnl_get_options(const struct vport *, struct sk_buff *);
/* Generic vport accessors implemented by the tunnel layer. */
242 int tnl_set_addr(struct vport *vport, const unsigned char *addr);
243 const char *tnl_get_name(const struct vport *vport);
244 const unsigned char *tnl_get_addr(const struct vport *vport);
/* Datapath transmit/receive entry points. */
245 int tnl_send(struct vport *vport, struct sk_buff *skb);
246 void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos);
/* Look up the tunnel port matching the given addresses/key and return its
 * mutable config through *mutable.
 * NOTE(review): a parameter line appears to have been dropped between the
 * two visible lines of this prototype (likely the tunnel_type argument
 * carrying TNL_T_* flags) -- confirm against the full file. */
248 struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
250 const struct tnl_mutable_config **mutable);
/* Generate an ICMP "fragmentation needed" message for a packet that exceeds
 * mtu; presumably returns whether it was sent -- confirm in tunnel.c. */
251 bool tnl_frag_needed(struct vport *vport,
252 const struct tnl_mutable_config *mutable,
253 struct sk_buff *skb, unsigned int mtu, __be64 flow_key);
/* Free a list of SKBs linked as produced by tnl_ops->update_header(). */
254 void tnl_free_linked_skbs(struct sk_buff *skb);
/* Returns the tunnel-private area (struct tnl_vport) stored inside a generic
 * vport, via the datapath's vport_priv() accessor.
 * NOTE(review): the function's braces are missing from this truncated copy;
 * only the signature and return statement survive. */
258 static inline struct tnl_vport *tnl_vport_priv(const struct vport *vport)
260 return vport_priv(vport);
264 #endif /* tunnel.h */