2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
12 #include <linux/version.h>
15 #include "openvswitch/tunnel.h"
20 * The absolute minimum fragment size. Note that there are many other
21 * definitions of the minimum MTU.
 * One of these goes in struct tnl_ops and in tnl_find_port().
 * These values are in the same namespace as other TNL_T_* values, so
 * only the least significant 10 bits are available to define protocol
 * identifiers.
#define TNL_T_PROTO_GRE		0	/* GRE tunnel protocol. */
#define TNL_T_PROTO_CAPWAP	1	/* CAPWAP tunnel protocol. */

/* These flags are only needed when calling tnl_find_port(). */
#define TNL_T_KEY_EXACT		(1 << 10)	/* Presumably: match ports keyed on this exact value — confirm in tunnel.c. */
#define TNL_T_KEY_MATCH		(1 << 11)	/* Presumably: match ports that accept any key — confirm in tunnel.c. */
#define TNL_T_KEY_EITHER	(TNL_T_KEY_EXACT | TNL_T_KEY_MATCH)
/*
 * Mutable per-port tunnel configuration.  Read via an RCU-protected
 * pointer (see the __rcu 'mutable' member of struct tnl_vport below);
 * 'seq' distinguishes successive configurations.
 * NOTE(review): the remainder of this struct is not visible in this chunk.
 */
struct tnl_mutable_config {
	unsigned seq;		/* Sequence number to identify this config. */

	u32 tunnel_type;	/* Set of TNL_T_* flags that define lookup. */
	unsigned tunnel_hlen;	/* Tunnel header length. */

	unsigned char eth_addr[ETH_ALEN];	/* Ethernet address of the port. */

	struct tnl_port_config port_config;	/* NOTE(review): presumably the caller-supplied port config — confirm. */
	u32 tunnel_type;	/* Put the TNL_T_PROTO_* type in here. */
	u8 ipproto;		/* The IP protocol for the tunnel. */

	/*
	 * Returns the length of the tunnel header that will be added in
	 * build_header() (i.e. excludes the IP header).  Returns a negative
	 * error code if the configuration is invalid.
	 */
	int (*hdr_len)(const struct tnl_port_config *);

	/*
	 * Builds the static portion of the tunnel header, which is stored in
	 * the header cache.  In general the performance of this function is
	 * not too important as we try to only call it when building the cache
	 * so it is preferable to shift as much work as possible here.  However,
	 * in some circumstances caching is disabled and this function will be
	 * called for every packet, so try not to make it too slow.
	 */
	void (*build_header)(const struct vport *,
			     const struct tnl_mutable_config *, void *header);

	/*
	 * Updates the cached header of a packet to match the actual packet
	 * data.  Typical things that might need to be updated are length,
	 * checksum, etc.  The IP header will have already been updated and this
	 * is the final step before transmission.  Returns a linked list of
	 * completed SKBs (multiple packets may be generated in the event
	 * of fragmentation — end of sentence reconstructed; confirm against
	 * the full header).
	 */
	struct sk_buff *(*update_header)(const struct vport *,
					 const struct tnl_mutable_config *,
					 struct dst_entry *, struct sk_buff *);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
/*
 * On these kernels we have a fast mechanism to tell if the ARP cache for a
 * particular destination has changed.  (Presumably HAVE_HH_SEQ is defined
 * here — the #define is not visible in this chunk but is tested below.)
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
/*
 * On these kernels we have a fast mechanism to tell if the routing table
 * has changed: the route generation ID.
 */
#define HAVE_RT_GENID
#if !defined(HAVE_HH_SEQ) || !defined(HAVE_RT_GENID)
/* If we can't detect all system changes directly we need to use a timeout. */
#define NEED_CACHE_TIMEOUT
	int len;		/* Length of data to be memcpy'd from cache. */

	/* Sequence number of mutable->seq from which this cache was generated. */
	unsigned mutable_seq;

	/*
	 * The sequence number from the seqlock protecting the hardware header
	 * cache (in the ARP cache).  Since every write increments the counter
	 * this gives us an easy way to tell if it has changed.
	 * NOTE(review): the field this comment describes (under HAVE_HH_SEQ)
	 * is not visible in this chunk.
	 */

#ifdef NEED_CACHE_TIMEOUT
	/*
	 * If we don't have direct mechanisms to detect all important changes in
	 * the system fall back to an expiration time.  This expiration time
	 * can be relatively short since at high rates there will be millions of
	 * packets per second, so we'll still get plenty of benefit from the
	 * cache.  Note that if something changes we may blackhole packets
	 * until the expiration time (depending on what changed and the kernel
	 * version we may be able to detect the change sooner).  Expiration is
	 * expressed as a time in jiffies.
	 */
	unsigned long expiration;

	/*
	 * The routing table entry that is the result of looking up the tunnel
	 * endpoints.  It also contains a sequence number (called a generation
	 * ID) that can be compared to a global sequence to tell if the routing
	 * table has changed (and therefore there is a potential that this
	 * cached route has been invalidated).
	 * NOTE(review): the route field itself is not visible in this chunk.
	 */

	/*
	 * If the output device for tunnel traffic is an OVS internal device,
	 * the flow of that datapath.  Since all tunnel traffic will have the
	 * same headers this allows us to cache the flow lookup.  NULL if the
	 * output device is not OVS or if there is no flow installed.
	 */
	struct sw_flow *flow;

	/* The cached header follows after padding for alignment. */
	struct tbl_node tbl_node;	/* Membership in the port lookup table — see tnl_find_port(). */

	const struct tnl_ops *tnl_ops;	/* Protocol-specific operations (see TNL_T_PROTO_*). */

	struct tnl_mutable_config __rcu *mutable;	/* RCU-protected mutable configuration. */

	/*
	 * ID of last fragment sent (for tunnel protocols with direct support
	 * for fragmentation).  If the protocol relies on IP fragmentation then
	 * this is not needed.
	 * NOTE(review): the fragment-ID field itself is not visible in this
	 * chunk.
	 */

	spinlock_t cache_lock;		/* Serializes writers of 'cache'. */
	struct tnl_cache __rcu *cache;	/* Protected by RCU/cache_lock. */

#ifdef NEED_CACHE_TIMEOUT
	/*
	 * If we must rely on expiration time to invalidate the cache, this is
	 * the interval.  It is randomized within a range (defined by
	 * MAX_CACHE_EXP in tunnel.c) to avoid synchronized expirations caused
	 * by creation of a large number of tunnels at a one time.
	 */
	unsigned long cache_exp_interval;
/*
 * Generic tunnel vport layer, parameterized by a protocol-specific
 * struct tnl_ops (GRE, CAPWAP — see TNL_T_PROTO_*).  Implementations
 * live in tunnel.c.
 */
struct vport *tnl_create(const struct vport_parms *, const struct vport_ops *,
			 const struct tnl_ops *);
int tnl_modify(struct vport *, struct odp_port *);
int tnl_destroy(struct vport *);
int tnl_set_mtu(struct vport *vport, int mtu);
int tnl_set_addr(struct vport *vport, const unsigned char *addr);
const char *tnl_get_name(const struct vport *vport);
const unsigned char *tnl_get_addr(const struct vport *vport);
void tnl_get_config(const struct vport *vport, void *config);
int tnl_get_mtu(const struct vport *vport);
int tnl_send(struct vport *vport, struct sk_buff *skb);
void tnl_rcv(struct vport *vport, struct sk_buff *skb);

/*
 * Looks up the tunnel port for the given endpoints/key; the TNL_T_KEY_*
 * flags above control key matching.  On success *mutable is presumably set
 * to the port's configuration — confirm in tunnel.c.
 * NOTE(review): a parameter (likely the tunnel type) appears to be missing
 * from this view of the declaration — confirm against the full header.
 */
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
			    const struct tnl_mutable_config **mutable);
/* Presumably generates a "fragmentation needed" response for packets larger
 * than 'mtu' — confirm in tunnel.c. */
bool tnl_frag_needed(struct vport *vport,
		     const struct tnl_mutable_config *mutable,
		     struct sk_buff *skb, unsigned int mtu, __be64 flow_key);
/* Frees 'skb' and the SKBs linked after it (see update_header(), which
 * returns such a linked list). */
void tnl_free_linked_skbs(struct sk_buff *skb);
/*
 * Returns the tunnel-private area of a vport (the struct tnl_vport stored
 * in the generic vport's private data).  The visible text of this chunk
 * was missing the function's braces; restored here so the definition is
 * well-formed.
 */
static inline struct tnl_vport *tnl_vport_priv(const struct vport *vport)
{
	return vport_priv(vport);
}
214 #endif /* tunnel.h */