+ void (*build_header)(const struct vport *,
+ const struct tnl_mutable_config *, void *header);
+
+ /*
+ * Updates the cached header of a packet to match the actual packet
+ * data. Typical things that might need to be updated are length,
+ * checksum, etc. The IP header will have already been updated and this
+ * is the final step before transmission. Returns a linked list of
+ * completed SKBs (multiple packets may be generated in the event
+ * of fragmentation).
+ */
+ struct sk_buff *(*update_header)(const struct vport *,
+ const struct tnl_mutable_config *,
+ struct dst_entry *, struct sk_buff *);
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+/*
+ * On these kernels we have a fast mechanism to tell if the ARP cache for a
+ * particular destination has changed: the hardware header cache carries a
+ * seqlock sequence counter that is incremented on every write, so a cached
+ * header can be validated with a single comparison (see the 'hh_seq' field
+ * of struct tnl_cache).
+ */
+#define HAVE_HH_SEQ
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+/*
+ * On these kernels we have a fast mechanism to tell if the routing table
+ * has changed: cached routes carry a generation ID that can be compared
+ * against a global sequence number to detect invalidation.
+ */
+#define HAVE_RT_GENID
+#endif
+#if !defined(HAVE_HH_SEQ) || !defined(HAVE_RT_GENID)
+/*
+ * If we can't detect all system changes directly we need to use a timeout
+ * (the 'expiration' field of struct tnl_cache) to bound how long a stale
+ * cache entry can be used.
+ */
+#define NEED_CACHE_TIMEOUT
+#endif
+struct tnl_cache {
+ struct rcu_head rcu;
+
+ int len; /* Length of data to be memcpy'd from cache. */
+
+ /* Sequence number of mutable->seq from which this cache was generated. */
+ unsigned mutable_seq;
+
+#ifdef HAVE_HH_SEQ
+ /*
+ * The sequence number from the seqlock protecting the hardware header
+ * cache (in the ARP cache). Since every write increments the counter
+ * this gives us an easy way to tell if it has changed.
+ */
+ unsigned hh_seq;
+#endif
+
+#ifdef NEED_CACHE_TIMEOUT
+ /*
+ * If we don't have direct mechanisms to detect all important changes in
+ * the system fall back to an expiration time. This expiration time
+ * can be relatively short since at high rates there will be millions of
+ * packets per second, so we'll still get plenty of benefit from the
+ * cache. Note that if something changes we may blackhole packets
+ * until the expiration time (depending on what changed and the kernel
+ * version we may be able to detect the change sooner). Expiration is
+ * expressed as a time in jiffies.
+ */
+ unsigned long expiration;
+#endif
+
+ /*
+ * The routing table entry that is the result of looking up the tunnel
+ * endpoints. It also contains a sequence number (called a generation
+ * ID) that can be compared to a global sequence to tell if the routing
+ * table has changed (and therefore there is a potential that this
+ * cached route has been invalidated).
+ */
+ struct rtable *rt;
+
+ /*
+ * If the output device for tunnel traffic is an OVS internal device,
+ * the flow of that datapath. Since all tunnel traffic will have the
+ * same headers this allows us to cache the flow lookup. NULL if the
+ * output device is not OVS or if there is no flow installed.
+ */
+ struct sw_flow *flow;
+
+ /* The cached header follows after padding for alignment. */