1 #ifndef __LINUX_SKBUFF_WRAPPER_H
2 #define __LINUX_SKBUFF_WRAPPER_H 1
4 #include_next <linux/skbuff.h>
6 #include <linux/version.h>
8 #ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
9 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
10 const int offset, void *to,
11 const unsigned int len)
13 memcpy(to, skb->data + offset, len);
16 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
19 const unsigned int len)
21 memcpy(skb->data + offset, from, len);
24 #endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
/* Guarded because kernels from 2.6.16 onward already define NET_SKB_PAD
 * in <linux/skbuff.h>; only supply the default on older ones. */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif
46 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
51 if (headroom < NET_SKB_PAD)
52 headroom = NET_SKB_PAD;
53 if (headroom > skb_headroom(skb))
54 delta = headroom - skb_headroom(skb);
57 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
62 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
64 return __skb_cow(skb, headroom, skb_header_cloned(skb));
66 #endif /* !HAVE_SKB_COW */
69 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
70 /* Emulate Linux 2.6.17 and later behavior, in which kfree_skb silently ignores
71 * null pointer arguments. */
72 #define kfree_skb(skb) kfree_skb_maybe_null(skb)
73 static inline void kfree_skb_maybe_null(struct sk_buff *skb)
75 if (likely(skb != NULL))
#ifndef CHECKSUM_PARTIAL
/* Note that CHECKSUM_PARTIAL is not implemented, but this allows us to at
 * least test against it: see update_csum() in forward.c. */
#define CHECKSUM_PARTIAL 3
#endif
#ifndef CHECKSUM_COMPLETE
/* CHECKSUM_HW was renamed to CHECKSUM_COMPLETE in 2.6.19; map the new
 * name onto the old constant for earlier kernels. */
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif
/* On kernels whose sk_buff still carries the old mac/nh unions, remap the
 * modern field names so code written against newer kernels compiles
 * unchanged.  NOTE(review): guard name reconstructed from the configure
 * checks — confirm HAVE_MAC_RAW matches the build system's probe. */
#ifdef HAVE_MAC_RAW
#define mac_header mac.raw
#define network_header nh.raw
#endif /* HAVE_MAC_RAW */
95 #ifndef HAVE_SKBUFF_HEADER_HELPERS
96 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
101 static inline void skb_reset_transport_header(struct sk_buff *skb)
103 skb->h.raw = skb->data;
106 static inline void skb_set_transport_header(struct sk_buff *skb,
109 skb->h.raw = skb->data + offset;
112 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
117 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
119 skb->nh.raw = skb->data + offset;
122 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
127 static inline void skb_reset_mac_header(struct sk_buff *skb)
129 skb->mac_header = skb->data;
132 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
134 skb->mac.raw = skb->data + offset;
137 static inline int skb_transport_offset(const struct sk_buff *skb)
139 return skb_transport_header(skb) - skb->data;
142 static inline int skb_network_offset(const struct sk_buff *skb)
144 return skb_network_header(skb) - skb->data;
147 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
149 const unsigned int len)
151 memcpy(skb->data, from, len);
153 #endif /* !HAVE_SKBUFF_HEADER_HELPERS */
155 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
156 #warning "TSO/UFO not supported on kernels earlier than 2.6.18"
158 static inline int skb_is_gso(const struct sk_buff *skb)
163 static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
168 #endif /* before 2.6.18 */