{
__be32 diff[] = { ~from, to };
-/* On older kernels, CHECKSUM_PARTIAL and CHECKSUM_COMPLETE are both defined
- * as CHECKSUM_HW. However, we can make some inferences so that we can update
- * the checksums appropriately. */
- enum {
- CSUM_PARTIAL, /* Partial checksum, skb->csum undefined. */
- CSUM_PACKET, /* In-packet checksum, skb->csum undefined. */
- CSUM_COMPLETE, /* In-packet checksum, skb->csum valid. */
- } csum_type;
-
- csum_type = CSUM_PACKET;
-#ifndef CHECKSUM_HW
- /* Newer kernel, just map between kernel types and ours. */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- csum_type = CSUM_PARTIAL;
- else if (skb->ip_summed == CHECKSUM_COMPLETE)
- csum_type = CSUM_COMPLETE;
-#else
- /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
- * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
- * uses some special fields to represent this (see below). Since we
- * can only make one type work, pick the one that actually happens in
- * practice. */
- if (skb->ip_summed == CHECKSUM_HW)
- csum_type = CSUM_COMPLETE;
-#endif
-#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
- /* Xen has a special way of representing CHECKSUM_PARTIAL on older
- * kernels. */
- if (skb->proto_csum_blank)
- csum_type = CSUM_PARTIAL;
-#endif
-
- if (csum_type != CSUM_PARTIAL) {
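+ /* When checksumming is not deferred (CSUM_PARTIAL), fold the difference
+ * into the in-packet checksum; for CSUM_COMPLETE packets skb->csum is
+ * kept in sync as well. */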
+ if (OVS_CB(skb)->ip_summed != CSUM_PARTIAL) {
*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
~csum_unfold(*sum)));
- if (csum_type == CSUM_COMPLETE && pseudohdr)
+ if (OVS_CB(skb)->ip_summed == CSUM_COMPLETE && pseudohdr)
skb->csum = ~csum_partial((char *)diff, sizeof(diff),
~skb->csum);
} else if (pseudohdr)
#define MAINT_SLEEP_MSECS 1000
static int new_nbp(struct datapath *, struct net_device *, int port_no);
+static void compute_ip_summed(struct sk_buff *skb);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
WARN_ON_ONCE(skb_shared(skb));
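+ /* Record the L4 checksum state in OVS_CB so that later checksum updates
+ * see a consistent value regardless of kernel version. */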
+ compute_ip_summed(skb);
+
/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
if (skb->protocol != htons(ETH_P_IP))
goto out;
- if (!skb_pull_up_to(skb, skb_network_header(skb) + 1))
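+ /* Make sure the entire IP header is in the linear data area before
+ * ip_hdr() is dereferenced below. */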
+ if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))
goto out;
iph = ip_hdr(skb);
* skb_forward_csum(). It is slightly different because we are only concerned
* with bridging and not other types of forwarding and can get away with
* slightly more optimal behavior.*/
+static void
+compute_ip_summed(struct sk_buff *skb)
+{
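+ /* On kernels that define CHECKSUM_PARTIAL and CHECKSUM_COMPLETE, the
+ * csum_type values match skb->ip_summed, so a direct copy suffices; the
+ * special cases below handle older kernels and Xen. */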
+ OVS_CB(skb)->ip_summed = skb->ip_summed;
+
+#ifdef CHECKSUM_HW
+ /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
+ * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
+ * uses some special fields to represent this (see below). Since we
+ * can only make one type work, pick the one that actually happens in
+ * practice. */
+ if (skb->ip_summed == CHECKSUM_HW)
+ OVS_CB(skb)->ip_summed = CSUM_COMPLETE;
+#endif
+#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
+ /* Xen has a special way of representing CHECKSUM_PARTIAL on older
+ * kernels. */
+ if (skb->proto_csum_blank)
+ OVS_CB(skb)->ip_summed = CSUM_PARTIAL;
+#endif
+}
+
void
forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
- if (skb->ip_summed == CHECKSUM_HW)
+ if (OVS_CB(skb)->ip_summed == CSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
#endif
}
atomic_t sflow_pool;
};
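+/* L4 checksum state.  Values match the kernel's CHECKSUM_* constants on
+ * kernels that define CHECKSUM_PARTIAL and CHECKSUM_COMPLETE. */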
+enum csum_type {
+ CSUM_NONE = 0,        /* In-packet checksum, skb->csum undefined. */
+ CSUM_UNNECESSARY = 1, /* Checksum verified by hardware, skb->csum undefined. */
+ CSUM_COMPLETE = 2,    /* In-packet checksum, skb->csum valid. */
+ CSUM_PARTIAL = 3,     /* Partial checksum, skb->csum undefined. */
+};
+
+/**
+ * struct ovs_skb_cb - OVS data in skb CB
+ * @ip_summed: Consistently stores L4 checksumming status across different
+ * kernel versions.
+ */
+struct ovs_skb_cb {
+ enum csum_type ip_summed;
+};
+#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
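+/* struct ovs_skb_cb must fit within the 48-byte skb->cb area. */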
+
extern struct notifier_block dp_device_notifier;
extern int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);