struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
struct ethhdr *eh;
-
/* Verify we were given a vlan packet */
if (vh->h_vlan_proto != htons(ETH_P_8021Q))
return skb;
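+ /* A complete checksum covers the VLAN header bytes as well, so
+ * subtract their contribution before the tag is stripped below. */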
+ if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
+ skb->csum = csum_sub(skb->csum, csum_partial(skb->data
+ + ETH_HLEN, VLAN_HLEN, 0));
+
memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);
eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
if (skb->protocol == htons(ETH_P_8021Q)) {
/* Modify vlan id, but maintain other TCI values */
struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
+ __be16 old_tci = vh->h_vlan_TCI;
+
vh->h_vlan_TCI = htons((ntohs(vh->h_vlan_TCI) & ~mask) | tci);
+
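+ /* Patch the complete checksum in place by folding in the one's
+ * complement of the old TCI and the new TCI, instead of
+ * recomputing it over the whole packet. */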
+ if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE) {
+ __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
+
+ skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+ ~skb->csum);
+ }
} else {
/* Add vlan header */
segs->next = NULL;
+ /* GSO can change the checksum type, so update our cached copy. */
+ compute_ip_summed(segs, true);
+
segs = __vlan_put_tag(segs, tci);
err = -ENOMEM;
if (segs) {
} while (segs->next);
skb = segs;
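+ /* Keep the cached checksum type in sync with skb->ip_summed for
+ * the resulting skb. */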
+ compute_ip_summed(skb, true);
}
/* The hardware-accelerated version of vlan_put_tag() works
skb = __vlan_put_tag(skb, tci);
if (!skb)
return ERR_PTR(-ENOMEM);
+
+ /* GSO doesn't fix up the hardware-computed checksum, so this
+ * will only be hit in the non-GSO case. */
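+ /* The newly inserted tag bytes become part of the data covered by
+ * the complete checksum, so add their contribution back in. */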
+ if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(skb->data
+ + ETH_HLEN, VLAN_HLEN, 0));
}
return skb;
{
__be32 diff[] = { ~from, to };
- if (OVS_CB(skb)->ip_summed != CSUM_PARTIAL) {
+ if (OVS_CB(skb)->ip_summed != OVS_CSUM_PARTIAL) {
*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
~csum_unfold(*sum)));
- if (OVS_CB(skb)->ip_summed == CSUM_COMPLETE && pseudohdr)
+ if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE && pseudohdr)
skb->csum = ~csum_partial((char *)diff, sizeof(diff),
~skb->csum);
} else if (pseudohdr)
#define MAINT_SLEEP_MSECS 1000
static int new_nbp(struct datapath *, struct net_device *, int port_no);
-static void compute_ip_summed(struct sk_buff *skb);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
WARN_ON_ONCE(skb_shared(skb));
- compute_ip_summed(skb);
+ compute_ip_summed(skb, false);
/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
* skb_forward_csum(). It is slightly different because we are only concerned
* with bridging and not other types of forwarding and can get away with
* slightly more optimal behavior. */
-static void
-compute_ip_summed(struct sk_buff *skb)
+void
+compute_ip_summed(struct sk_buff *skb, bool xmit)
{
- OVS_CB(skb)->ip_summed = skb->ip_summed;
-
+ /* These constants change between kernel versions, so we can't simply
+ * copy skb->ip_summed over; map each value explicitly. */
+ switch (skb->ip_summed) {
+ case CHECKSUM_NONE:
+ OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
+ break;
+ case CHECKSUM_UNNECESSARY:
+ OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
+ break;
#ifdef CHECKSUM_HW
/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
* However, we should only get CHECKSUM_PARTIAL packets from Xen, which
* uses some special fields to represent this (see below). Since we
* can only make one type work, pick the one that actually happens in
- * practice. */
- if (skb->ip_summed == CHECKSUM_HW)
- OVS_CB(skb)->ip_summed = CSUM_COMPLETE;
+ * practice.
+ *
+ * The one exception is the transmit path (basically after
+ * skb_checksum_setup() has been run), where the type has already
+ * been converted, so we stay with that. */
+ case CHECKSUM_HW:
+ if (!xmit)
+ OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
+ else
+ OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
+
+ break;
+#else
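+ /* Kernels without CHECKSUM_HW distinguish the receive-side
+ * CHECKSUM_COMPLETE from the transmit-side CHECKSUM_PARTIAL, so the
+ * values map directly. */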
+ case CHECKSUM_COMPLETE:
+ OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
+ break;
+ case CHECKSUM_PARTIAL:
+ OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
+ break;
#endif
+ default:
+ printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
+ skb->ip_summed);
+ /* Treating it as OVS_CSUM_NONE seems safest. */
+ OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
+ }
+
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* Xen has a special way of representing CHECKSUM_PARTIAL on older
- * kernels. */
+ * kernels. It should not be set on the transmit path though. */
if (skb->proto_csum_blank)
- OVS_CB(skb)->ip_summed = CSUM_PARTIAL;
+ OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
+
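+ /* The Xen flag should already have been cleared (by
+ * skb_checksum_setup()) before a packet reaches the transmit
+ * path. */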
+ WARN_ON_ONCE(skb->proto_csum_blank && xmit);
#endif
}
forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
- if (OVS_CB(skb)->ip_summed == CSUM_COMPLETE)
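+ /* On kernels that still use CHECKSUM_HW the value is ambiguous: the
+ * transmit path would treat a forwarded complete checksum as one
+ * that still needs to be computed, so fall back to CHECKSUM_NONE. */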
+ if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
#endif
}