__be32 from, __be32 to, int pseudohdr)
{
__be32 diff[] = { ~from, to };
- if (skb->ip_summed != CHECKSUM_PARTIAL) {
+
+/* On older kernels, CHECKSUM_PARTIAL and CHECKSUM_COMPLETE are both defined
+ * as CHECKSUM_HW. However, we can make some inferences so that we can update
+ * the checksums appropriately. */
+ enum {
+ CSUM_PARTIAL, /* Partial checksum, skb->csum undefined. */
+ CSUM_PACKET, /* In-packet checksum, skb->csum undefined. */
+ CSUM_COMPLETE, /* In-packet checksum, skb->csum valid. */
+ } csum_type;
+
+ csum_type = CSUM_PACKET;
+#ifndef CHECKSUM_HW
+ /* Newer kernel, just map between kernel types and ours. */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ csum_type = CSUM_PARTIAL;
+ else if (skb->ip_summed == CHECKSUM_COMPLETE)
+ csum_type = CSUM_COMPLETE;
+#else
+ /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
+ * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
+ * uses some special fields to represent this (see below). Since we
+ * can only make one type work, pick the one that actually happens in
+ * practice. */
+ if (skb->ip_summed == CHECKSUM_HW)
+ csum_type = CSUM_COMPLETE;
+#endif
+#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
+ /* Xen has a special way of representing CHECKSUM_PARTIAL on older
+ * kernels. */
+ if (skb->proto_csum_blank)
+ csum_type = CSUM_PARTIAL;
+#endif
+
+ if (csum_type != CSUM_PARTIAL) {
*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
~csum_unfold(*sum)));
- if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+ if (csum_type == CSUM_COMPLETE && pseudohdr)
skb->csum = ~csum_partial((char *)diff, sizeof(diff),
~skb->csum);
} else if (pseudohdr)
return dp_output_control(dp, skb, _ODPL_ACTION_NR, arg);
}
+/* Send a copy of this packet up to the sFlow agent, along with extra
+ * information about what happened to it.
+ *
+ * 'skb' is copied, not consumed; the caller retains ownership of the
+ * original.  The copy has the executed actions 'a' (n_actions of them)
+ * and an odp_sflow_sample_header prepended, then is queued to userspace
+ * via dp_output_control().  Sampling is best-effort: on allocation
+ * failure the sample is silently dropped. */
+static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
+ const union odp_action *a, int n_actions,
+ gfp_t gfp, struct net_bridge_port *nbp)
+{
+ struct odp_sflow_sample_header *hdr;
+ unsigned int actlen = n_actions * sizeof(union odp_action);
+ unsigned int hdrlen = sizeof(struct odp_sflow_sample_header);
+ struct sk_buff *nskb;
+
+ /* Reserve enough headroom in the copy for both the actions and the
+ * sample header that we push below. */
+ nskb = skb_copy_expand(skb, actlen + hdrlen, 0, gfp);
+ if (!nskb)
+ return;
+
+ /* Push the actions first, then the header, so that the header ends
+ * up outermost (first in the buffer seen by userspace). */
+ memcpy(__skb_push(nskb, actlen), a, actlen);
+ hdr = (struct odp_sflow_sample_header*)__skb_push(nskb, hdrlen);
+ hdr->n_actions = n_actions;
+ /* sflow_pool counts every packet considered for sampling; see the
+ * atomic_inc() at the sampling decision site in execute_actions(). */
+ hdr->sample_pool = atomic_read(&nbp->sflow_pool);
+ dp_output_control(dp, nskb, _ODPL_SFLOW_NR, 0);
+}
+
/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb,
struct odp_flow_key *key,
* is slightly obscure just to avoid that. */
int prev_port = -1;
int err;
+
+ if (dp->sflow_probability) {
+ struct net_bridge_port *p = skb->dev->br_port;
+ if (p) {
+ atomic_inc(&p->sflow_pool);
+ if (dp->sflow_probability == UINT_MAX ||
+ net_random() < dp->sflow_probability)
+ sflow_sample(dp, skb, a, n_actions, gfp, p);
+ }
+ }
+
for (; n_actions > 0; a++, n_actions--) {
WARN_ON_ONCE(skb_shared(skb));
if (prev_port != -1) {