}
EXPORT_SYMBOL_GPL(get_dp);
-struct datapath *get_dp_locked(int dp_idx)
+static struct datapath *get_dp_locked(int dp_idx)
{
struct datapath *dp;
kfree(dp);
}
-struct kobj_type dp_ktype = {
+static struct kobj_type dp_ktype = {
.release = release_dp
};
kfree(p);
}
-struct kobj_type brport_ktype = {
+static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
.sysfs_ops = &brport_sysfs_ops,
#endif
p->port_no = port_no;
p->dp = dp;
p->dev = dev;
+ atomic_set(&p->sflow_pool, 0);
if (!is_dp_dev(dev))
rcu_assign_pointer(dev->br_port, p);
else {
if (err)
goto out_put;
+ set_dp_devs_mtu(dp, dev);
dp_sysfs_add_if(dp->ports[port_no]);
- err = __put_user(port_no, &port.port);
+ err = __put_user(port_no, &portp->port);
out_put:
dev_put(dev);
out:
return err;
}
-#else
-int vswitch_skb_checksum_setup(struct sk_buff *skb) { return 0; }
-#endif /* CONFIG_XEN && linux == 2.6.18 */
+#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
/* Types of checksums that we can receive (these all refer to L4 checksums):
* 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
* 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
* generated locally by a Xen DomU and has a partial checksum. If it is
* handled on this machine (Dom0 or DomU), then the checksum will not be
- * computed. If it goes off box, the checksum in the packet needs to
+ * computed. If it goes off box, the checksum in the packet needs to be
* completed. Calling skb_checksum_setup converts this to CHECKSUM_HW
* (CHECKSUM_PARTIAL) so that the checksum can be completed. In later
* kernels, this combination is replaced with CHECKSUM_PARTIAL.
int err;
WARN_ON_ONCE(skb_shared(skb));
- BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR);
-
+ BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
queue = &dp->queues[queue_no];
err = -ENOBUFS;
if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
return mtu ? mtu : ETH_DATA_LEN;
}
+/* Sets the MTU of all datapath devices to the minimum of the ports. 'dev'
+ * is the device whose MTU may have changed. Must be called with RTNL lock
+ * and dp_mutex. */
+void set_dp_devs_mtu(const struct datapath *dp, struct net_device *dev)
+{
+ struct net_bridge_port *p;
+ int mtu;
+
+ ASSERT_RTNL();
+
+ /* Internal (datapath) devices take their MTU from the other ports,
+  * rather than constraining them, so a change on one of them is
+  * presumably a no-op here -- NOTE(review): confirm dp_min_mtu()
+  * skips internal devices. */
+ if (is_dp_dev(dev))
+ return;
+
+ /* Minimum MTU over the datapath's ports (see dp_min_mtu()). */
+ mtu = dp_min_mtu(dp);
+
+ /* _rcu walk of the port list; writers are excluded by the RTNL lock
+  * asserted above plus dp_mutex per the function comment. Only the
+  * datapath's own internal devices have their MTU updated. */
+ list_for_each_entry_rcu (p, &dp->port_list, node) {
+ struct net_device *br_dev = p->dev;
+
+ if (is_dp_dev(br_dev))
+ dev_set_mtu(br_dev, mtu);
+ }
+}
+
+
static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
int dp_idx = iminor(f->f_dentry->d_inode);
struct datapath *dp;
int drop_frags, listeners, port_no;
+ unsigned int sflow_probability;
int err;
/* Handle commands with special locking requirements up front. */
set_listen_mask(f, listeners);
break;
+ case ODP_GET_SFLOW_PROBABILITY:
+ err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
+ break;
+
+ case ODP_SET_SFLOW_PROBABILITY:
+ err = get_user(sflow_probability, (unsigned int __user *)argp);
+ if (!err)
+ dp->sflow_probability = sflow_probability;
+ break;
+
case ODP_PORT_QUERY:
err = query_port(dp, (struct odp_port __user *)argp);
break;