+/*
+ * Copyright (c) 2009 Nicira Networks.
+ * Distributed under the terms of the GNU GPL version 2.
+ *
+ * Significant portions of this file may be copied from parts of the Linux
+ * kernel, by Linus Torvalds and others.
+ */
+
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "datapath.h"
#include "dp_dev.h"
+struct pcpu_lstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
struct datapath *dp_dev_get_dp(struct net_device *netdev)
{
return dp_dev_priv(netdev)->dp;
}
-EXPORT_SYMBOL(dp_dev_get_dp);
static struct net_device_stats *dp_dev_get_stats(struct net_device *netdev)
{
struct dp_dev *dp_dev = dp_dev_priv(netdev);
- return &dp_dev->stats;
+ struct net_device_stats *stats;
+ int i;
+
+ stats = &dp_dev->stats;
+ memset(stats, 0, sizeof *stats);
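+ /* Sum every CPU's counters into the single net_device_stats we return. */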
+ for_each_possible_cpu(i) {
+ const struct pcpu_lstats *lb_stats;
+
+ lb_stats = per_cpu_ptr(dp_dev->lstats, i);
+ stats->rx_bytes += lb_stats->rx_bytes;
+ stats->rx_packets += lb_stats->rx_packets;
+ stats->tx_bytes += lb_stats->tx_bytes;
+ stats->tx_packets += lb_stats->tx_packets;
+ }
+ return stats;
}
int dp_dev_recv(struct net_device *netdev, struct sk_buff *skb)
{
struct dp_dev *dp_dev = dp_dev_priv(netdev);
+ struct pcpu_lstats *lb_stats;
int len;
len = skb->len;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, netdev);
if (in_interrupt())
netif_rx(skb);
else
netif_rx_ni(skb);
netdev->last_rx = jiffies;
- dp_dev->stats.rx_packets++;
- dp_dev->stats.rx_bytes += len;
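+ /* Charge the packet to this CPU's copy of the RX counters. */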
+ lb_stats = per_cpu_ptr(dp_dev->lstats, smp_processor_id());
+ lb_stats->rx_packets++;
+ lb_stats->rx_bytes += len;
return len;
}
return 0;
}
+/* Never reentered on a single CPU, because it is called with BHs disabled, but
+ * it may be called simultaneously on different CPUs. */
static int dp_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct dp_dev *dp_dev = dp_dev_priv(netdev);
+ struct pcpu_lstats *lb_stats;
- /* By orphaning 'skb' we will screw up socket accounting slightly, but
- * the effect is limited to the device queue length. If we don't
- * do this, then the sk_buff will be destructed eventually, but it is
- * harder to predict when. */
- skb_orphan(skb);
-
- /* We are going to modify 'skb', by sticking it on &dp_dev->xmit_queue,
- * so we need to have our own clone. (At any rate, fwd_port_input()
- * will need its own clone, so there's no benefit to queuing any other
- * way.) */
+ /* dp_process_received_packet() needs its own clone. */
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return 0;
- dp_dev->stats.tx_packets++;
- dp_dev->stats.tx_bytes += skb->len;
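+ /* BHs are disabled here (see the comment above this function), so
+ * smp_processor_id() is stable and these counters need no extra locking. */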
+ lb_stats = per_cpu_ptr(dp_dev->lstats, smp_processor_id());
+ lb_stats->tx_packets++;
+ lb_stats->tx_bytes += skb->len;
- if (skb_queue_len(&dp_dev->xmit_queue) >= netdev->tx_queue_len) {
- /* Queue overflow. Stop transmitter. */
- netif_stop_queue(netdev);
-
- /* We won't see all dropped packets individually, so overrun
- * error is appropriate. */
- dp_dev->stats.tx_fifo_errors++;
- }
- skb_queue_tail(&dp_dev->xmit_queue, skb);
- netdev->trans_start = jiffies;
-
- schedule_work(&dp_dev->xmit_work);
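+ /* Hand the frame directly to the datapath, as though it had been received
+ * on this port. */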
+ skb_reset_mac_header(skb);
+ rcu_read_lock_bh();
+ dp_process_received_packet(skb, dp_dev->dp->ports[dp_dev->port_no]);
+ rcu_read_unlock_bh();
return 0;
}
-static void dp_dev_do_xmit(struct work_struct *work)
-{
- struct dp_dev *dp_dev = container_of(work, struct dp_dev, xmit_work);
- struct datapath *dp = dp_dev->dp;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&dp_dev->xmit_queue)) != NULL) {
- skb_reset_mac_header(skb);
- rcu_read_lock_bh();
- dp_process_received_packet(skb, dp->ports[dp_dev->port_no]);
- rcu_read_unlock_bh();
- }
- netif_wake_queue(dp_dev->dev);
-}
-
static int dp_dev_open(struct net_device *netdev)
{
netif_start_queue(netdev);
{
struct dp_dev *dp_dev = dp_dev_priv(netdev);
strcpy(info->driver, "openvswitch");
- sprintf(info->bus_info, "%d", dp_dev->dp->dp_idx);
+ sprintf(info->bus_info, "%d.%d", dp_dev->dp->dp_idx, dp_dev->port_no);
}
static struct ethtool_ops dp_ethtool_ops = {
.get_tso = ethtool_op_get_tso,
};
+static int dp_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
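+ /* 68 is the minimum MTU required by IPv4; dp_min_mtu() gives the ceiling
+ * imposed by the datapath's ports. */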
+ if (new_mtu < 68 || new_mtu > dp_min_mtu(dp_dev_get_dp(dev)))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int dp_dev_init(struct net_device *netdev)
+{
+ struct dp_dev *dp_dev = dp_dev_priv(netdev);
+
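+ /* Allocate the per-CPU counters that dp_dev_get_stats() sums. */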
+ dp_dev->lstats = alloc_percpu(struct pcpu_lstats);
+ if (!dp_dev->lstats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void dp_dev_free(struct net_device *netdev)
+{
+ struct dp_dev *dp_dev = dp_dev_priv(netdev);
+
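+ /* Release the per-CPU counters, then the device itself. */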
+ free_percpu(dp_dev->lstats);
+ free_netdev(netdev);
+}
+
+static int dp_dev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
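+ /* Pass ioctls along to the registered hook, if there is one; otherwise
+ * report them as unsupported. */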
+ if (dp_ioctl_hook)
+ return dp_ioctl_hook(dev, ifr, cmd);
+ return -EOPNOTSUPP;
+}
+
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops dp_dev_netdev_ops = {
+ .ndo_init = dp_dev_init,
+ .ndo_open = dp_dev_open,
+ .ndo_stop = dp_dev_stop,
+ .ndo_start_xmit = dp_dev_xmit,
+ .ndo_set_mac_address = dp_dev_mac_addr,
+ .ndo_do_ioctl = dp_dev_do_ioctl,
+ .ndo_change_mtu = dp_dev_change_mtu,
+ .ndo_get_stats = dp_dev_get_stats,
+};
+#endif
+
static void
do_setup(struct net_device *netdev)
{
ether_setup(netdev);
- netdev->do_ioctl = dp_ioctl_hook;
+#ifdef HAVE_NET_DEVICE_OPS
+ netdev->netdev_ops = &dp_dev_netdev_ops;
+#else
+ netdev->do_ioctl = dp_dev_do_ioctl;
netdev->get_stats = dp_dev_get_stats;
netdev->hard_start_xmit = dp_dev_xmit;
netdev->open = dp_dev_open;
- SET_ETHTOOL_OPS(netdev, &dp_ethtool_ops);
netdev->stop = dp_dev_stop;
- netdev->tx_queue_len = 100;
netdev->set_mac_address = dp_dev_mac_addr;
+ netdev->change_mtu = dp_dev_change_mtu;
+ netdev->init = dp_dev_init;
+#endif
+
+ netdev->destructor = dp_dev_free;
+ SET_ETHTOOL_OPS(netdev, &dp_ethtool_ops);
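+ /* dp_dev_xmit() hands packets to the datapath synchronously, so no
+ * transmit queue is needed. */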
+ netdev->tx_queue_len = 0;
netdev->flags = IFF_BROADCAST | IFF_MULTICAST;
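+ /* NETIF_F_LLTX: the core need not take its transmit lock around
+ * dp_dev_xmit(), which is safe to call concurrently on different CPUs. */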
+ netdev->features = NETIF_F_LLTX; /* XXX other features? */
random_ether_addr(netdev->dev_addr);
if (!netdev)
return ERR_PTR(-ENOMEM);
+ dp_dev = dp_dev_priv(netdev);
+ dp_dev->dp = dp;
+ dp_dev->port_no = port_no;
+ dp_dev->dev = netdev;
+
err = register_netdevice(netdev);
if (err) {
free_netdev(netdev);
return ERR_PTR(err);
}
- dp_dev = dp_dev_priv(netdev);
- dp_dev->dp = dp;
- dp_dev->port_no = port_no;
- dp_dev->dev = netdev;
- skb_queue_head_init(&dp_dev->xmit_queue);
- INIT_WORK(&dp_dev->xmit_work, dp_dev_do_xmit);
return netdev;
}
/* Called with RTNL lock and dp_mutex. */
void dp_dev_destroy(struct net_device *netdev)
{
- struct dp_dev *dp_dev = dp_dev_priv(netdev);
-
- netif_tx_disable(netdev);
- synchronize_net();
- skb_queue_purge(&dp_dev->xmit_queue);
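+ /* dp_dev_free() runs as the netdev destructor once the device has been
+ * unregistered and its last reference is dropped. */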
unregister_netdevice(netdev);
}
int is_dp_dev(struct net_device *netdev)
{
+#ifdef HAVE_NET_DEVICE_OPS
+ return netdev->netdev_ops == &dp_dev_netdev_ops;
+#else
return netdev->open == dp_dev_open;
+#endif
}
-EXPORT_SYMBOL(is_dp_dev);