X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Fdp_dev.c;h=2bbd6fecf8d05a52f851b3562b2ad9663a1fbff8;hb=ce640333ea5ad7178a45b1d242af7cad9ca99976;hp=848a27b28a85692ad136c0d94d71366c249bb459;hpb=a165b67e53a835c623c13de4a0df5f4d7bc9db25;p=openvswitch diff --git a/datapath/dp_dev.c b/datapath/dp_dev.c index 848a27b2..2bbd6fec 100644 --- a/datapath/dp_dev.c +++ b/datapath/dp_dev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Nicira Networks. + * Copyright (c) 2009, 2010 Nicira Networks. * Distributed under the terms of the GNU GPL version 2. * * Significant portions of this file may be copied from parts of the Linux @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -17,21 +18,42 @@ #include "datapath.h" #include "dp_dev.h" +struct pcpu_lstats { + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long tx_packets; + unsigned long tx_bytes; +}; + struct datapath *dp_dev_get_dp(struct net_device *netdev) { return dp_dev_priv(netdev)->dp; } -EXPORT_SYMBOL(dp_dev_get_dp); static struct net_device_stats *dp_dev_get_stats(struct net_device *netdev) { struct dp_dev *dp_dev = dp_dev_priv(netdev); - return &dp_dev->stats; + struct net_device_stats *stats; + int i; + + stats = &dp_dev->stats; + memset(stats, 0, sizeof *stats); + for_each_possible_cpu(i) { + const struct pcpu_lstats *lb_stats; + + lb_stats = per_cpu_ptr(dp_dev->lstats, i); + stats->rx_bytes += lb_stats->rx_bytes; + stats->rx_packets += lb_stats->rx_packets; + stats->tx_bytes += lb_stats->tx_bytes; + stats->tx_packets += lb_stats->tx_packets; + } + return stats; } int dp_dev_recv(struct net_device *netdev, struct sk_buff *skb) { struct dp_dev *dp_dev = dp_dev_priv(netdev); + struct pcpu_lstats *lb_stats; int len; len = skb->len; skb->pkt_type = PACKET_HOST; @@ -41,8 +63,13 @@ int dp_dev_recv(struct net_device *netdev, struct sk_buff *skb) else netif_rx_ni(skb); netdev->last_rx = jiffies; - dp_dev->stats.rx_packets++; - dp_dev->stats.rx_bytes += len; + + preempt_disable(); + lb_stats = per_cpu_ptr(dp_dev->lstats, smp_processor_id()); + lb_stats->rx_packets++; + lb_stats->rx_bytes += len; + preempt_enable(); + return len; } @@ -56,58 +83,30 @@ static int dp_dev_mac_addr(struct net_device *dev, void *p) return 0; } +/* Not reentrant (because it is called with BHs disabled), but may be called + * simultaneously on different CPUs. */ static int dp_dev_xmit(struct sk_buff *skb, struct net_device *netdev) { struct dp_dev *dp_dev = dp_dev_priv(netdev); + struct pcpu_lstats *lb_stats; - /* By orphaning 'skb' we will screw up socket accounting slightly, but - * the effect is limited to the device queue length. If we don't - * do this, then the sk_buff will be destructed eventually, but it is - * harder to predict when. */ - skb_orphan(skb); - - /* We are going to modify 'skb', by sticking it on &dp_dev->xmit_queue, - * so we need to have our own clone. (At any rate, fwd_port_input() - * will need its own clone, so there's no benefit to queuing any other - * way.) */ + /* dp_process_received_packet() needs its own clone. */ skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return 0; - dp_dev->stats.tx_packets++; - dp_dev->stats.tx_bytes += skb->len; + lb_stats = per_cpu_ptr(dp_dev->lstats, smp_processor_id()); + lb_stats->tx_packets++; + lb_stats->tx_bytes += skb->len; - if (skb_queue_len(&dp_dev->xmit_queue) >= netdev->tx_queue_len) { - /* Queue overflow. Stop transmitter. 
*/ - netif_stop_queue(netdev); - - /* We won't see all dropped packets individually, so overrun - * error is appropriate. */ - dp_dev->stats.tx_fifo_errors++; - } - skb_queue_tail(&dp_dev->xmit_queue, skb); - netdev->trans_start = jiffies; - - schedule_work(&dp_dev->xmit_work); + skb_reset_mac_header(skb); + rcu_read_lock_bh(); + dp_process_received_packet(skb, dp_dev->dp->ports[dp_dev->port_no]); + rcu_read_unlock_bh(); return 0; } -static void dp_dev_do_xmit(struct work_struct *work) -{ - struct dp_dev *dp_dev = container_of(work, struct dp_dev, xmit_work); - struct datapath *dp = dp_dev->dp; - struct sk_buff *skb; - - while ((skb = skb_dequeue(&dp_dev->xmit_queue)) != NULL) { - skb_reset_mac_header(skb); - rcu_read_lock_bh(); - dp_process_received_packet(skb, dp->ports[dp_dev->port_no]); - rcu_read_unlock_bh(); - } - netif_wake_queue(dp_dev->dev); -} - static int dp_dev_open(struct net_device *netdev) { netif_start_queue(netdev); @@ -135,21 +134,78 @@ static struct ethtool_ops dp_ethtool_ops = { .get_tso = ethtool_op_get_tso, }; +static int dp_dev_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > dp_min_mtu(dp_dev_get_dp(dev))) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static int dp_dev_init(struct net_device *netdev) +{ + struct dp_dev *dp_dev = dp_dev_priv(netdev); + + dp_dev->lstats = alloc_percpu(struct pcpu_lstats); + if (!dp_dev->lstats) + return -ENOMEM; + + return 0; +} + +static void dp_dev_free(struct net_device *netdev) +{ + struct dp_dev *dp_dev = dp_dev_priv(netdev); + + free_percpu(dp_dev->lstats); + free_netdev(netdev); +} + +static int dp_dev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + if (dp_ioctl_hook) + return dp_ioctl_hook(dev, ifr, cmd); + return -EOPNOTSUPP; +} + +#ifdef HAVE_NET_DEVICE_OPS +static const struct net_device_ops dp_dev_netdev_ops = { + .ndo_init = dp_dev_init, + .ndo_open = dp_dev_open, + .ndo_stop = dp_dev_stop, + .ndo_start_xmit = dp_dev_xmit, + .ndo_set_mac_address = dp_dev_mac_addr, + .ndo_do_ioctl = dp_dev_do_ioctl, + .ndo_change_mtu = dp_dev_change_mtu, + .ndo_get_stats = dp_dev_get_stats, +}; +#endif + static void do_setup(struct net_device *netdev) { ether_setup(netdev); - netdev->do_ioctl = dp_ioctl_hook; +#ifdef HAVE_NET_DEVICE_OPS + netdev->netdev_ops = &dp_dev_netdev_ops; +#else + netdev->do_ioctl = dp_dev_do_ioctl; netdev->get_stats = dp_dev_get_stats; netdev->hard_start_xmit = dp_dev_xmit; netdev->open = dp_dev_open; - SET_ETHTOOL_OPS(netdev, &dp_ethtool_ops); netdev->stop = dp_dev_stop; - netdev->tx_queue_len = 100; netdev->set_mac_address = dp_dev_mac_addr; + netdev->change_mtu = dp_dev_change_mtu; + netdev->init = dp_dev_init; +#endif + + netdev->destructor = dp_dev_free; + SET_ETHTOOL_OPS(netdev, &dp_ethtool_ops); + netdev->tx_queue_len = 0; netdev->flags = IFF_BROADCAST | IFF_MULTICAST; + netdev->features = NETIF_F_LLTX; /* XXX other features? */ random_ether_addr(netdev->dev_addr); @@ -158,8 +214,8 @@ do_setup(struct net_device *netdev) netdev->dev_addr[1] = 0x23; netdev->dev_addr[2] = 0x20; - /* Set the top bits to indicate random Nicira address. */ - netdev->dev_addr[3] |= 0xc0; + /* Set the top bit to indicate random Nicira address. */ + netdev->dev_addr[3] |= 0x80; } /* Create a datapath device associated with 'dp'. 
If 'dp_name' is null, @@ -185,34 +241,31 @@ struct net_device *dp_dev_create(struct datapath *dp, const char *dp_name, int p if (!netdev) return ERR_PTR(-ENOMEM); + dp_dev = dp_dev_priv(netdev); + dp_dev->dp = dp; + dp_dev->port_no = port_no; + dp_dev->dev = netdev; + err = register_netdevice(netdev); if (err) { free_netdev(netdev); return ERR_PTR(err); } - dp_dev = dp_dev_priv(netdev); - dp_dev->dp = dp; - dp_dev->port_no = port_no; - dp_dev->dev = netdev; - skb_queue_head_init(&dp_dev->xmit_queue); - INIT_WORK(&dp_dev->xmit_work, dp_dev_do_xmit); return netdev; } /* Called with RTNL lock and dp_mutex.*/ void dp_dev_destroy(struct net_device *netdev) { - struct dp_dev *dp_dev = dp_dev_priv(netdev); - - netif_tx_disable(netdev); - synchronize_net(); - skb_queue_purge(&dp_dev->xmit_queue); unregister_netdevice(netdev); } int is_dp_dev(struct net_device *netdev) { +#ifdef HAVE_NET_DEVICE_OPS + return netdev->netdev_ops == &dp_dev_netdev_ops; +#else return netdev->open == dp_dev_open; +#endif } -EXPORT_SYMBOL(is_dp_dev);
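
The heart of this change is the move from a single shared set of net_device_stats
counters, bumped on every receive and transmit, to loopback-style per-CPU counters
(struct pcpu_lstats, allocated by alloc_percpu() in dp_dev_init() and indexed with
per_cpu_ptr()) that are only summed when dp_dev_get_stats() runs its
for_each_possible_cpu() loop.  Writers touch only their own CPU's slot, so the hot
path needs no lock and no shared cache line; readers pay the aggregation cost.  The
sketch below is a minimal userspace analogue of that pattern, with per-thread slots
in a plain array standing in for the kernel's percpu allocator; it illustrates the
idea rather than reproducing the kernel API, and all names in it (NSLOTS, sender,
slots) are invented for the example.

    /* Userspace analogue of the per-CPU counter pattern behind dp_dev_get_stats():
     * each writer increments its own cache-line-aligned slot without locking, and
     * the reader sums all slots on demand.  The array indexed by thread id stands
     * in for alloc_percpu()/per_cpu_ptr(). */
    #include <pthread.h>
    #include <stdio.h>

    #define NSLOTS 4                        /* stand-in for the number of CPUs */

    struct lstats {
            unsigned long tx_packets;
            unsigned long tx_bytes;
    } __attribute__((aligned(64)));         /* keep each slot on its own cache line */

    static struct lstats slots[NSLOTS];

    static void *sender(void *arg)
    {
            struct lstats *mine = &slots[(long) arg];   /* private slot: no lock */
            int i;

            for (i = 0; i < 100000; i++) {
                    mine->tx_packets++;
                    mine->tx_bytes += 60;
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t threads[NSLOTS];
            struct lstats total = { 0, 0 };
            long i;

            for (i = 0; i < NSLOTS; i++)
                    pthread_create(&threads[i], NULL, sender, (void *) i);
            for (i = 0; i < NSLOTS; i++)
                    pthread_join(threads[i], NULL);

            /* Aggregate on read, like the for_each_possible_cpu() loop above. */
            for (i = 0; i < NSLOTS; i++) {
                    total.tx_packets += slots[i].tx_packets;
                    total.tx_bytes += slots[i].tx_bytes;
            }
            printf("tx_packets=%lu tx_bytes=%lu\n", total.tx_packets, total.tx_bytes);
            return 0;
    }

Build with "cc -pthread".  The same reasoning explains why the patch sets
tx_queue_len to 0 and advertises NETIF_F_LLTX: once transmit stops updating shared
counters and hands the skb straight to dp_process_received_packet() under
rcu_read_lock_bh(), there is nothing left for a device queue or the old
dp_dev_do_xmit() workqueue to serialize.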