return 0;
err_destroy_local_port:
- dp_del_port(dp->ports[ODPP_LOCAL], NULL);
+ dp_del_port(dp->ports[ODPP_LOCAL]);
err_destroy_table:
dp_table_destroy(dp->table, 0);
err_destroy_dp_dev:
return err;
}
-static void do_destroy_dp(struct datapath *dp, struct list_head *dp_devs)
+static void do_destroy_dp(struct datapath *dp)
{
struct net_bridge_port *p, *n;
int i;
list_for_each_entry_safe (p, n, &dp->port_list, node)
if (p->port_no != ODPP_LOCAL)
- dp_del_port(p, dp_devs);
+ dp_del_port(p);
if (dp_del_dp_hook)
dp_del_dp_hook(dp);
rcu_assign_pointer(dps[dp->dp_idx], NULL);
- dp_del_port(dp->ports[ODPP_LOCAL], dp_devs);
+ dp_del_port(dp->ports[ODPP_LOCAL]);
dp_table_destroy(dp->table, 1);
static int destroy_dp(int dp_idx)
{
- struct dp_dev *dp_dev, *next;
struct datapath *dp;
- LIST_HEAD(dp_devs);
int err;
rtnl_lock();
if (!dp)
goto err_unlock;
- do_destroy_dp(dp, &dp_devs);
+ do_destroy_dp(dp);
err = 0;
err_unlock:
mutex_unlock(&dp_mutex);
rtnl_unlock();
- list_for_each_entry_safe (dp_dev, next, &dp_devs, list)
- free_netdev(dp_dev->dev);
return err;
}
return err;
}
-int dp_del_port(struct net_bridge_port *p, struct list_head *dp_devs)
+int dp_del_port(struct net_bridge_port *p)
{
ASSERT_RTNL();
if (is_dp_dev(p->dev)) {
dp_dev_destroy(p->dev);
- if (dp_devs) {
- struct dp_dev *dp_dev = dp_dev_priv(p->dev);
- list_add(&dp_dev->list, dp_devs);
- }
}
if (p->port_no != ODPP_LOCAL && dp_del_if_hook) {
dp_del_if_hook(p);
static int del_port(int dp_idx, int port_no)
{
- struct dp_dev *dp_dev, *next;
struct net_bridge_port *p;
struct datapath *dp;
- LIST_HEAD(dp_devs);
if (!p)
goto out_unlock_dp;
- err = dp_del_port(p, &dp_devs);
+ err = dp_del_port(p);
out_unlock_dp:
mutex_unlock(&dp->mutex);
out_unlock_rtnl:
rtnl_unlock();
out:
- list_for_each_entry_safe (dp_dev, next, &dp_devs, list)
- free_netdev(dp_dev->dev);
return err;
}
return 0;
}
+/* Not reentrant (because it is called with BHs disabled), but may be called
+ * simultaneously on different CPUs. */
static int dp_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct dp_dev *dp_dev = dp_dev_priv(netdev);
* harder to predict when. */
skb_orphan(skb);
- /* We are going to modify 'skb', by sticking it on &dp_dev->xmit_queue,
- * so we need to have our own clone. (At any rate, fwd_port_input()
- * will need its own clone, so there's no benefit to queuing any other
- * way.) */
+ /* dp_process_received_packet() needs its own clone. */
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return 0;
dp_dev->stats.tx_packets++;
dp_dev->stats.tx_bytes += skb->len;
- if (skb_queue_len(&dp_dev->xmit_queue) >= netdev->tx_queue_len) {
- /* Queue overflow. Stop transmitter. */
- netif_stop_queue(netdev);
-
- /* We won't see all dropped packets individually, so overrun
- * error is appropriate. */
- dp_dev->stats.tx_fifo_errors++;
- }
- skb_queue_tail(&dp_dev->xmit_queue, skb);
- netdev->trans_start = jiffies;
-
- schedule_work(&dp_dev->xmit_work);
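+ /* Hand the packet straight to the datapath under rcu_read_lock_bh(),
+ * instead of queuing it for a work item. */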
+ skb_reset_mac_header(skb);
+ rcu_read_lock_bh();
+ dp_process_received_packet(skb, dp_dev->dp->ports[dp_dev->port_no]);
+ rcu_read_unlock_bh();
return 0;
}
-static void dp_dev_do_xmit(struct work_struct *work)
-{
- struct dp_dev *dp_dev = container_of(work, struct dp_dev, xmit_work);
- struct datapath *dp = dp_dev->dp;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&dp_dev->xmit_queue)) != NULL) {
- skb_reset_mac_header(skb);
- rcu_read_lock_bh();
- dp_process_received_packet(skb, dp->ports[dp_dev->port_no]);
- rcu_read_unlock_bh();
- }
- netif_wake_queue(dp_dev->dev);
-}
-
static int dp_dev_open(struct net_device *netdev)
{
netif_start_queue(netdev);
netdev->open = dp_dev_open;
SET_ETHTOOL_OPS(netdev, &dp_ethtool_ops);
netdev->stop = dp_dev_stop;
- netdev->tx_queue_len = 100;
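+ /* No transmit queue: dp_dev_xmit() processes packets immediately. */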
+ netdev->tx_queue_len = 0;
netdev->set_mac_address = dp_dev_mac_addr;
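+ /* The core frees the device after unregistration, so callers no longer
+ * need a deferred dp_devs free list. */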
+ netdev->destructor = free_netdev;
netdev->flags = IFF_BROADCAST | IFF_MULTICAST;
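+ /* LLTX: the core does not take the xmit lock; dp_dev_xmit() may run
+ * concurrently on different CPUs (see the comment above it). */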
+ netdev->features = NETIF_F_LLTX; /* XXX other features? */
random_ether_addr(netdev->dev_addr);
dp_dev->dp = dp;
dp_dev->port_no = port_no;
dp_dev->dev = netdev;
- skb_queue_head_init(&dp_dev->xmit_queue);
- INIT_WORK(&dp_dev->xmit_work, dp_dev_do_xmit);
return netdev;
}
/* Called with RTNL lock and dp_mutex. */
void dp_dev_destroy(struct net_device *netdev)
{
- struct dp_dev *dp_dev = dp_dev_priv(netdev);
-
- netif_tx_disable(netdev);
- synchronize_net();
- skb_queue_purge(&dp_dev->xmit_queue);
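+ /* The destructor (free_netdev) releases the device once unregistration
+ * completes; there is no transmit queue left to purge. */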
unregister_netdevice(netdev);
}