diff --git a/datapath/vport-netdev.c b/datapath/vport-netdev.c
index e45e22fb..3bab666c 100644
--- a/datapath/vport-netdev.c
+++ b/datapath/vport-netdev.c
@@ -18,15 +18,43 @@
 #include "checksum.h"
 #include "datapath.h"
+#include "vlan.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
+	!defined(HAVE_VLAN_BUG_WORKAROUND)
+#include <linux/module.h>
+
+static int vlan_tso __read_mostly = 0;
+module_param(vlan_tso, int, 0644);
+MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
+#else
+#define vlan_tso true
+#endif
+
 /* If the native device stats aren't 64 bit use the vport stats tracking instead. */
 #define USE_VPORT_STATS (sizeof(((struct net_device_stats *)0)->rx_bytes) < sizeof(u64))
 
 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb);
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+/* Called with rcu_read_lock and bottom-halves disabled. */
+static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct vport *vport;
+
+	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+		return RX_HANDLER_PASS;
+
+	vport = netdev_get_vport(skb->dev);
+
+	netdev_port_receive(vport, skb);
+
+	return RX_HANDLER_CONSUMED;
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
 /* Called with rcu_read_lock and bottom-halves disabled. */
 static struct sk_buff *netdev_frame_hook(struct sk_buff *skb)
 {
@@ -240,6 +268,11 @@ int netdev_get_mtu(const struct vport *vport)
 /* Must be called with rcu_read_lock. */
 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 {
+	if (unlikely(!vport)) {
+		kfree_skb(skb);
+		return;
+	}
+
 	/* Make our own copy of the packet. Otherwise we will mangle the
 	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
 	 * (No one comes after us, since we tell handle_bridge() that we took
@@ -252,17 +285,98 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 	skb_push(skb, ETH_HLEN);
 
 	compute_ip_summed(skb, false);
+	vlan_copy_skb_tci(skb);
 
 	vport_receive(vport, skb);
 }
 
+static bool dev_supports_vlan_tx(struct net_device *dev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
+	/* Software fallback means every device supports vlan_tci on TX. */
+	return true;
+#elif defined(HAVE_VLAN_BUG_WORKAROUND)
+	return dev->features & NETIF_F_HW_VLAN_TX;
+#else
+	/* Assume that the driver is buggy. */
+	return false;
+#endif
+}
+
 static int netdev_send(struct vport *vport, struct sk_buff *skb)
 {
 	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-	int len = skb->len;
+	int len;
 
 	skb->dev = netdev_vport->dev;
 	forward_ip_summed(skb);
+
+	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
+		int err;
+		int features = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+		features = skb->dev->features & skb->dev->vlan_features;
+#endif
+
+		err = vswitch_skb_checksum_setup(skb);
+		if (unlikely(err)) {
+			kfree_skb(skb);
+			return 0;
+		}
+
+		if (!vlan_tso)
+			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
+				      NETIF_F_UFO | NETIF_F_FSO);
+
+		if (skb_is_gso(skb) &&
+		    (!skb_gso_ok(skb, features) ||
+		     unlikely(skb->ip_summed != CHECKSUM_PARTIAL))) {
+			struct sk_buff *nskb;
+
+			nskb = skb_gso_segment(skb, features);
+			if (!nskb) {
+				if (unlikely(skb_cloned(skb) &&
+				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
+					kfree_skb(skb);
+					return 0;
+				}
+
+				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
+				goto tag;
+			}
+
+			kfree_skb(skb);
+			skb = nskb;
+			if (IS_ERR(skb))
+				return 0;
+
+			len = 0;
+			do {
+				nskb = skb->next;
+				skb->next = NULL;
+
+				skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+				if (likely(skb)) {
+					len += skb->len;
+					vlan_set_tci(skb, 0);
+					dev_queue_xmit(skb);
+				}
+
+				skb = nskb;
+			} while (skb);
+
+			return len;
+		}
+
+tag:
+		skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+		if (unlikely(!skb))
+			return 0;
+		vlan_set_tci(skb, 0);
+	}
+
+	len = skb->len;
 	dev_queue_xmit(skb);
 
 	return len;
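
For background on the fallback path added above: when the device (or kernel) cannot be trusted with skb->vlan_tci on transmit, netdev_send() uses __vlan_put_tag() to write the 802.1Q header into the frame itself before handing it to dev_queue_xmit(). The standalone userspace sketch below only illustrates that header insertion (TPID 0x8100 followed by the TCI, placed between the source MAC and the original EtherType); the helper name, buffer layout, and sizes are assumptions made for the example and are not part of the datapath code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN    6
#define VLAN_HLEN   4
#define ETH_P_8021Q 0x8100

/* Hypothetical helper: insert an 802.1Q tag into an untagged Ethernet
 * frame.  'len' is the current frame length; the buffer must have at
 * least VLAN_HLEN bytes of spare space at the tail.  Returns the new
 * frame length. */
static size_t vlan_insert_tag(uint8_t *frame, size_t len, uint16_t tci)
{
	/* Shift everything after the two MAC addresses up by 4 bytes. */
	memmove(frame + 2 * ETH_ALEN + VLAN_HLEN,
	        frame + 2 * ETH_ALEN,
	        len - 2 * ETH_ALEN);

	/* TPID and TCI, network byte order. */
	frame[12] = ETH_P_8021Q >> 8;
	frame[13] = ETH_P_8021Q & 0xff;
	frame[14] = tci >> 8;
	frame[15] = tci & 0xff;

	return len + VLAN_HLEN;
}

int main(void)
{
	uint8_t frame[64] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,   /* dst MAC */
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55,   /* src MAC */
		0x08, 0x00,                           /* EtherType: IPv4 */
		0xde, 0xad, 0xbe, 0xef,               /* payload */
	};
	size_t len = vlan_insert_tag(frame, 18, 0x0064);  /* VLAN 100, PCP 0 */

	printf("tagged frame: %zu bytes, TPID=%02x%02x TCI=%02x%02x\n",
	       len, frame[12], frame[13], frame[14], frame[15]);
	return 0;
}

After __vlan_put_tag() succeeds, the patch also clears the out-of-band tag with vlan_set_tci(skb, 0) so the same VLAN is not applied a second time by the driver or stack.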