+ if (unlikely(vlan_deaccel_tag(skb)))	/* NOTE(review): presumably pushes a HW-accelerated VLAN tag into skb data — confirm; on failure skip this packet */
+ goto next;
+
+ if (likely(cache)) {	/* fast path: a precomputed tunnel header is cached */
+ skb_push(skb, cache->len);	/* make room for the cached header bytes */
+ memcpy(skb->data, get_cached_header(cache), cache->len);	/* copy the whole prebuilt header in one shot */
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, cache->hh_len);	/* IP header starts cache->hh_len bytes in — presumably the link-layer part of the cached header */
+
+ } else {	/* slow path: build the tunnel header from scratch */
+ skb_push(skb, mutable->tunnel_hlen);
+ create_tunnel_header(vport, mutable, rt, skb->data);
+ skb_reset_network_header(skb);	/* no link-layer header on this path: data begins at the outer IP header */
+
+ if (next_skb)	/* more packets follow in this chain: each needs its own dst reference */
+ skb_dst_set(skb, dst_clone(unattached_dst));
+ else {	/* last packet consumes the reference outright */
+ skb_dst_set(skb, unattached_dst);
+ unattached_dst = NULL;	/* ownership transferred to the skb; prevents a double release later */
+ }
+ }
+ skb_set_transport_header(skb, skb_network_offset(skb) + sizeof(struct iphdr));	/* NOTE(review): assumes the outer IP header carries no options */
+
+ iph = ip_hdr(skb);	/* fill in the per-packet outer IPv4 fields */
+ iph->tos = tos;
+ iph->ttl = ttl;
+ iph->frag_off = frag_off;
+ ip_select_ident(iph, &rt_dst(rt), NULL);	/* pick an IP identification value for this route */
+
+ skb = tnl_vport->tnl_ops->update_header(vport, mutable,
+ &rt_dst(rt), skb);	/* protocol-specific header fixup; may consume and free the skb (returns NULL then) */
+ if (unlikely(!skb))
+ goto next;	/* update_header() dropped the packet; nothing more to do for it */
+
+ if (likely(cache)) {	/* cached path: transmit directly, bypassing send_frags() below */
+ int orig_len = skb->len - cache->len;	/* bytes excluding the tunnel header — the amount counted toward sent_len */
+ struct vport *cache_vport;
+
+ cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);	/* non-NULL when the route egresses via an OVS internal device */
+ skb->protocol = htons(ETH_P_IP);
+ iph = ip_hdr(skb);
+ iph->tot_len = htons(skb->len - skb_network_offset(skb));	/* finalize outer IP total length now that the frame is complete */
+ ip_send_check(iph);	/* recompute the outer IP header checksum */
+
+ if (cache_vport) {	/* loop the packet back into the datapath instead of hitting the NIC */
+ if (unlikely(compute_ip_summed(skb, true))) {
+ kfree_skb(skb);	/* checksum state could not be normalized: drop this packet */
+ goto next;
+ }
+
+ OVS_CB(skb)->flow = cache->flow;	/* attach the cached flow before handing the skb to receive */
+ ovs_vport_receive(cache_vport, skb);
+ sent_len += orig_len;	/* receive path consumes the skb; count it as sent */
+ } else {
+ int xmit_err;
+
+ skb->dev = rt_dst(rt).dev;	/* hand the frame straight to the egress device */
+ xmit_err = dev_queue_xmit(skb);
+
+ if (likely(net_xmit_eval(xmit_err) == 0))	/* count as sent only when net_xmit_eval() reports success */
+ sent_len += orig_len;
+ }
+ } else
+ sent_len += send_frags(skb, mutable);	/* uncached path: send_frags() transmits (fragmenting if needed) and returns bytes sent */
+
+next:	/* per-packet skip/cleanup target for the enclosing loop */