datapath: Protect per-CPU vport stats with a seqcount, drop the vport_send loop counter, remove the dp_port->vport back-pointer, and register CAPWAP vport ops on kernels >= 2.6.26.
[openvswitch] / datapath / vport.c
index 712c26e5d3ddae51386433bc437df41573aa9e07..4dd6cfe9e0e0a1f1e615f715f00b7f150d15d1c9 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/percpu.h>
 #include <linux/rtnetlink.h>
 #include <linux/compat.h>
+#include <linux/version.h>
 
 #include "vport.h"
 #include "vport-internal_dev.h"
@@ -27,6 +28,9 @@ static struct vport_ops *base_vport_ops_list[] = {
        &internal_vport_ops,
        &patch_vport_ops,
        &gre_vport_ops,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+       &capwap_vport_ops,
+#endif
 };
 
 static const struct vport_ops **vport_ops_list;
@@ -35,17 +39,6 @@ static int n_vport_types;
 static struct hlist_head *dev_table;
 #define VPORT_HASH_BUCKETS 1024
 
-/* We limit the number of times that we pass through vport_send() to
- * avoid blowing out the stack in the event that we have a loop. There is
- * a separate counter for each CPU for both interrupt and non-interrupt
- * context in order to keep the limit deterministic for a given packet. */
-struct percpu_loop_counter {
-       int count[2];
-};
-
-static DEFINE_PER_CPU(struct percpu_loop_counter, vport_loop_counter);
-#define VPORT_MAX_LOOPS 5
-
 /* Both RTNL lock and vport_mutex need to be held when updating dev_table.
  *
  * If you use vport_locate and then perform some operations, you need to hold
@@ -798,9 +791,6 @@ int vport_attach(struct vport *vport, struct dp_port *dp_port)
 {
        ASSERT_RTNL();
 
-       if (dp_port->vport)
-               return -EBUSY;
-
        if (vport_get_dp_port(vport))
                return -EBUSY;
 
@@ -812,7 +802,6 @@ int vport_attach(struct vport *vport, struct dp_port *dp_port)
                        return err;
        }
 
-       dp_port->vport = vport;
        rcu_assign_pointer(vport->dp_port, dp_port);
 
        return 0;
@@ -836,7 +825,6 @@ int vport_detach(struct vport *vport)
        if (!dp_port)
                return -EINVAL;
 
-       dp_port->vport = NULL;
        rcu_assign_pointer(vport->dp_port, NULL);
 
        if (vport->ops->detach)
@@ -1068,12 +1056,20 @@ int vport_get_stats(struct vport *vport, struct odp_vport_stats *stats)
 
                for_each_possible_cpu(i) {
                        const struct vport_percpu_stats *percpu_stats;
+                       struct vport_percpu_stats local_stats;
+                       unsigned seqcount;
 
                        percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
-                       stats->rx_bytes         += percpu_stats->rx_bytes;
-                       stats->rx_packets       += percpu_stats->rx_packets;
-                       stats->tx_bytes         += percpu_stats->tx_bytes;
-                       stats->tx_packets       += percpu_stats->tx_packets;
+
+                       do {
+                               seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+                               local_stats = *percpu_stats;
+                       } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+                       stats->rx_bytes         += local_stats.rx_bytes;
+                       stats->rx_packets       += local_stats.rx_packets;
+                       stats->tx_bytes         += local_stats.tx_bytes;
+                       stats->tx_packets       += local_stats.tx_packets;
                }
 
                err = 0;
@@ -1208,10 +1204,12 @@ void vport_receive(struct vport *vport, struct sk_buff *skb)
                struct vport_percpu_stats *stats;
 
                local_bh_disable();
-
                stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+               write_seqcount_begin(&stats->seqlock);
                stats->rx_packets++;
                stats->rx_bytes += skb->len;
+               write_seqcount_end(&stats->seqlock);
 
                local_bh_enable();
        }
@@ -1243,20 +1241,9 @@ static inline unsigned packet_length(const struct sk_buff *skb)
  */
 int vport_send(struct vport *vport, struct sk_buff *skb)
 {
-       int *loop_count;
        int mtu;
        int sent;
 
-       loop_count = &get_cpu_var(vport_loop_counter).count[!!in_interrupt()];
-       (*loop_count)++;
-
-       if (unlikely(*loop_count > VPORT_MAX_LOOPS)) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "%s: dropping packet that has looped more than %d times\n",
-                              dp_name(vport_get_dp_port(vport)->dp), VPORT_MAX_LOOPS);
-               goto error;
-       }
-
        mtu = vport_get_mtu(vport);
        if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
                if (net_ratelimit())
@@ -1271,25 +1258,22 @@ int vport_send(struct vport *vport, struct sk_buff *skb)
                struct vport_percpu_stats *stats;
 
                local_bh_disable();
-
                stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+               write_seqcount_begin(&stats->seqlock);
                stats->tx_packets++;
                stats->tx_bytes += sent;
+               write_seqcount_end(&stats->seqlock);
 
                local_bh_enable();
        }
 
-       goto out;
+       return sent;
 
 error:
-       sent = 0;
        kfree_skb(skb);
        vport_record_error(vport, VPORT_E_TX_DROPPED);
-out:
-       (*loop_count)--;
-       put_cpu_var(vport_loop_counter);
-
-       return sent;
+       return 0;
 }
 
 /**