/* Update datapath statistics. */
local_bh_disable();
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
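+ /* Writers are serialized per CPU by local_bh_disable(); the seqcount
+ * only lets readers on other CPUs detect a torn 64-bit update
+ * (e.g. on 32-bit SMP hosts). */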
+ write_seqcount_begin(&stats->seqlock);
(*(u64 *)((u8 *)stats + stats_counter_off))++;
+ write_seqcount_end(&stats->seqlock);
+
local_bh_enable();
}
err:
local_bh_disable();
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+ write_seqcount_begin(&stats->seqlock);
stats->n_lost++;
+ write_seqcount_end(&stats->seqlock);
+
local_bh_enable();
return err;
stats.max_groups = DP_MAX_GROUPS;
stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
for_each_possible_cpu(i) {
- const struct dp_stats_percpu *s;
- s = per_cpu_ptr(dp->stats_percpu, i);
- stats.n_frags += s->n_frags;
- stats.n_hit += s->n_hit;
- stats.n_missed += s->n_missed;
- stats.n_lost += s->n_lost;
+ const struct dp_stats_percpu *percpu_stats;
+ struct dp_stats_percpu local_stats;
+ unsigned int seqcount;
+
+ percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
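+ /* Snapshot CPU i's stats, retrying if a writer on that CPU
+ * updates them mid-copy. */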
+ do {
+ seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+ local_stats = *percpu_stats;
+ } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+ stats.n_frags += local_stats.n_frags;
+ stats.n_hit += local_stats.n_hit;
+ stats.n_missed += local_stats.n_missed;
+ stats.n_lost += local_stats.n_lost;
}
stats.max_miss_queue = DP_MAX_QUEUE_LEN;
stats.max_action_queue = DP_MAX_QUEUE_LEN;
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
+#include <linux/seqlock.h>
#include <linux/skbuff.h>
#include <linux/version.h>
#include "flow.h"
u64 n_hit;
u64 n_missed;
u64 n_lost;
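+ /* Lets readers take a consistent snapshot of the counters above. */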
+ seqcount_t seqlock;
};
struct dp_port_group {
for_each_possible_cpu(i) {
const struct vport_percpu_stats *percpu_stats;
+ struct vport_percpu_stats local_stats;
+ unsigned int seqcount;
percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
- stats->rx_bytes += percpu_stats->rx_bytes;
- stats->rx_packets += percpu_stats->rx_packets;
- stats->tx_bytes += percpu_stats->tx_bytes;
- stats->tx_packets += percpu_stats->tx_packets;
+
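+ /* Retry until the snapshot was not torn by a writer on CPU i. */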
+ do {
+ seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+ local_stats = *percpu_stats;
+ } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+ stats->rx_bytes += local_stats.rx_bytes;
+ stats->rx_packets += local_stats.rx_packets;
+ stats->tx_bytes += local_stats.tx_bytes;
+ stats->tx_packets += local_stats.tx_packets;
}
err = 0;
struct vport_percpu_stats *stats;
local_bh_disable();
-
stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
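+ /* BHs are already disabled, so no other writer can run on this CPU. */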
+ write_seqcount_begin(&stats->seqlock);
stats->rx_packets++;
stats->rx_bytes += skb->len;
+ write_seqcount_end(&stats->seqlock);
local_bh_enable();
}
struct vport_percpu_stats *stats;
local_bh_disable();
-
stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+ write_seqcount_begin(&stats->seqlock);
stats->tx_packets++;
stats->tx_bytes += sent;
+ write_seqcount_end(&stats->seqlock);
local_bh_enable();
}
#define VPORT_H 1
#include <linux/list.h>
+#include <linux/seqlock.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
u64 rx_packets;
u64 tx_bytes;
u64 tx_packets;
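+ /* Guards consistent reads of the rx/tx counters above. */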
+ seqcount_t seqlock;
};
struct vport_err_stats {