From c06955bc63bf504fc6fa87fd9780993bc45cad09 Mon Sep 17 00:00:00 2001
From: Justin Pettit
Date: Tue, 12 Oct 2010 11:24:04 -0700
Subject: [PATCH] netflow: Back-out optimization that could lead to infinite loop

Commit 924282 (netflow: Do 64-bit division less often.) attempted to
remove the 64-bit division used to break flow records with large byte
counts into multiple NetFlow records. The calculation to determine the
number of records was incorrect and should have shifted "byte_delta" by
31 instead of 32. This commit reverts the change (while keeping commit
f22a24 (netflow: Avoid (theoretically) looping 2**32 times.)), since the
logic is more straight-forward than the optimized version.

Bug #3813
---
 ofproto/netflow.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/ofproto/netflow.c b/ofproto/netflow.c
index 4881c5fd..d9f52ba5 100644
--- a/ofproto/netflow.c
+++ b/ofproto/netflow.c
@@ -190,8 +190,8 @@ netflow_expire(struct netflow *nf, struct netflow_flow *nf_flow,
          * traffic. We try to evenly distribute the packet and byte counters,
          * so that the bytes-per-packet lengths don't look wonky across the
          * records. */
-        while (byte_delta > UINT32_MAX) {
-            uint32_t n_recs = byte_delta >> 32;
+        while (byte_delta) {
+            int n_recs = (byte_delta + UINT32_MAX - 1) / UINT32_MAX;
             uint32_t pkt_count = pkt_delta / n_recs;
             uint32_t byte_count = byte_delta / n_recs;
 
@@ -200,9 +200,6 @@ netflow_expire(struct netflow *nf, struct netflow_flow *nf_flow,
             pkt_delta -= pkt_count;
             byte_delta -= byte_count;
         }
-        if (byte_delta > 0) {
-            gen_netflow_rec(nf, nf_flow, expired, pkt_delta, byte_delta);
-        }
     } else {
         /* In 600 seconds, a 10GbE link can theoretically transmit 75 * 10**10
          * == 175 * 2**32 bytes. The byte counter is bigger than that, so it's
-- 
2.30.2
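
Note for reviewers (not part of the patch): a minimal standalone C sketch of
the failure mode being backed out. With byte_delta just above UINT32_MAX,
"byte_delta >> 32" yields n_recs == 1, so the uint32_t byte_count truncates
modulo 2**32 and byte_delta never drops back below UINT32_MAX. The main()
harness, sample value, and iteration cap below are illustrative assumptions,
not code from the tree.

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t byte_delta = (uint64_t) UINT32_MAX + 100;  /* 2**32 + 99 */
        int i;

        /* Cap at 3 passes so the demonstration terminates. */
        for (i = 0; i < 3 && byte_delta > UINT32_MAX; i++) {
            uint32_t n_recs = byte_delta >> 32;        /* 1, not 2 */
            uint32_t byte_count = byte_delta / n_recs; /* truncates to 99, then 0 */

            printf("n_recs=%" PRIu32 " byte_count=%" PRIu32 "\n",
                   n_recs, byte_count);
            byte_delta -= byte_count;  /* stays at or above 2**32 */
        }
        /* Without the cap this spins forever: after the first pass
         * byte_delta == 2**32 exactly, byte_count truncates to 0, and the
         * loop makes no further progress. Shifting by 31 (n_recs == 2 here)
         * would avoid the truncation, but the reverted per-iteration
         * division is simpler to reason about. */
        return 0;
    }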