* [PATCH net-next] net_sched: psched_ratecfg_precompute() improvements
From: Eric Dumazet @ 2013-06-06 20:27 UTC
  To: David Miller; +Cc: netdev

From: Eric Dumazet <edumazet@google.com>

Before allowing 64-bit byte rates, refactor
psched_ratecfg_precompute() for better comments
and increased accuracy.

Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 include/net/sch_generic.h |    4 +--
 net/sched/sch_generic.c   |   42 ++++++++++++++++--------------------
 2 files changed, 21 insertions(+), 25 deletions(-)

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e7f4e21..e788397 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -679,7 +679,7 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
 #endif
 
 struct psched_ratecfg {
-	u64	rate_bps;
+	u64	rate_bps; /* bytes per second */
 	u32	mult;
 	u16	overhead;
 	u8	shift;
@@ -697,7 +697,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
 					  const struct psched_ratecfg *r)
 {
 	memset(res, 0, sizeof(*res));
-	res->rate = r->rate_bps >> 3;
+	res->rate = r->rate_bps;
 	res->overhead = r->overhead;
 }
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2022408..9081d7a3 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -901,37 +901,33 @@ void dev_shutdown(struct net_device *dev)
 void psched_ratecfg_precompute(struct psched_ratecfg *r,
 			       const struct tc_ratespec *conf)
 {
-	u64 factor;
-	u64 mult;
-	int shift;
-
 	memset(r, 0, sizeof(*r));
 	r->overhead = conf->overhead;
-	r->rate_bps = (u64)conf->rate << 3;
+	r->rate_bps = conf->rate;
 	r->mult = 1;
 	/*
-	 * Calibrate mult, shift so that token counting is accurate
-	 * for smallest packet size (64 bytes).  Token (time in ns) is
-	 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps.  It will
-	 * work as long as the smallest packet transfer time can be
-	 * accurately represented in nanosec.
+	 * The deal here is to replace a divide by a reciprocal one
+	 * in fast path (a reciprocal divide is a multiply and a shift)
+	 *
+	 * Normal formula would be :
+	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
+	 *
+	 * We compute mult/shift to use instead :
+	 *  time_in_ns = (len * mult) >> shift;
+	 *
+	 * We try to get the highest possible mult value for accuracy,
+	 * but have to make sure no overflows will ever happen.
 	 */
 	if (r->rate_bps > 0) {
-		/*
-		 * Higher shift gives better accuracy.  Find the largest
-		 * shift such that mult fits in 32 bits.
-		 */
-		for (shift = 0; shift < 16; shift++) {
-			r->shift = shift;
-			factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-			mult = div64_u64(factor, r->rate_bps);
-			if (mult > UINT_MAX)
+		u64 factor = NSEC_PER_SEC;
+
+		for (;;) {
+			r->mult = div64_u64(factor, r->rate_bps);
+			if (r->mult & (1U << 31) || factor & (1ULL << 63))
 				break;
+			factor <<= 1;
+			r->shift++;
 		}
-
-		r->shift = shift - 1;
-		factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-		r->mult = div64_u64(factor, r->rate_bps);
 	}
 }
 EXPORT_SYMBOL(psched_ratecfg_precompute);
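
For readers without the kernel tree at hand, here is a minimal
userspace sketch of the trick this patch implements: precompute a
(mult, shift) pair once, then convert a byte count to nanoseconds in
the fast path with one multiply and one shift. Plain 64-bit division
stands in for div64_u64, and the struct and function names here are
illustrative, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratecfg {
    uint64_t rate_bytes_ps; /* bytes per second */
    uint32_t mult;
    uint8_t  shift;
};

/* Same loop as the patch; plain division replaces div64_u64 */
static void precompute(struct ratecfg *r, uint64_t rate)
{
    r->rate_bytes_ps = rate;
    r->mult = 1;
    r->shift = 0;
    if (rate > 0) {
        uint64_t factor = NSEC_PER_SEC;

        for (;;) {
            r->mult = (uint32_t)(factor / rate);
            /* stop once mult reaches bit 31, or factor would
             * overflow on the next doubling
             */
            if ((r->mult & (1U << 31)) || (factor & (1ULL << 63)))
                break;
            factor <<= 1;
            r->shift++;
        }
    }
}

/* Fast path: nanoseconds to send len bytes, no division */
static uint64_t len_to_ns(const struct ratecfg *r, unsigned int len)
{
    return ((uint64_t)len * r->mult) >> r->shift;
}

int main(void)
{
    struct ratecfg r;
    uint64_t rate = 125000000; /* 1 Gbit/s, in bytes/s */

    precompute(&r, rate);
    printf("mult=%u shift=%u: 1500 bytes -> %llu ns (exact %llu)\n",
           r.mult, (unsigned)r.shift,
           (unsigned long long)len_to_ns(&r, 1500),
           (unsigned long long)(1500 * NSEC_PER_SEC / rate));
    return 0;
}

For 125,000,000 bytes/s (1 Gbit/s) the loop settles on mult = 2^31
and shift = 28, so a 1500-byte packet maps to exactly 12000 ns,
matching the exact division.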


* Re: [PATCH net-next] net_sched: psched_ratecfg_precompute() improvements
From: Ben Greear @ 2013-06-06 20:37 UTC
  To: Eric Dumazet; +Cc: David Miller, netdev

On 06/06/2013 01:27 PM, Eric Dumazet wrote:
> From: Eric Dumazet <edumazet@google.com>
>
> Before allowing 64-bit byte rates, refactor
> psched_ratecfg_precompute() for better comments
> and increased accuracy.
>
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> ---
>   include/net/sch_generic.h |    4 +--
>   net/sched/sch_generic.c   |   42 ++++++++++++++++--------------------
>   2 files changed, 21 insertions(+), 25 deletions(-)
>
> diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
> index e7f4e21..e788397 100644
> --- a/include/net/sch_generic.h
> +++ b/include/net/sch_generic.h
> @@ -679,7 +679,7 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
>   #endif
>
>   struct psched_ratecfg {
> -	u64	rate_bps;
> +	u64	rate_bps; /* bytes per second */

Seems like rate_bytes_ps would be better... rate_bps is going to
be easily confused with bits-per-second.

Ben


-- 
Ben Greear <greearb@candelatech.com>
Candela Technologies Inc  http://www.candelatech.com


* [PATCH v2 net-next] net_sched: psched_ratecfg_precompute() improvements
From: Eric Dumazet @ 2013-06-06 20:56 UTC
  To: Ben Greear; +Cc: David Miller, netdev

From: Eric Dumazet <edumazet@google.com>

Before allowing 64-bit byte rates, refactor
psched_ratecfg_precompute() for better comments
and increased accuracy.

The rate_bps field is renamed to rate_bytes_ps, as we only
have to worry about bytes per second.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Ben Greear <greearb@candelatech.com>
---
v2: rate_bps renamed to rate_bytes_ps (Ben Greear)

 include/net/sch_generic.h |    4 +--
 net/sched/sch_generic.c   |   44 ++++++++++++++++--------------------
 2 files changed, 22 insertions(+), 26 deletions(-)

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e7f4e21..9485428 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -679,7 +679,7 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
 #endif
 
 struct psched_ratecfg {
-	u64	rate_bps;
+	u64	rate_bytes_ps; /* bytes per second */
 	u32	mult;
 	u16	overhead;
 	u8	shift;
@@ -697,7 +697,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
 					  const struct psched_ratecfg *r)
 {
 	memset(res, 0, sizeof(*res));
-	res->rate = r->rate_bps >> 3;
+	res->rate = r->rate_bytes_ps;
 	res->overhead = r->overhead;
 }
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2022408..4626cef 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -901,37 +901,33 @@ void dev_shutdown(struct net_device *dev)
 void psched_ratecfg_precompute(struct psched_ratecfg *r,
 			       const struct tc_ratespec *conf)
 {
-	u64 factor;
-	u64 mult;
-	int shift;
-
 	memset(r, 0, sizeof(*r));
 	r->overhead = conf->overhead;
-	r->rate_bps = (u64)conf->rate << 3;
+	r->rate_bytes_ps = conf->rate;
 	r->mult = 1;
 	/*
-	 * Calibrate mult, shift so that token counting is accurate
-	 * for smallest packet size (64 bytes).  Token (time in ns) is
-	 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps.  It will
-	 * work as long as the smallest packet transfer time can be
-	 * accurately represented in nanosec.
+	 * The deal here is to replace a divide by a reciprocal one
+	 * in fast path (a reciprocal divide is a multiply and a shift)
+	 *
+	 * Normal formula would be :
+	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
+	 *
+	 * We compute mult/shift to use instead :
+	 *  time_in_ns = (len * mult) >> shift;
+	 *
+	 * We try to get the highest possible mult value for accuracy,
+	 * but have to make sure no overflows will ever happen.
 	 */
-	if (r->rate_bps > 0) {
-		/*
-		 * Higher shift gives better accuracy.  Find the largest
-		 * shift such that mult fits in 32 bits.
-		 */
-		for (shift = 0; shift < 16; shift++) {
-			r->shift = shift;
-			factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-			mult = div64_u64(factor, r->rate_bps);
-			if (mult > UINT_MAX)
+	if (r->rate_bytes_ps > 0) {
+		u64 factor = NSEC_PER_SEC;
+
+		for (;;) {
+			r->mult = div64_u64(factor, r->rate_bytes_ps);
+			if (r->mult & (1U << 31) || factor & (1ULL << 63))
 				break;
+			factor <<= 1;
+			r->shift++;
 		}
-
-		r->shift = shift - 1;
-		factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-		r->mult = div64_u64(factor, r->rate_bps);
 	}
 }
 EXPORT_SYMBOL(psched_ratecfg_precompute);
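
To make the accuracy gain concrete, a rough userspace sketch can
compare the (mult, shift) pair each algorithm picks at a high rate.
Again plain division stands in for div64_u64; the oversized len (one
second worth of bytes) is only there to make the rounding error
visible, since real packet lengths are far smaller.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
    uint64_t rate = 1250000000ULL; /* 10 Gbit/s, in bytes/s */
    uint64_t rate_bps = rate << 3;
    uint64_t factor, mult_old;
    uint32_t mult_new = 0;
    uint8_t shift_new = 0;
    int shift;

    /* Old algorithm: shift capped below 16, mult must fit in 32 bits */
    for (shift = 0; shift < 16; shift++) {
        factor = 8ULL * NSEC_PER_SEC * (1 << shift);
        if (factor / rate_bps > UINT32_MAX)
            break;
    }
    shift--; /* largest shift whose mult still fits */
    factor = 8ULL * NSEC_PER_SEC * (1 << shift);
    mult_old = factor / rate_bps;

    /* New algorithm: keep doubling until mult reaches bit 31 */
    factor = NSEC_PER_SEC;
    for (;;) {
        mult_new = (uint32_t)(factor / rate);
        if ((mult_new & (1U << 31)) || (factor & (1ULL << 63)))
            break;
        factor <<= 1;
        shift_new++;
    }

    /* One second worth of bytes should take exactly 1e9 ns */
    printf("old: mult=%llu shift=%d -> %llu ns\n",
           (unsigned long long)mult_old, shift,
           (unsigned long long)((rate * mult_old) >> shift));
    printf("new: mult=%u shift=%u -> %llu ns\n",
           mult_new, (unsigned)shift_new,
           (unsigned long long)((rate * mult_new) >> shift_new));
    return 0;
}

At 10 Gbit/s the old pair (mult=26214, shift=15) comes out about 15
microseconds short per simulated second, while the new pair
(mult=3435973836, shift=32) is off by a single nanosecond.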


* Re: [PATCH v2 net-next] net_sched: psched_ratecfg_precompute() improvements
From: David Miller @ 2013-06-12  5:40 UTC
  To: eric.dumazet; +Cc: greearb, netdev

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Thu, 06 Jun 2013 13:56:19 -0700

> From: Eric Dumazet <edumazet@google.com>
> 
> Before allowing 64-bit byte rates, refactor
> psched_ratecfg_precompute() for better comments
> and increased accuracy.
> 
> The rate_bps field is renamed to rate_bytes_ps, as we only
> have to worry about bytes per second.
> 
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: Ben Greear <greearb@candelatech.com>

Applied, thanks Eric.

