
From: Con Kolivas <kernel@kolivas.org>

The priority biasing was off: multiplying the total load by the total
priority bias ruins the ratio of loads between runqueues. Dividing the
biased load by nr_running scales the bias to the average per running task,
so this patch keeps the ratios of loads between runqueues proportional to
their overall load. -2nd attempt.
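
Below is a minimal user-space sketch (not kernel code) of why dividing by
nr_running preserves the ratios. The struct name and the numeric values are
illustrative only; the prio_bias and nr_running fields mirror the runqueue
fields the patch touches.

#include <stdio.h>

struct rq_sample {
	unsigned long load;		/* raw cpu load */
	unsigned long prio_bias;	/* sum of per-task priority biases */
	unsigned long nr_running;	/* tasks on the queue */
};

static unsigned long old_bias(const struct rq_sample *rq)
{
	/* old code: total load * total bias, so the bias grows with
	 * the task count as well as with priority */
	return rq->load * rq->prio_bias;
}

static unsigned long new_bias(const struct rq_sample *rq)
{
	/* new code: scale by the *average* bias per running task */
	return rq->load * rq->prio_bias / rq->nr_running;
}

int main(void)
{
	/* two queues with equal per-task bias (10) but different
	 * task counts and a raw 1:2 load ratio */
	struct rq_sample a = { .load = 2048, .prio_bias = 2 * 10, .nr_running = 2 };
	struct rq_sample b = { .load = 4096, .prio_bias = 4 * 10, .nr_running = 4 };

	printf("old: a=%lu b=%lu (1:4, skewed by task count)\n",
	       old_bias(&a), old_bias(&b));
	printf("new: a=%lu b=%lu (1:2, matches raw load ratio)\n",
	       new_bias(&a), new_bias(&b));
	return 0;
}

With the old code the biased loads come out 1:4 even though the raw loads
are 1:2; with the per-task scaling the 1:2 ratio is preserved.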

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 kernel/sched.c |    8 ++++----
 1 files changed, 4 insertions(+), 4 deletions(-)

diff -puN kernel/sched.c~sched-correct_smp_nice_bias kernel/sched.c
--- devel/kernel/sched.c~sched-correct_smp_nice_bias	2005-08-21 23:49:38.000000000 -0700
+++ devel-akpm/kernel/sched.c	2005-08-21 23:49:38.000000000 -0700
@@ -978,7 +978,7 @@ static inline unsigned long __source_loa
 	else
 		source_load = min(cpu_load, load_now);
 
-	if (idle == NOT_IDLE || rq->nr_running > 1)
+	if (rq->nr_running > 1 || (idle == NOT_IDLE && rq->nr_running))
 		/*
 		 * If we are busy rebalancing the load is biased by
 		 * priority to create 'nice' support across cpus. When
@@ -987,7 +987,7 @@ static inline unsigned long __source_loa
 		 * prevent idle rebalance from trying to pull tasks from a
 		 * queue with only one running task.
 		 */
-		source_load *= rq->prio_bias;
+		source_load = source_load * rq->prio_bias / rq->nr_running;
 
 	return source_load;
 }
@@ -1011,8 +1011,8 @@ static inline unsigned long __target_loa
 	else
 		target_load = max(cpu_load, load_now);
 
-	if (idle == NOT_IDLE || rq->nr_running > 1)
-		target_load *= rq->prio_bias;
+	if (rq->nr_running > 1 || (idle == NOT_IDLE && rq->nr_running))
+		target_load = target_load * rq->prio_bias / rq->nr_running;
 
 	return target_load;
 }
_
