提交 6dd4a85b 编写于 作者: C Con Kolivas 提交者: Linus Torvalds

[PATCH] sched: correct smp_nice_bias

The priority biasing was off by multiplying the total load by the total
priority bias and this ruins the ratio of loads between runqueues. This
patch should correct the ratios of loads between runqueues to be proportional
to overall load. -2nd attempt.

From: Dave Kleikamp <shaggy@austin.ibm.com>

  This patch fixes a divide-by-zero error that I hit on a two-way i386
  machine.  rq->nr_running is tested to be non-zero, but may change by the
  time it is used in the division.  Saving the value to a local variable
  ensures that the same value that is checked is used in the division.
Signed-off-by: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 3b0bd9bc
@@ -972,15 +972,16 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long running = rq->nr_running;
 	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+		load_now = running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
 		source_load = load_now;
 	else
 		source_load = min(cpu_load, load_now);
 
-	if (idle == NOT_IDLE || rq->nr_running > 1)
+	if (running > 1 || (idle == NOT_IDLE && running))
 		/*
 		 * If we are busy rebalancing the load is biased by
 		 * priority to create 'nice' support across cpus. When
@@ -989,7 +990,7 @@ static inline unsigned long __source_load(int cpu, int type, enum idle_type idle
 	 * prevent idle rebalance from trying to pull tasks from a
 	 * queue with only one running task.
 	 */
-		source_load *= rq->prio_bias;
+		source_load = source_load * rq->prio_bias / running;
 
 	return source_load;
 }
@@ -1005,16 +1006,17 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long running = rq->nr_running;
 	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+		load_now = running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
 		target_load = load_now;
 	else
 		target_load = max(cpu_load, load_now);
 
-	if (idle == NOT_IDLE || rq->nr_running > 1)
-		target_load *= rq->prio_bias;
+	if (running > 1 || (idle == NOT_IDLE && running))
+		target_load = target_load * rq->prio_bias / running;
 
 	return target_load;
 }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册