sched: Avoid division by zero
Commit a5004278f0525dcb9aa43703ef77bf371ea837cd ("sched: Fix
cgroup smp fairness") introduced the possibility of a
divide-by-zero because load-balancing is not synchronized
between sched_domains.

This can cause the state of CPUs to change between the first
and second loop over the sched domain in tg_shares_up(): the
rq_weight sum computed in the first loop can be a stale zero
by the time the second loop hands it to
update_group_shares_cpu() as a divisor.
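
To make the failure mode concrete, below is a minimal user-space
sketch of the race. It is illustrative only: NCPUS, the
cfs_rq_weight[] array, and the two-pass structure in main() are
simplifying assumptions, not the kernel's data structures. The
weight sums are taken in a first pass, but the old second pass
re-read the per-cpu weight to choose the divisor, so a CPU that
becomes busy between the passes routes the stale zero sum into
the division.

#include <stdio.h>

#define NCPUS           2
#define NICE_0_LOAD     1024UL

static unsigned long cfs_rq_weight[NCPUS];      /* per-cpu group weight, all idle */

int main(void)
{
        unsigned long rq_weight = 0, eff_weight = 0, shares = 2048;
        int i;

        /* first pass: sum the raw and the boosted ("effective") weights */
        for (i = 0; i < NCPUS; i++) {
                unsigned long w = cfs_rq_weight[i];

                rq_weight  += w;
                eff_weight += w ? w : NICE_0_LOAD;
        }

        /* the race: cpu 0 gains a task between the two passes */
        cfs_rq_weight[0] = NICE_0_LOAD;

        /*
         * second pass: the old code re-read the per-cpu weight to pick
         * the divisor, so it now sees cpu 0 busy and picks the raw sum,
         * which is still the stale zero from the first pass.
         */
        for (i = 0; i < NCPUS; i++) {
                unsigned long w   = cfs_rq_weight[i] ? cfs_rq_weight[i] : NICE_0_LOAD;
                unsigned long div = cfs_rq_weight[i] ? rq_weight : eff_weight;

                if (!div)
                        printf("cpu %d: %lu * %lu / 0 -> divide by zero\n",
                               i, shares, w);
                else
                        printf("cpu %d: shares = %lu\n", i, shares * w / div);
        }
        return 0;
}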
Reported-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <1250855934.7538.30.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b529ef..8f8a98e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1522,7 +1522,8 @@
  */
 static void
 update_group_shares_cpu(struct task_group *tg, int cpu,
-                        unsigned long sd_shares, unsigned long sd_rq_weight)
+                        unsigned long sd_shares, unsigned long sd_rq_weight,
+                        unsigned long sd_eff_weight)
 {
         unsigned long rq_weight;
         unsigned long shares;
@@ -1535,13 +1536,15 @@
         if (!rq_weight) {
                 boost = 1;
                 rq_weight = NICE_0_LOAD;
+                if (sd_rq_weight == sd_eff_weight)
+                        sd_eff_weight += NICE_0_LOAD;
+                sd_rq_weight = sd_eff_weight;
         }
 
         /*
-         *           \Sum shares * rq_weight
-         * shares =  -----------------------
-         *               \Sum rq_weight
-         *
+         *             \Sum_j shares_j * rq_weight_i
+         * shares_i =  -----------------------------
+         *                  \Sum_j rq_weight_j
          */
         shares = (sd_shares * rq_weight) / sd_rq_weight;
         shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
@@ -1593,14 +1596,8 @@
         if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
                 shares = tg->shares;
 
-        for_each_cpu(i, sched_domain_span(sd)) {
-                unsigned long sd_rq_weight = rq_weight;
-
-                if (!tg->cfs_rq[i]->rq_weight)
-                        sd_rq_weight = eff_weight;
-
-                update_group_shares_cpu(tg, i, shares, sd_rq_weight);
-        }
+        for_each_cpu(i, sched_domain_span(sd))
+                update_group_shares_cpu(tg, i, shares, rq_weight, eff_weight);
 
         return 0;
 }
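
For reference, here is a stand-alone rendering of the fixed divisor
selection. This is an assumed user-space sketch mirroring the patched
update_group_shares_cpu(), not the kernel source; the helper names
group_shares() and clamp_ul() and the MIN_SHARES/MAX_SHARES values are
assumptions. The callee now receives both sums and makes the boost
decision against the single rq_weight value it reads itself, so the
zero-weight path always swaps in an effective sum that is forced
non-zero before the division.

#include <stdio.h>

#define NICE_0_LOAD     1024UL
#define MIN_SHARES      2UL             /* assumed bound */
#define MAX_SHARES      (1UL << 18)     /* assumed bound */

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
                              unsigned long hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

/*
 * One consistent read of the per-cpu weight drives both the boost
 * decision and the divisor substitution, so the zero-weight path can
 * no longer be paired with a zero divisor.
 */
static unsigned long group_shares(unsigned long rq_weight,
                                  unsigned long sd_shares,
                                  unsigned long sd_rq_weight,
                                  unsigned long sd_eff_weight)
{
        if (!rq_weight) {
                rq_weight = NICE_0_LOAD;
                if (sd_rq_weight == sd_eff_weight)
                        sd_eff_weight += NICE_0_LOAD;
                sd_rq_weight = sd_eff_weight;   /* non-zero by construction */
        }
        return clamp_ul(sd_shares * rq_weight / sd_rq_weight,
                        MIN_SHARES, MAX_SHARES);
}

int main(void)
{
        /* fully idle group with stale zero sums: previously a crash */
        printf("shares = %lu\n", group_shares(0, 2048, 0, 0));
        return 0;
}

The design point is that passing both sums keeps the numerator and the
divisor derived from one read of the per-cpu weight, instead of two
reads (caller and callee) that can disagree under concurrent
load-balancing.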