sched/numa: Don't scale the imbalance

Scaling imbalance_pct with the NUMA distance makes it far too easy to
end up with a ridiculously large value on machines with distant nodes.
Use a fixed 125% for now.
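
To illustrate the problem, here is a small userspace sketch (not kernel
code) of the arithmetic the removed numa_scale() performed. The distance
values and the assumption that sched_domains_numa_scale equals the local
node distance (commonly 10 in the SLIT) are illustrative only:

	/*
	 * Illustrative sketch: how scaling by NUMA distance inflates
	 * imbalance_pct.  Distances and the scale of 10 are assumptions.
	 */
	#include <stdio.h>

	static const unsigned long assumed_scale = 10;	/* local node distance */

	static unsigned long numa_scale(unsigned long x, unsigned long distance)
	{
		return x * distance / assumed_scale;
	}

	int main(void)
	{
		unsigned long distances[] = { 10, 20, 40, 80 };	/* hypothetical SLIT entries */
		size_t i;

		for (i = 0; i < sizeof(distances) / sizeof(distances[0]); i++) {
			unsigned long old_pct = 100 + numa_scale(25, distances[i]);

			/* e.g. distance 80: 100 + 25*80/10 = 300, vs. the fixed 125 */
			printf("distance %3lu: old imbalance_pct = %3lu, new = 125\n",
			       distances[i], old_pct);
		}
		return 0;
	}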

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-zsriaft1dv7hhboyrpvqjy6s@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 24922b7..6883d99 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6261,11 +6261,6 @@
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 
-static inline unsigned long numa_scale(unsigned long x, int level)
-{
-	return x * sched_domains_numa_distance[level] / sched_domains_numa_scale;
-}
-
 static inline int sd_local_flags(int level)
 {
 	if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
@@ -6286,7 +6281,7 @@
 		.min_interval		= sd_weight,
 		.max_interval		= 2*sd_weight,
 		.busy_factor		= 32,
-		.imbalance_pct		= 100 + numa_scale(25, level),
+		.imbalance_pct		= 125,
 		.cache_nice_tries	= 2,
 		.busy_idx		= 3,
 		.idle_idx		= 2,