sched: add tree based averages

Add support for tree-based vruntime averages.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/kernel/sched.c b/kernel/sched.c
index bf85b4b..198b07a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,6 +399,8 @@
 	SCHED_FEAT_SLEEPER_AVG		= 4,
 	SCHED_FEAT_SLEEPER_LOAD_AVG	= 8,
 	SCHED_FEAT_START_DEBIT		= 16,
+	SCHED_FEAT_USE_TREE_AVG         = 32,
+	SCHED_FEAT_APPROX_AVG           = 64,
 };
 
 const_debug unsigned int sysctl_sched_features =
@@ -406,7 +408,9 @@
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
-		SCHED_FEAT_START_DEBIT		*1;
+		SCHED_FEAT_START_DEBIT		*1 |
+		SCHED_FEAT_USE_TREE_AVG		*0 |
+		SCHED_FEAT_APPROX_AVG		*0;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c8c6b05..86e5e8c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -547,16 +547,22 @@
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	struct sched_entity *last = __pick_last_entity(cfs_rq);
 	u64 min_runtime, latency;
 
 	min_runtime = cfs_rq->min_vruntime;
-	if (last) {
-		min_runtime += last->vruntime;
-		min_runtime >>= 1;
-		if (initial && sched_feat(START_DEBIT))
-			min_runtime += sysctl_sched_latency/2;
-	}
+
+	if (sched_feat(USE_TREE_AVG)) {
+		struct sched_entity *last = __pick_last_entity(cfs_rq);
+		if (last) {
+			min_runtime = __pick_next_entity(cfs_rq)->vruntime;
+			min_runtime += last->vruntime;
+			min_runtime >>= 1;
+		}
+	} else if (sched_feat(APPROX_AVG))
+		min_runtime += sysctl_sched_latency/2;
+
+	if (initial && sched_feat(START_DEBIT))
+		min_runtime += sched_slice(cfs_rq, se);
 
 	if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
 		latency = sysctl_sched_latency;