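/*
 * How this file is consumed -- an illustrative sketch, not part of
 * this header.  kernel/sched.c includes sched_features.h several
 * times with different SCHED_FEAT() definitions: once to build an
 * enum of feature bits, once to fold the default values into the
 * sysctl_sched_features mask, which the sched_feat() macro then
 * tests.
 */
#if 0
#define SCHED_FEAT(name, enabled)				\
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)				\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif
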
/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
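
/*
 * An illustrative sketch of how this bit is consumed, modelled on the
 * sleeper placement in place_entity() (sched_fair.c): a waking sleeper
 * is placed up to one full latency period before min_vruntime; the
 * gentle variant grants only half of that credit.
 */
#if 0
	if (!initial) {
		unsigned long thresh = sysctl_sched_latency;

		/* Halve the sleeper's credit: runs sooner, spread survives. */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}
#endif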

/*
 * Place new tasks ahead so that they do not starve already
 * running tasks.
 */
SCHED_FEAT(START_DEBIT, 1)
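
/*
 * Illustrative sketch, again modelled on place_entity(): a newly
 * forked task is charged one virtual slice up front, so fork-heavy
 * workloads cannot starve tasks that are already running.
 */
#if 0
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);
#endif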
Peter Zijlstrae26af0e2009-09-11 12:31:23 +020013
14/*
Peter Zijlstrae26af0e2009-09-11 12:31:23 +020015 * Based on load and program behaviour, see if it makes sense to place
16 * a newly woken task on the same cpu as the task that woke it --
17 * improve cache locality. Typically used with SYNC wakeups as
18 * generated by pipes and the like, see also SYNC_WAKEUPS.
19 */
20SCHED_FEAT(AFFINE_WAKEUPS, 1)
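
/*
 * Illustrative sketch, modelled on the select_task_rq_fair() wakeup
 * path from around the time this bit was introduced: the waker's cpu
 * is only considered as a target when the bit is set and that cpu is
 * in the task's allowed mask; wake_affine() then weighs both cpus'
 * loads before committing.
 */
#if 0
	if (sd_flag & SD_BALANCE_WAKE) {
		if (sched_feat(AFFINE_WAKEUPS) &&
		    cpumask_test_cpu(cpu, &p->cpus_allowed))
			want_affine = 1;
		new_cpu = prev_cpu;
	}
#endif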

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, 0)
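
/*
 * Illustrative sketch, a simplified fragment modelled on
 * check_preempt_wakeup() (sched_fair.c): when the woken task does not
 * preempt, it is marked as the "next" buddy so pick_next_entity()
 * favours it once the current task gives up the cpu.
 */
#if 0
	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
		set_next_buddy(pse);
#endif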
Peter Zijlstrae26af0e2009-09-11 12:31:23 +020028
29/*
30 * Prefer to schedule the task that ran last (when we did
31 * wake-preempt) as that likely will touch the same data, increases
32 * cache locality.
33 */
34SCHED_FEAT(LAST_BUDDY, 1)
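
/*
 * Illustrative sketch of how both buddy bits bias selection, modelled
 * on pick_next_entity() (sched_fair.c): a buddy is only chosen over
 * the leftmost entity when doing so stays within the wakeup-preemption
 * fairness bound.
 */
#if 0
	struct sched_entity *se = __pick_first_entity(cfs_rq);
	struct sched_entity *left = se;

	/* Prefer the last buddy: return the cpu to a preempted task. */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	/* The next buddy really wants to run; take it if that's not unfair. */
	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;
#endif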

/*
 * Consider buddies to be cache hot; decreases the likelihood of a
 * cache buddy being migrated away, increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, 1)
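
/*
 * Illustrative sketch, modelled on task_hot() (sched.c): the load
 * balancer treats buddy tasks as cache hot and declines to migrate
 * them while the current cpu still has work.
 */
#if 0
	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
	    (&p->se == cfs_rq_of(&p->se)->next ||
	     &p->se == cfs_rq_of(&p->se)->last))
		return 1;	/* too hot to migrate */
#endif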
41
Peter Zijlstra8e6598a2009-09-03 13:20:03 +020042/*
43 * Use arch dependent cpu power functions
44 */
45SCHED_FEAT(ARCH_POWER, 0)
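
/*
 * Illustrative sketch, modelled on update_cpu_power() (sched_fair.c):
 * with ARCH_POWER set, cpu power comes from an architecture override
 * (arch_scale_freq_power() is a __weak hook) instead of the generic
 * default.
 */
#if 0
	unsigned long power = SCHED_POWER_SCALE;

	if (sched_feat(ARCH_POWER))
		power *= arch_scale_freq_power(sd, cpu);
	else
		power *= default_scale_freq_power(sd, cpu);

	power >>= SCHED_POWER_SHIFT;
#endif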

/*
 * Use high-resolution timers to deliver precise preemption points at
 * the end of a task's slice, instead of waiting for the next
 * periodic tick.
 */
SCHED_FEAT(HRTICK, 0)

/*
 * Let the periodic tick also run preemption checks while the hrtick
 * timer is active.
 */
SCHED_FEAT(DOUBLE_TICK, 0)

/*
 * Bias load-balance load estimates with the decayed rq->cpu_load[]
 * history, damping migrations on transient load spikes.
 */
SCHED_FEAT(LB_BIAS, 1)
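
/*
 * Illustrative sketch, modelled on source_load()/target_load()
 * (sched.c): a pull source reports the smaller of instantaneous and
 * historic load, a push target the larger, so one spiky sample does
 * not trigger a migration.
 */
#if 0
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return min(rq->cpu_load[type-1], total);
}
#endif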
Peter Zijlstrae26af0e2009-09-11 12:31:23 +020050
51/*
52 * Spin-wait on mutex acquisition when the mutex owner is running on
53 * another cpu -- assumes that when the owner is running, it will soon
54 * release the lock. Decreases scheduling overhead.
55 */
Peter Zijlstra0d66bf62009-01-12 14:01:47 +010056SCHED_FEAT(OWNER_SPIN, 1)
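
/*
 * Illustrative sketch, modelled on the __mutex_lock_common() slow
 * path (kernel/mutex.c): spin as long as the owner stays on a cpu;
 * mutex_spin_on_owner() returns 0 -- and we block -- as soon as
 * OWNER_SPIN is clear or the owner schedules out.
 */
#if 0
	for (;;) {
		struct task_struct *owner = ACCESS_ONCE(lock->owner);

		if (owner && !mutex_spin_on_owner(lock, owner))
			break;			/* owner slept: go block */

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
			return 0;		/* got the lock while spinning */

		arch_mutex_cpu_relax();
	}
#endif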

/*
 * Decrement CPU power based on time not spent running tasks.
 */
SCHED_FEAT(NONTASK_POWER, 1)
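
/*
 * Illustrative sketch, modelled on update_rq_clock_task() (sched.c):
 * time eaten by IRQ handling or stolen by the hypervisor is fed into
 * rq->rt_avg, and scale_rt_power() shrinks the cpu's power in
 * proportion, so the load balancer expects less from that cpu.
 */
#if 0
	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif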

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, 1)
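
/*
 * Illustrative sketch, modelled on ttwu_queue() (sched.c): a remote
 * wakeup is pushed onto the target cpu's wake list and finished from
 * the scheduler IPI there, instead of grabbing the remote rq->lock
 * locally.
 */
#if 0
static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
		ttwu_queue_remote(p, cpu);	/* llist add + IPI */
		return;
	}

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}
#endif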

/*
 * Force use of the SD_OVERLAP group-building path even for sched
 * domains that do not declare overlapping spans.
 */
SCHED_FEAT(FORCE_SD_OVERLAP, 0)