/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

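/*
 * cpu_idle_poll_ctrl - reference-counted switch for forced idle polling.
 *
 * Calls with @enable true and false are expected to be paired; while the
 * count is positive the idle loop busy-polls in cpu_idle_poll() instead of
 * entering cpuidle.
 */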
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

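/*
 * "nohlt" on the kernel command line forces the polling idle loop from boot,
 * "hlt" restores the default behaviour.  Only available on architectures
 * that select CONFIG_GENERIC_IDLE_POLL_SETUP.
 */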
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

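/*
 * cpu_idle_poll - busy-wait with interrupts enabled.
 *
 * Spin with cpu_relax() until a reschedule is pending, or until neither
 * forced polling nor an expired tick broadcast device requires polling
 * any more.
 */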
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
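/*
 * Architectures override these hooks as needed; arch_cpu_idle() in
 * particular is normally replaced by a low-power wait.  The generic
 * fallback below just forces the polling loop and re-enables interrupts.
 */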
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void default_idle_call(void)
{
	if (current_clr_polling_and_test())
		local_irq_enable();
	else
		arch_cpu_idle();
}

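/*
 * call_cpuidle - enter the idle state selected for this CPU.
 *
 * Returns the index of the state actually entered, or a negative error
 * code if the requested state was invalid or a reschedule became pending
 * first.
 */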
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/* Fall back to the default arch idle method on errors. */
	if (next_state < 0) {
		default_idle_call();
		return next_state;
	}

	/*
	 * The idle task must be rescheduled, so it is pointless to go idle;
	 * just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set and returns with polling set.  If it ever stops polling, it
 * must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the case,
	 * exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * During the idle period, stop measuring the latency of
	 * irqs-disabled critical sections.
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev)) {
		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */
	if (idle_should_freeze()) {
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state >= 0) {
			local_irq_enable();
			goto exit_idle;
		}

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();
}

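/*
 * Written by an outgoing CPU right before it calls arch_cpu_idle_dead()
 * (see the offline handling in cpu_idle_loop() below); the smp_mb() there
 * orders all prior activity before the flag becomes visible to whoever is
 * waiting for this CPU to die.
 */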
DEFINE_PER_CPU(bool, cpu_dead_idle);

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id())) {
				rcu_cpu_notify(NULL, CPU_DYING_IDLE,
					       (void *)(long)smp_processor_id());
				smp_mb(); /* all activity before dead. */
				this_cpu_write(cpu_dead_idle, true);
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we re-enable interrupts and spin.
			 *
			 * Also, if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set.  That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}

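/*
 * cpu_startup_entry - arch entry point into the generic idle loop.
 *
 * Called by architecture code once a CPU (boot or secondary) is ready to
 * start idling; this function never returns.
 */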
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non-boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}