blob: 45caf90b24cd9693a72b943220ecd0176580f748 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Peter Zijlstra029632f2011-10-25 10:00:11 +02002#include "sched.h"
3
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004/*
5 * stop-task scheduling class.
6 *
7 * The stop task is the highest priority task in the system, it preempts
8 * everything and will be preempted by nothing.
9 *
10 * See kernel/stop_machine.c
11 */
12
#ifdef CONFIG_SMP
/*
 * A stop task is pinned to its CPU for its whole life; placement is
 * therefore trivial: always report the task's current CPU.
 */
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */
20
/*
 * The stop task is the highest-priority task in the system (see the file
 * header), so nothing can preempt it: this hook is intentionally a no-op.
 */
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}
26
Peter Zijlstra606dba22012-02-11 06:05:00 +010027static struct task_struct *
Matt Flemingd8ac8972016-09-21 14:38:10 +010028pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
Peter Zijlstra34f971f2010-09-22 13:53:15 +020029{
30 struct task_struct *stop = rq->stop;
31
Kirill Tkhaida0c1e62014-08-20 13:47:32 +040032 if (!stop || !task_on_rq_queued(stop))
Peter Zijlstra606dba22012-02-11 06:05:00 +010033 return NULL;
Peter Zijlstra34f971f2010-09-22 13:53:15 +020034
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +010035 put_prev_task(rq, prev);
Peter Zijlstra606dba22012-02-11 06:05:00 +010036
37 stop->se.exec_start = rq_clock_task(rq);
38
39 return stop;
Peter Zijlstra34f971f2010-09-22 13:53:15 +020040}
41
/*
 * The stop class keeps no run-queue of its own (rq->stop is a single
 * slot), so enqueueing only has to bump the rq's runnable count.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}
47
/*
 * Counterpart of enqueue_task_stop(): drop the rq's runnable count;
 * there is no per-class queue to remove the task from.
 */
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}
53
static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}
58
/*
 * Account the CPU time the stop task consumed while it was current, then
 * restart its accounting window.  Mirrors the update_curr() logic of the
 * other classes, inlined here because the stop class has no entities.
 */
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	/* Runtime consumed since exec_start was last (re)set. */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0; /* clock anomaly: never charge negative time */

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	/* Open a fresh window and charge the cpuacct group. */
	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);
}
77
/*
 * Scheduler tick: nothing to do — the stop task has no timeslice and
 * cannot be preempted, so the tick never needs to act on it.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}
81
/*
 * Called when the stop task becomes rq->curr: start its runtime
 * accounting window (consumed later by put_prev_task_stop()).
 */
static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}
88
Peter Zijlstrada7a7352011-01-17 17:03:27 +010089static void switched_to_stop(struct rq *rq, struct task_struct *p)
Peter Zijlstra34f971f2010-09-22 13:53:15 +020090{
91 BUG(); /* its impossible to change to this class */
92}
93
Peter Zijlstrada7a7352011-01-17 17:03:27 +010094static void
95prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
Peter Zijlstra34f971f2010-09-22 13:53:15 +020096{
97 BUG(); /* how!?, what priority? */
98}
99
/*
 * sched_rr_get_interval() hook: the stop task has no round-robin
 * timeslice, so report 0.
 */
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}
105
Thomas Gleixner90e362f2014-11-23 23:04:52 +0100106static void update_curr_stop(struct rq *rq)
107{
108}
109
Peter Zijlstra34f971f2010-09-22 13:53:15 +0200110/*
111 * Simple, special scheduling class for the per-CPU stop tasks:
112 */
Peter Zijlstra029632f2011-10-25 10:00:11 +0200113const struct sched_class stop_sched_class = {
Dario Faggioliaab03e02013-11-28 11:14:43 +0100114 .next = &dl_sched_class,
Peter Zijlstra34f971f2010-09-22 13:53:15 +0200115
116 .enqueue_task = enqueue_task_stop,
117 .dequeue_task = dequeue_task_stop,
118 .yield_task = yield_task_stop,
119
120 .check_preempt_curr = check_preempt_curr_stop,
121
122 .pick_next_task = pick_next_task_stop,
123 .put_prev_task = put_prev_task_stop,
124
125#ifdef CONFIG_SMP
126 .select_task_rq = select_task_rq_stop,
Peter Zijlstrac5b28032015-05-15 17:43:35 +0200127 .set_cpus_allowed = set_cpus_allowed_common,
Peter Zijlstra34f971f2010-09-22 13:53:15 +0200128#endif
129
130 .set_curr_task = set_curr_task_stop,
131 .task_tick = task_tick_stop,
132
133 .get_rr_interval = get_rr_interval_stop,
134
135 .prio_changed = prio_changed_stop,
136 .switched_to = switched_to_stop,
Thomas Gleixner90e362f2014-11-23 23:04:52 +0100137 .update_curr = update_curr_stop,
Peter Zijlstra34f971f2010-09-22 13:53:15 +0200138};