/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */
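
/*
 * For contrast, a minimal userspace sketch of how an ordinary task
 * opts into the SCHED_IDLE *policy* (handled by CFS, not by this
 * class); pid here stands for whatever thread is being demoted:
 *
 *	struct sched_param param = { .sched_priority = 0 };
 *	sched_setscheduler(pid, SCHED_IDLE, &param);
 *
 * The class in this file only manages the per-CPU idle threads that
 * the kernel creates at boot.
 */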

#ifdef CONFIG_SMP
static int select_task_rq_idle(struct task_struct *p, int sync)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
{
	resched_task(rq->idle);
}

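/*
 * The per-CPU idle thread is always runnable, so this class can
 * always supply a task; it is the scheduler's choice of last resort:
 */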
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);

	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
	spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	spin_lock_irq(&rq->lock);
}

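/*
 * The idle task keeps no class-specific state that would need
 * updating when it is switched out, hence the empty hook:
 */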
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

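/*
 * The idle task is never enqueued and contributes no load, so there
 * is never anything for the load balancer to move in this class:
 */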
#ifdef CONFIG_SMP
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return 0;
}

static int
move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	return 0;
}
#endif

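/*
 * The idle task is not time-sliced, so the periodic tick has nothing
 * to account for here:
 */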
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

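/* Nothing to do when the idle task becomes rq->curr: */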
static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p,
			     int running)
{
	/* Can this actually happen?? */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p);
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/* This can happen for hotplug CPUs */

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's:
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p);
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_idle,
	.move_one_task		= move_one_task_idle,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,

	/* no .task_new for idle tasks */
};
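
/*
 * Note: idle_sched_class sits at the tail of the scheduling-class
 * chain (rt_sched_class -> fair_sched_class -> idle_sched_class), so
 * the core pick_next_task() reaches it only when no other class has
 * a runnable task.
 */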