/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

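/*
 * Assembly entry point (the ret_from_fork label in entry_32.S) where a
 * newly forked task resumes execution after its first context switch.
 */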
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
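	/*
	 * thread.sp points at the frame the switch_to() asm left on the
	 * kernel stack; the word at slot 3 is taken as the saved PC.
	 * This offset is tied to the asm in <asm/system.h> and is
	 * best-effort rather than exact.
	 */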
	return ((unsigned long *)tsk->thread.sp)[3];
}

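/* On UP kernels a CPU can never go offline, so this is unreachable. */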
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be done, so just try
 * to conserve power and have a low exit latency (i.e. sit in a loop
 * waiting for somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized, but there is no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

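	/*
	 * TS_POLLING tells the scheduler that idle polls need_resched()
	 * itself, so a remote wakeup need not send a reschedule IPI.
	 */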
	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_touch_nmi();
			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		gs = get_user_gs(regs);
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	show_regs_common();

	printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

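	/*
	 * The child's pt_regs sit at the very top of its kernel stack:
	 * sp0 (the stack pointer the TSS supplies on ring transitions)
	 * is just above childregs, and the first switch into the child
	 * starts from thread.ip = ret_from_fork.
	 */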
	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	task_user_gs(p) = get_user_gs(regs);

	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	err = -ENOMEM;

	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs = 0;
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);


/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is something of a red herring - this code is not
 * noticeably faster. However, there _is_ some room for improvement
 * here, so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
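/*
 * Rough order of operations below: FPU bookkeeping for prev, esp0
 * reload, %gs save, TLS reload, optional IOPL fixup, debugreg/IO
 * bitmap handling, then the optional FPU preload and %gs reload.
 */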
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	bool preload_fpu;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	/*
	 * If the task has used the FPU in the last 5 timeslices, just do
	 * a full restore of the math state immediately to avoid the trap;
	 * the chances of needing the FPU again soon are obviously high now.
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

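	/* Flush prev's lazy FPU state to memory (sets TS if it was in use). */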
	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * If we're going to preload the FPU context, make sure clts
	 * is run while we're batching the CPU state updates.
	 */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	if (preload_fpu)
		__math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	percpu_write(current_task, next_p);

	return prev_p;
}

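/*
 * Stack-walk bounds for get_wchan(): the highest offsets within the
 * task's stack page at which a saved %esp or %ebp may point.
 */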
#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* The switch_to() asm in <asm/system.h> pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
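		/* The word above the saved %ebp is the frame's return address. */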
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}
