/*
 * arch/v850/kernel/process.c -- Arch-dependent process handling
 *
 * Copyright (C) 2001,02,03 NEC Electronics Corporation
 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/reboot.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>

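/* Power-off hook: boards can point this at a routine that actually turns
   the machine off; it stays NULL when no such routine is available.  */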
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

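/* Defined in entry.S; this is where a newly forked thread starts running
   (see copy_thread below).  */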
extern void ret_from_fork (void);


/* The idle loop. */
static void default_idle (void)
{
        while (! need_resched ())
                asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
}

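/* The idle routine actually called from cpu_idle below; defaults to
   default_idle, but being a global pointer it can be overridden (for
   example by platform code with a better power-saving idle).  */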
void (*idle)(void) = default_idle;

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched())
                        (*idle) ();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (ie the swapper or direct descendants who
 * haven't done an "execve()") should use this: it will work within a system
 * call from a "real" process, but the process memory space will not be free'd
 * until both the parent and the child have exited.
 */
int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
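        /* These register variables are pinned to the registers the syscall
           trap expects for the syscall number, first argument, and return
           value (SYSCALL_NUM, SYSCALL_ARG0, and SYSCALL_RET are register
           names supplied by the arch headers).  */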
        register mm_segment_t fs = get_fs ();
        register unsigned long syscall asm (SYSCALL_NUM);
        register unsigned long arg0 asm (SYSCALL_ARG0);
        register unsigned long ret asm (SYSCALL_RET);

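        /* Issue the trap with a kernel address limit so the child thread
           inherits KERNEL_DS; the caller's original limit is restored
           below before returning.  */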
        set_fs (KERNEL_DS);

        /* Clone this thread. Note that we don't pass the clone syscall's
           second argument -- it's ignored for calls from kernel mode (the
           child's SP is always set to the top of the kernel stack). */
        arg0 = flags | CLONE_VM;
        syscall = __NR_clone;
        asm volatile ("trap " SYSCALL_SHORT_TRAP
                      : "=r" (ret), "=r" (syscall)
                      : "1" (syscall), "r" (arg0)
                      : SYSCALL_SHORT_CLOBBERS);

        if (ret == 0) {
                /* In child thread, call FN and exit. */
                arg0 = (*fn) (arg);
                syscall = __NR_exit;
                asm volatile ("trap " SYSCALL_SHORT_TRAP
                              : "=r" (ret), "=r" (syscall)
                              : "1" (syscall), "r" (arg0)
                              : SYSCALL_SHORT_CLOBBERS);
        }

        /* In parent. */
        set_fs (fs);

        return ret;
}

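/* Called when a process execs a new image; nothing arch-specific to tear
   down here beyond resetting the address limit back to user space.  */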
void flush_thread (void)
{
        set_fs (USER_DS);
}

int copy_thread (int nr, unsigned long clone_flags,
                 unsigned long stack_start, unsigned long stack_size,
                 struct task_struct *p, struct pt_regs *regs)
{
        /* Start pushing stuff from the top of the child's kernel stack. */
        unsigned long orig_ksp = task_tos(p);
        unsigned long ksp = orig_ksp;
        /* We push two `state save' stack frames (see entry.S) on the new
           kernel stack:
             1) The innermost one is what switch_thread would have
                pushed, and is used when we context switch to the child
                thread for the first time. It's set up to return to
                ret_from_fork in entry.S.
             2) The outermost one (nearest the top) is what a syscall
                trap would have pushed, and is set up to return to the
                same location as the parent thread, but with a return
                value of 0. */
        struct pt_regs *child_switch_regs, *child_trap_regs;

        /* Trap frame. */
        ksp -= STATE_SAVE_SIZE;
        child_trap_regs = (struct pt_regs *)(ksp + STATE_SAVE_PT_OFFSET);
        /* Switch frame. */
        ksp -= STATE_SAVE_SIZE;
        child_switch_regs = (struct pt_regs *)(ksp + STATE_SAVE_PT_OFFSET);

        /* First copy parent's register state to child. */
        *child_switch_regs = *regs;
        *child_trap_regs = *regs;

        /* switch_thread returns to the restored value of the lp
           register (r31), so we make that the place where we want to
           jump when the child thread begins running. */
        child_switch_regs->gpr[GPR_LP] = (v850_reg_t)ret_from_fork;

        if (regs->kernel_mode)
                /* Since we're returning to kernel-mode, make sure the child's
                   stored kernel stack pointer agrees with what the actual
                   stack pointer will be at that point (the trap return code
                   always restores the SP, even when returning to
                   kernel-mode). */
                child_trap_regs->gpr[GPR_SP] = orig_ksp;
        else
                /* Set the child's user-mode stack-pointer (the name
                   `stack_start' is a misnomer, it's just the initial SP
                   value). */
                child_trap_regs->gpr[GPR_SP] = stack_start;

        /* Thread state for the child (everything else is on the stack). */
        p->thread.ksp = ksp;

        return 0;
}

/*
 * sys_execve() executes a new program.
 */
int sys_execve (char *name, char **argv, char **envp, struct pt_regs *regs)
{
        char *filename = getname (name);
        int error = PTR_ERR (filename);

        if (! IS_ERR (filename)) {
                error = do_execve (filename, argv, envp, regs);
                putname (filename);
        }

        return error;
}


/*
 * These bracket the sleeping functions.
 */
#define first_sched ((unsigned long)__sched_text_start)
#define last_sched ((unsigned long)__sched_text_end)

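/* Return an address within the scheduler where P is sleeping, for things
   like /proc/<pid>/wchan.  The stack walk below is disabled until the
   stack layout is sorted out, so for now this always returns 0.  */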
unsigned long get_wchan (struct task_struct *p)
{
#if 0 /* Barf. Figure out the stack-layout later. XXX */
        unsigned long fp, pc;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        pc = thread_saved_pc (p);

        /* This quite disgusting function walks up the stack, following
           the saved return addresses, until it finds something that's out
           of bounds (as defined by `first_sched' and `last_sched'). It
           then returns the last PC that was in-bounds. */
        do {
                if (fp < stack_page + sizeof (struct task_struct) ||
                    fp >= 8184+stack_page)
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (pc < first_sched || pc >= last_sched)
                        return pc;
                fp = *(unsigned long *) fp;
        } while (count++ < 16);
#endif

        return 0;
}