/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_list));
	if (child->parent == new_parent)
		return;
	list_add(&child->ptrace_list, &child->parent->ptrace_children);
	remove_parent(child);
	child->parent = new_parent;
	add_parent(child);
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(task_t *child)
{
	spin_lock(&child->sighand->siglock);
	if (child->state == TASK_TRACED) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			child->state = TASK_STOPPED;
		} else {
			signal_wake_up(child, 1);
		}
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	if (!list_empty(&child->ptrace_list)) {
		list_del_init(&child->ptrace_list);
		remove_parent(child);
		child->parent = child->real_parent;
		add_parent(child);
	}

	if (child->state == TASK_TRACED)
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current &&
	    (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
	    && child->signal != NULL) {
		ret = 0;
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED) {
			child->state = TASK_TRACED;
		} else if (child->state != TASK_TRACED && !kill) {
			ret = -ESRCH;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill) {
		wait_task_inactive(child);
	}

	/* All systems go.. */
	return ret;
}

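/*
 * Basic permission check for attaching: the target must have an mm
 * (kernel threads cannot be traced), the tracer must match all of the
 * target's uids and gids or have CAP_SYS_PTRACE, and a non-dumpable
 * target (e.g. after a setuid exec) also requires CAP_SYS_PTRACE.
 * The security module gets the final say.  Called with the task locked
 * by ptrace_may_attach() and ptrace_attach().
 */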
static int may_attach(struct task_struct *task)
{
	if (!task->mm)
		return -EPERM;
	if (((current->uid != task->euid) ||
	     (current->uid != task->suid) ||
	     (current->uid != task->uid) ||
	     (current->gid != task->egid) ||
	     (current->gid != task->sgid) ||
	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		return -EPERM;
	smp_rmb();
	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace(current, task);
}

int ptrace_may_attach(struct task_struct *task)
{
	int err;
	task_lock(task);
	err = may_attach(task);
	task_unlock(task);
	return !err;
}

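/*
 * Attach to a task on behalf of the current process (PTRACE_ATTACH).
 * On success the target is marked PT_PTRACED, reparented to the tracer
 * via __ptrace_link() and sent a SIGSTOP so that it stops for the
 * debugger.
 */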
int ptrace_attach(struct task_struct *task)
{
	int retval;

	retval = -EPERM;
	if (task->pid <= 1)
		goto out;
	if (task->tgid == current->tgid)
		goto out;

repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	local_irq_disable();
	if (!write_trylock(&tasklist_lock)) {
		local_irq_enable();
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = may_attach(task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
				      ? PT_ATTACHED : 0);
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	force_sig_specific(SIGSTOP, task);

bad:
	write_unlock_irq(&tasklist_lock);
	task_unlock(task);
out:
	return retval;
}

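/*
 * Detach helper: record the exit code, undo the tracing reparent and
 * wake the child so it can act on any pending signal.  The caller must
 * hold tasklist_lock for writing.
 */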
void __ptrace_detach(struct task_struct *child, unsigned int data)
{
	child->exit_code = data;
	/* .. re-parent .. */
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	if (child->ptrace)
		__ptrace_detach(child, data);
	write_unlock_irq(&tasklist_lock);

	return 0;
}

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */

int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}

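/*
 * Read @len bytes from the traced task's memory at @src into the user
 * buffer @dst, going through a small on-stack bounce buffer one chunk
 * at a time.  Returns the number of bytes copied, or -EIO/-EFAULT if
 * nothing could be transferred.
 */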
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

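/*
 * The write-side counterpart of ptrace_readdata(): copy @len bytes from
 * the user buffer @src into the traced task's memory at @dst, again via
 * a small bounce buffer.  Returns the number of bytes written or an
 * error code.
 */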
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

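/*
 * Translate the PTRACE_O_* bits passed in by the tracer into the
 * corresponding PT_TRACE_* flags on the child.  Any previously set
 * trace-event flags (PT_TRACE_MASK) are cleared first; unknown option
 * bits make the request fail with -EINVAL.
 */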
static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

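/*
 * PTRACE_GETSIGINFO: copy out the siginfo of the signal that stopped
 * the child.  Only meaningful while the child is stopped inside signal
 * delivery (last_siginfo is set); otherwise -EINVAL, or -ESRCH if the
 * child has no signal handlers left (i.e. is already dead).
 */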
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	siginfo_t lastinfo;
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			lastinfo = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	if (!error)
		return copy_siginfo_to_user(data, &lastinfo);
	return error;
}

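/*
 * PTRACE_SETSIGINFO: the converse of ptrace_getsiginfo(), replace the
 * siginfo that will be delivered to the child with the one supplied by
 * the tracer.
 */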
static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	siginfo_t newinfo;
	int error = -ESRCH;

	if (copy_from_user(&newinfo, data, sizeof (siginfo_t)))
		return -EFAULT;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = newinfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

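/*
 * Common handler for the ptrace requests that need no architecture-
 * specific work; arch_ptrace() implementations normally fall back to
 * this for anything they do not handle themselves.
 */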
int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;

	switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;
	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
		break;
	case PTRACE_SETSIGINFO:
		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
	int ret = -EPERM;

	/*
	 * Are we already being traced?
	 */
	task_lock(current);
	if (!(current->ptrace & PT_PTRACED)) {
		ret = security_ptrace(current->parent, current);
		/*
		 * Set the ptrace bit in the process ptrace flags.
		 */
		if (!ret)
			current->ptrace |= PT_PTRACED;
	}
	task_unlock(current);
	return ret;
}

/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	/*
	 * Tracing init is not allowed.
	 */
	if (pid == 1)
		return ERR_PTR(-EPERM);

	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

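/*
 * Generic sys_ptrace() entry point.  Architectures that need a fully
 * custom system call define __ARCH_SYS_PTRACE and provide their own;
 * everyone else gets this one, which handles PTRACE_TRACEME and
 * PTRACE_ATTACH here and hands everything else to arch_ptrace().
 */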
#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret < 0)
		goto out_put_task_struct;

out_put_task_struct:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
#endif /* __ARCH_SYS_PTRACE */