/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

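/*
 * Record one return address into the trace buffer, honoring the caller's
 * skip count and, when @nosched is set, filtering out scheduler internals.
 * Returns -1 once the buffer is full so callers can stop unwinding.
 */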
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

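/*
 * Walk the stack of @task with the unwinder, saving each return address.
 * If @regs is supplied, the unwind starts from that interrupted context and
 * regs->ip is recorded first.  The trace is terminated with ULONG_MAX when
 * there is room left in the buffer.
 */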
static void __save_stack_trace(struct stack_trace *trace,
			       struct task_struct *task, struct pt_regs *regs,
			       bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
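/*
 * A minimal usage sketch (assumed caller-side code, not part of this file):
 * the caller supplies the entries buffer and limits via struct stack_trace.
 *
 *	unsigned long entries[32];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 0,
 *	};
 *
 *	save_stack_trace(&trace);
 */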
void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

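/*
 * Like save_stack_trace(), but start from the register state in @regs
 * (e.g. an interrupt or exception frame) instead of the current frame.
 */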
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}

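/*
 * Save the stack trace of another task.  Scheduler functions are filtered
 * out, and the task's stack is pinned with try_get_task_stack() so it cannot
 * be freed while it is being unwound.
 */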
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

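/*
 * Warn and dump the task's stack at most once per call site when the
 * reliable unwinder hits an unexpected condition, so the failure can be
 * debugged without flooding the log.
 */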
#define STACKTRACE_DUMP_ONCE(task)	({				\
	static bool __section(.data.unlikely) __dumped;			\
									\
	if (!__dumped) {						\
		__dumped = true;					\
		WARN_ON(1);						\
		show_stack(task, NULL);					\
	}								\
})

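/*
 * Unwind @task's stack and save the return addresses, but fail with -EINVAL
 * as soon as anything looks unreliable: kernel-mode pt_regs on the stack,
 * a return address the kernel doesn't know about, a full trace buffer, or
 * an unwinder error.
 */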
static int __save_stack_trace_reliable(struct stack_trace *trace,
				       struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (!user_mode(regs))
				return -EINVAL;

			/*
			 * The last frame contains the user mode syscall
			 * pt_regs.  Skip it and finish the unwind.
			 */
			unwind_next_frame(&state);
			if (!unwind_done(&state)) {
				STACKTRACE_DUMP_ONCE(task);
				return -EINVAL;
			}
			break;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr) {
			STACKTRACE_DUMP_ONCE(task);
			return -EINVAL;
		}

		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state)) {
		STACKTRACE_DUMP_ONCE(task);
		return -EINVAL;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

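/*
 * Copy one user-space stack frame.  Returns 1 on success, 0 if the frame
 * pointer is bogus or the copy faults; page faults are disabled so the copy
 * fails instead of sleeping.
 */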
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

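/*
 * Follow the user-space frame pointer chain starting at regs->bp, recording
 * the saved return addresses.  The walk stops at the first frame that cannot
 * be read, points below the stack pointer, or loops back on itself.
 */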
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}