/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the fly.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>

#ifdef CONFIG_DYNAMIC_FTRACE

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
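
/*
 * Worked example (the addresses are made up for illustration, not taken
 * from a real kernel): patching a call site at ip = 0xc0100000 to call
 * addr = 0xc0200000 gives offset = 0xc0200000 - (0xc0100000 + 5) =
 * 0x000ffffb, so the five bytes written are:
 *
 *	e8 fb ff 0f 00		call 0xc0200000 (rel32, little-endian)
 *
 * MCOUNT_INSN_SIZE is 5 on x86: one opcode byte plus a 4-byte offset.
 */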

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU,
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: an IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write,
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */

static atomic_t in_nmi = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static int mod_code_write;		/* set when NMI should do the write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU may be writing to mod_code_status
	 * (and to the code itself).
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);
}

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
	/* Must have in_nmi seen before reading write flag */
	smp_mb();
	if (mod_code_write) {
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing in_nmi */
	smp_wmb();
	atomic_dec(&in_nmi);
}

static void wait_for_nmi(void)
{
	int waited = 0;

	while (atomic_read(&in_nmi)) {
		waited = 1;
		cpu_relax();
	}

	if (waited)
		nmi_wait_count++;
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_wmb();

	mod_code_write = 1;

	/* Make sure write bit is visible before we wait on NMIs */
	smp_mb();

	wait_for_nmi();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_wmb();

	mod_code_write = 0;

	/* make sure NMIs see the cleared bit */
	smp_mb();

	wait_for_nmi();

	return mod_code_status;
}
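
/*
 * Note how do_ftrace_mod_code() walks the six steps from the comment
 * above: fill the IP and "code" buffers (1), set mod_code_write (2),
 * wait_for_nmi() (3), ftrace_mod_code() (4), clear mod_code_write (5),
 * and a final wait_for_nmi() (6), with memory barriers ordering each
 * step against the NMI entry path.
 */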

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

static unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change; we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}
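
/*
 * For reference, the error returns above mean:
 *   -EFAULT  the text at ip could not be read (it may have vanished,
 *            e.g. with module unload or freed __init code),
 *   -EINVAL  the bytes found there did not match old_code,
 *   -EPERM   the protected write of new_code failed.
 */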

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * fall back to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"  /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

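	/*
	 * faulted now selects among the three candidate 5-byte nops
	 * exercised above:
	 *   0: P6 nop           0f 1f 44 00 00
	 *   1: 66-prefixed nop  66 66 66 66 90
	 *   2: jmp . + 5        a 2-byte short jmp over three 1-byte
	 *      nops (typically eb 03 90 90 90)
	 */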
	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}
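
/*
 * The site patched by ftrace_mod_jmp() is a 5-byte near jump,
 * "e9 <rel32>", where rel32 = destination - (ip + MCOUNT_INSN_SIZE).
 * Illustrative bytes (addresses made up for the example) for a jump
 * from ip = 0xc0100000 to 0xc0100040:
 *
 *	e9 3b 00 00 00		jmp 0xc0100040
 *
 * since 0xc0100040 - (0xc0100000 + 5) = 0x3b.
 */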

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}
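
/*
 * In other words, enabling and disabling the graph caller just swap
 * the destination of the jmp at ftrace_graph_call between ftrace_stub
 * (tracing off) and ftrace_graph_caller (tracing on); the old offset
 * doubles as a sanity check that the site holds what we expect.
 */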

#else /* !CONFIG_DYNAMIC_FTRACE */

/*
 * These functions are picked from those used on
 * this page for dynamic ftrace. They have been
 * simplified to ignore all traces in NMI context.
 */
static atomic_t in_nmi;

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
}

void ftrace_nmi_exit(void)
{
	atomic_dec(&in_nmi);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

/* Add a function return address to the trace stack on thread info. */
static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}
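
/*
 * Sketch of the bookkeeping (field names as used above): each task
 * carries a fixed-size array current->ret_stack[] of
 * FTRACE_RETFUNC_DEPTH entries, with current->curr_ret_stack indexing
 * the top. push_return_trace() and pop_return_trace() treat it as a
 * stack that mirrors the call stack of traced functions.
 */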

/* Retrieve a function return address from the trace stack on thread info. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	barrier();
	current->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	/* NMIs are currently unsupported */
	if (unlikely(atomic_read(&in_nmi)))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
		"   movl $0, %[faulted]\n"

		".section .fixup, \"ax\"\n"
		"3: movl $1, %[faulted]\n"
		".previous\n"

		_ASM_EXTABLE(1b, 3b)
		_ASM_EXTABLE(2b, 3b)

		: [parent_replaced] "=r" (parent), [old] "=r" (old),
		  [faulted] "=r" (faulted)
		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (unlikely(!__kernel_text_address(old))) {
		ftrace_graph_stop();
		*parent = old;
		WARN_ON(1);
		return;
	}

	calltime = cpu_clock(raw_smp_processor_id());

	if (push_return_trace(old, calltime,
			      self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
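
/*
 * Rough recap of the flow (names as used above): the mcount entry code
 * hands prepare_ftrace_return() the address of the return-address slot
 * via @parent. We save the original value in @old and overwrite the
 * slot with return_to_handler, so when the traced function returns it
 * lands in the tracer, which calls ftrace_return_to_handler() to log
 * the exit and recover the real return address from the per-task
 * ret_stack.
 */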
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */