/*
 * arch/sh/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 *  Copyright (C) 2006 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

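/*
 * Dummy stack-boundary callback for the unwinder; we only record
 * return addresses, so there is nothing to do here.
 */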
static int save_stack_stack(void *data, char *name)
{
	return 0;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = data;

	if (!reliable)
		return;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static const struct stacktrace_ops save_stack_ops = {
	.stack = save_stack_stack,
	.address = save_stack_address,
};

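/*
 * Save a stack trace of the current context, unwinding from the
 * current stack pointer.
 */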
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)current_stack_pointer;

	unwind_stack(current, NULL, sp, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

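/*
 * Identical to save_stack_address(), except that addresses inside the
 * scheduler are filtered out via in_sched_functions().
 */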
static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = (struct stack_trace *)data;

	if (!reliable)
		return;

	if (in_sched_functions(addr))
		return;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack = save_stack_stack,
	.address = save_stack_address_nosched,
};

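/*
 * Save a stack trace for the given task, unwinding from its saved
 * stack pointer.
 */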
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)tsk->thread.sp;

	unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);