/*
 * arch/sh/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 *  Copyright (C) 2006 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

/*
 * No-op callbacks for the parts of the stacktrace_ops interface that a
 * plain trace capture does not care about (unwinder warnings and stack
 * boundary notifications).
 */
static void save_stack_warning(void *data, char *msg)
{
}

static void
save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}

static int save_stack_stack(void *data, char *name)
{
	return 0;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = data;

	if (!reliable)
		return;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static const struct stacktrace_ops save_stack_ops = {
	.warning = save_stack_warning,
	.warning_symbol = save_stack_warning_symbol,
	.stack = save_stack_stack,
	.address = save_stack_address,
};

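/*
 * unwind_stack() walks the stack frames and reports each return address
 * it resolves through the ->address() callback above; only addresses the
 * unwinder marks as reliable end up in the trace buffer, and the trace is
 * terminated with ULONG_MAX when there is room left.
 */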
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)current_stack_pointer;

	unwind_stack(current, NULL, sp, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
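/*
 * Illustrative usage sketch (not part of this file): a caller supplies its
 * own entry buffer, captures the current stack, then prints it.  Buffer
 * size and skip count are arbitrary example values.
 *
 *	static unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 1,
 *	};
 *
 *	save_stack_trace(&trace);
 *	print_stack_trace(&trace, 0);
 */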

/*
 * Like save_stack_address(), but ignore addresses that fall within the
 * scheduler itself, so that a blocked task's trace starts at the point
 * where it went to sleep.
 */
static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = (struct stack_trace *)data;

	if (!reliable)
		return;

	if (in_sched_functions(addr))
		return;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static const struct stacktrace_ops save_stack_ops_nosched = {
	.warning = save_stack_warning,
	.warning_symbol = save_stack_warning_symbol,
	.stack = save_stack_stack,
	.address = save_stack_address_nosched,
};

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)tsk->thread.sp;

	unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
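/*
 * Illustrative usage sketch (not part of this file): capture the stack of
 * another (typically sleeping) task, with scheduler-internal frames
 * filtered out by the _nosched callbacks.  "tsk" is assumed to be a valid
 * task_struct the caller holds a reference to.
 *
 *	unsigned long entries[32];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *	};
 *
 *	save_stack_trace_tsk(tsk, &trace);
 *	print_stack_trace(&trace, 4);
 */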