/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

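/*
 * Set to 1 on a CPU while it is timing an irqs/preempt-off critical
 * section, so that nested or unrelated sections on that CPU are ignored.
 */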
static DEFINE_PER_CPU(int, tracing_cpu);

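/* Serializes the check-and-record of a new maximum latency. */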
static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

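/*
 * preempt_trace() and irq_trace() report whether the kind of critical
 * section currently in progress is one this tracer variant cares about;
 * each compiles to 0 when the corresponding tracer is not configured in.
 */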
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

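/*
 * Graph-mode support: when the display-graph option is set, events are
 * recorded and printed through the function-graph tracer hooks below
 * instead of plain TRACE_FN entries.
 */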
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(irqsoff_trace);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

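/*
 * The graph entry/return callbacks mirror the checks done in
 * irqsoff_tracer_call() but record graph events instead.
 */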
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return 0;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else
		ret = 0;

	atomic_dec(&data->disabled);
	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}

	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	u32 flags = GRAPH_TRACER_FLAGS;

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		flags |= TRACE_GRAPH_PRINT_DURATION;
	else
		flags |= TRACE_GRAPH_PRINT_ABS_TIME;

	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, flags);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph()) {
		struct trace_iterator *iter = s->private;
		u32 flags = GRAPH_TRACER_FLAGS;

		if (trace_flags & TRACE_ITER_LATENCY_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return;

			print_trace_header(s, iter);
			flags |= TRACE_GRAPH_PRINT_DURATION;
		} else
			flags |= TRACE_GRAPH_PRINT_ABS_TIME;

		print_graph_headers_flags(s, flags);
	} else
		trace_default_header(s);
}

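/*
 * Emit an entry/return pair with identical timestamps so that a single
 * function shows up in the graph output with zero duration.
 */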
static void
trace_graph_function(struct trace_array *tr,
		     unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

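/*
 * Record a function event in whichever form the current display mode
 * wants: a plain TRACE_FN entry, or parent and child graph events.
 */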
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (!is_graph())
		trace_function(tr, ip, parent_ip, flags, pc);
	else {
		trace_graph_function(tr, parent_ip, flags, pc);
		trace_graph_function(tr, ip, flags, pc);
	}
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

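/*
 * Called when a critical section ends: compute how long irqs and/or
 * preemption were off and, if it is a new maximum, snapshot the trace.
 */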
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

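/*
 * Mark the start of a critical section on this CPU: stamp the time and
 * entry point so the closing side can compute the latency.
 */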
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

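/*
 * Mark the end of a critical section: clear the per-cpu flag, record
 * the closing function and let check_critical_timing() judge the delta.
 */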
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suspend timing (e.g. in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

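/*
 * With CONFIG_PROVE_LOCKING, lockdep owns the trace_hardirqs_*() entry
 * points and calls time_hardirqs_on/off() for us; without it, this file
 * provides those entry points itself (plus stubs for the lockdep hooks).
 */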
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

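/*
 * Register either the plain function tracer or the graph hooks,
 * depending on the requested display mode.
 */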
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret = 0;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

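/*
 * Common setup shared by the irqsoff, preemptoff and preemptirqsoff
 * variants; forces the latency output format while the tracer runs.
 */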
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr, is_graph());

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

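/*
 * Three tracer variants share the machinery above; each init routine
 * just selects which trace_type bits are active.
 */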
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);