/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

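/*
 * One scratch buffer per perf recursion context (task, softirq, hardirq,
 * NMI), shared by every trace event. The buffers are allocated when the
 * first perf trace event in the system is created and freed again when
 * the last one goes away.
 */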
/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

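/*
 * Decide whether the caller may attach a perf event to this trace event.
 * Counting-only use is always allowed; raw sample data is restricted to
 * per-task events on CAP_ANY-flagged trace events or to sufficiently
 * privileged users.
 */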
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret;
	int cpu;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	ret = -ENOMEM;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

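/*
 * Called by the perf core when a PERF_TYPE_TRACEPOINT event is created:
 * attr.config carries the trace event id. The matching event's module is
 * pinned with try_module_get() for the lifetime of the perf event.
 */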
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

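/*
 * pmu ->add()/->del() callbacks: link the event into (or unlink it from)
 * the per-CPU hlist of its trace event class, so the tracepoint probe can
 * find the events active on this CPU under RCU.
 */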
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

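/*
 * Tear down the perf side of a trace event: drop the class refcount and,
 * on the last user, unregister the perf flavour, wait for in-flight
 * probes to finish, then free the per-CPU list and, if no perf trace
 * event is left at all, the shared buffers.
 */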
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
	mutex_unlock(&event_mutex);
}

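/*
 * Grab a recursion-protected per-CPU buffer and pre-fill the common
 * trace_entry header; the caller fills in the event payload and hands the
 * buffer back through perf_trace_buf_submit().
 *
 * A rough sketch of a typical caller (the generated TRACE_EVENT perf
 * probes and the kprobe/syscall handlers follow this pattern; the example
 * event and its field are illustrative only):
 *
 *	struct my_event_entry *entry;
 *	struct hlist_head *head;
 *	int size, rctx;
 *
 *	size = ALIGN(sizeof(*entry), sizeof(u64));
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       regs, &rctx);
 *	if (!entry)
 *		return;
 *
 *	entry->field = value;
 *
 *	head = this_cpu_ptr(event_call->perf_events);
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 */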
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);