#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("task %s:%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of kthread_stop():
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret %d", __entry->ret)
);

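/*
 * Illustrative sketch (not part of the upstream header): a stop path
 * such as kthread_stop() is expected to bracket the actual stop with
 * the two events above, roughly:
 *
 *	trace_sched_kthread_stop(k);		<-- kthread being asked to stop
 *	ret = do_the_stop(k);			<-- hypothetical helper
 *	trace_sched_kthread_stop_ret(ret);	<-- value handed back to the caller
 *
 * trace_sched_kthread_stop() and trace_sched_kthread_stop_ret() are the
 * static inline wrappers generated from the TRACE_EVENT() definitions.
 */
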
/*
 * Tracepoint for waiting on task to unschedule:
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *        but used by the latency tracer plugin.)
 */
TRACE_EVENT(sched_wait_task,

	TP_PROTO(struct rq *rq, struct task_struct *p),

	TP_ARGS(rq, p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio;
	),

	TP_printk("task %s:%d [%d]",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for waking up a task:
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *        but used by the latency tracer plugin.)
 */
TRACE_EVENT(sched_wakeup,

	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	cpu			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->cpu		= task_cpu(p);
	),

	TP_printk("task %s:%d [%d] success=%d [%03d]",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->cpu)
);

/*
 * Tracepoint for waking up a new task:
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *        but used by the latency tracer plugin.)
 */
TRACE_EVENT(sched_wakeup_new,

	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	cpu			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->cpu		= task_cpu(p);
	),

	TP_printk("task %s:%d [%d] success=%d [%03d]",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->cpu)
);

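/*
 * Illustrative sketch (an assumption about the call sites, which live
 * in the scheduler core rather than in this header): sched_wakeup is
 * emitted when an existing task is made runnable, sched_wakeup_new when
 * a freshly forked task is woken for the first time, roughly:
 *
 *	trace_sched_wakeup(rq, p, success);	<-- from the try_to_wake_up() path
 *	trace_sched_wakeup_new(rq, p, 1);	<-- from the wake_up_new_task() path
 *
 * A rendered event looks roughly like:
 *
 *	task bash:1234 [120] success=1 [001]
 *
 * where [120] is the task's prio and [001] is the CPU reported by
 * task_cpu(p) at wakeup time.
 */
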
/*
 * Tracepoint for task switches, performed by the scheduler:
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *        but used by the latency tracer plugin.)
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= prev->state;
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state ?
		  __print_flags(__entry->prev_state, "|",
				{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);

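/*
 * Reading aid (not part of the original header): the single-letter
 * prev_state codes printed by __print_flags() above mirror the task
 * state bits in <linux/sched.h> of this era, i.e.
 *
 *	  1 "S" TASK_INTERRUPTIBLE	  2 "D" TASK_UNINTERRUPTIBLE
 *	  4 "T" __TASK_STOPPED		  8 "t" __TASK_TRACED
 *	 16 "Z" EXIT_ZOMBIE		 32 "X" EXIT_DEAD
 *	 64 "x" TASK_DEAD		128 "W" TASK_WAKEKILL
 *
 * and a prev_state of 0 (TASK_RUNNING) prints as "R". A rendered event
 * looks roughly like:
 *
 *	task bash:1234 [120] (S) ==> sshd:987 [120]
 */
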
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("task %s:%d [%d] from: %d to: %d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

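/*
 * Illustrative note (the caller lives in the scheduler core, so the
 * exact call site is an assumption): since orig_cpu is read back with
 * task_cpu(p) inside TP_fast_assign(), this event must fire while the
 * task is still on its source CPU, with only the destination passed in:
 *
 *	trace_sched_migrate_task(p, new_cpu);	<-- e.g. from set_task_cpu()
 *	...					<-- task then moves to new_cpu
 */
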
/*
 * Tracepoint for freeing a task:
 */
TRACE_EVENT(sched_process_free,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio;
	),

	TP_printk("task %s:%d [%d]",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for a task exiting:
 */
TRACE_EVENT(sched_process_exit,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio;
	),

	TP_printk("task %s:%d [%d]",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid	= pid_nr(pid);
		__entry->prio	= current->prio;
	),

	TP_printk("task %s:%d [%d]",
		  __entry->comm, __entry->pid, __entry->prio)
);

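/*
 * Reading aid (not part of the original header): note that comm and
 * prio above describe the *waiting* task (current), while pid is the
 * pid being waited for (pid_nr(pid)). So a line such as
 *
 *	task bash:1235 [120]
 *
 * means "bash is waiting for pid 1235", not that pid 1235 is named bash.
 */
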
/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("parent %s:%d child %s:%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for sending a signal:
 */
TRACE_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_STRUCT__entry(
		__field(	int,	sig			)
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->sig	= sig;
	),

	TP_printk("sig: %d task %s:%d",
		  __entry->sig, __entry->comm, __entry->pid)
);

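/*
 * Reading aid (an assumption about the call site, which sits in the
 * signal delivery code rather than in this header): 'p' is the task
 * the signal is being sent to, so the printed "task %s:%d" names the
 * receiver of the signal, not the sender.
 */
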
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
TRACE_EVENT(sched_stat_wait,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);
	),

	TP_printk("task: %s:%d wait: %Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
TRACE_EVENT(sched_stat_sleep,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);
	),

	TP_printk("task: %s:%d sleep: %Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
TRACE_EVENT(sched_stat_iowait,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);
	),

	TP_printk("task: %s:%d iowait: %Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);

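/*
 * Note on TP_perf_assign()/__perf_count() above (a reading aid; the
 * perf behaviour is described as an assumption about this era's
 * perf_counter code): when a sched_stat event is used as a perf
 * software event, __perf_count(delay) makes the event count by the
 * delay in nanoseconds rather than by 1 per occurrence, so e.g.
 *
 *	perf record -e sched:sched_stat_iowait -a sleep 10
 *
 * would weight samples by accumulated iowait time instead of by the
 * number of iowait episodes.
 */
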
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>