/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs and does not
 * span CPU idle events.
 */
u64 notrace trace_clock_local(void)
{
	unsigned long flags;
	u64 clock;

	/*
	 * sched_clock() is an architecture-implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
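	/*
	 * irqs are disabled across the read so we cannot be preempted
	 * and migrated to another CPU mid-read, which could otherwise
	 * mix sched_clock() values from two different CPUs.
	 */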
	raw_local_irq_save(flags);
	clock = sched_clock();
	raw_local_irq_restore(flags);

	return clock;
}

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD-derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

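/*
 * Last timestamp handed out by trace_clock_global(); protected by
 * trace_clock_lock below.
 */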
static u64 prev_trace_clock_time;

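/*
 * The lock is kept on its own cache line to limit false sharing with
 * other data when CPUs contend on it.
 */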
static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	__raw_spin_lock(&trace_clock_lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_trace_clock_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
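	/*
	 * Wrap-safe signed comparison: if cpu_clock() is behind the
	 * last timestamp we handed out, nudge it one tick ahead so
	 * the global clock stays monotonic.
	 */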
	if ((s64)(now - prev_trace_clock_time) < 0)
		now = prev_trace_clock_time + 1;

	prev_trace_clock_time = now;

	__raw_spin_unlock(&trace_clock_lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
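
/*
 * Usage sketch (illustrative only, not part of this file): a tracer
 * plugin timestamps events with whichever variant matches the
 * cross-CPU coherency it needs:
 *
 *	u64 ts;
 *
 *	ts = trace_clock_local();	- fastest, per-CPU ordering only
 *	ts = trace_clock();		- scalable, ~1 jiffy cross-CPU jitter
 *	ts = trace_clock_global();	- serialized, globally monotonic
 */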