/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *  Ingo Molnar <mingo@redhat.com>
 *  Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and apply the unstable sched_clock() deltas on
 * top. The deltas are filtered to keep the clock monotonic and within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies apart).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
        return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
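
/*
 * The granularity of this fallback is 1/HZ, since it only advances with
 * the jiffies counter: e.g. with HZ=1000 each jiffy steps the clock by
 * NSEC_PER_SEC / HZ = 1,000,000 ns (1 ms). (Illustrative value; HZ is a
 * build-time configuration choice.)
 */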

static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

struct sched_clock_data {
        /*
         * Raw spinlock - this is a special case: this might be called
         * from within instrumentation code so we don't want to do any
         * instrumentation ourselves.
         */
        raw_spinlock_t lock;

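        /*
         * tick_raw:  sched_clock() value sampled at the last tick
         * tick_gtod: GTOD (ktime) value sampled at the last tick
         * clock:     most recent filtered clock value
         */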
        u64 tick_raw;
        u64 tick_gtod;
        u64 clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
        return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
        return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
        u64 ktime_now = ktime_to_ns(ktime_get());
        int cpu;

        for_each_possible_cpu(cpu) {
                struct sched_clock_data *scd = cpu_sdc(cpu);

                scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                scd->tick_raw = 0;
                scd->tick_gtod = ktime_now;
                scd->clock = ktime_now;
        }

        sched_clock_running = 1;
}

/*
 * min()/max() variants that remain correct when the u64 values wrap around.
 */

static inline u64 wrap_min(u64 x, u64 y)
{
        return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
        return (s64)(x - y) > 0 ? x : y;
}
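
/*
 * Example (illustrative): with x = 5 and y = ULLONG_MAX - 2, x - y wraps
 * around to 8, so (s64)(x - y) > 0 and wrap_max() returns x -- correctly
 * treating a value that has wrapped past 2^64 as being "after" one that
 * has not, where a plain max() would have returned y.
 */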

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
        s64 delta = now - scd->tick_raw;
        u64 clock, min_clock, max_clock;

        WARN_ON_ONCE(!irqs_disabled());

        if (unlikely(delta < 0))
                delta = 0;

        /*
         * scd->clock = clamp(scd->tick_gtod + delta,
         *                    max(scd->tick_gtod, scd->clock),
         *                    scd->tick_gtod + TICK_NSEC);
         */
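        /*
         * Worked example (illustrative numbers, assuming HZ=1000 so
         * TICK_NSEC == 1000000): with tick_gtod = 1000000000,
         * scd->clock = 1000200000 and delta = 1500000 we get
         * clock = 1001500000, min_clock = 1000200000 and
         * max_clock = 1001000000, so the result is clamped down
         * to 1001000000.
         */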

        clock = scd->tick_gtod + delta;
        min_clock = wrap_max(scd->tick_gtod, scd->clock);
        max_clock = scd->tick_gtod + TICK_NSEC;

        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);

        scd->clock = clock;

        return scd->clock;
}

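/*
 * Take the two per-cpu clock locks in a fixed (address) order, so that
 * two cpus cross-locking each other's data cannot ABBA-deadlock.
 */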
static void lock_double_clock(struct sched_clock_data *data1,
                              struct sched_clock_data *data2)
{
        if (data1 < data2) {
                __raw_spin_lock(&data1->lock);
                __raw_spin_lock(&data2->lock);
        } else {
                __raw_spin_lock(&data2->lock);
                __raw_spin_lock(&data1->lock);
        }
}

u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd = cpu_sdc(cpu);
        u64 now, clock, this_clock, remote_clock;

        if (unlikely(!sched_clock_running))
                return 0ull;

        WARN_ON_ONCE(!irqs_disabled());
        now = sched_clock();

        if (cpu != raw_smp_processor_id()) {
                struct sched_clock_data *my_scd = this_scd();

                lock_double_clock(scd, my_scd);

                this_clock = __update_sched_clock(my_scd, now);
                remote_clock = scd->clock;

                /*
                 * Use the opportunity that we have both locks
                 * taken to couple the two clocks: we take the
                 * larger time as the latest time for both
                 * runqueues. (this creates monotonic movement)
                 */
                if (likely((s64)(remote_clock - this_clock) < 0)) {
                        clock = this_clock;
                        scd->clock = clock;
                } else {
                        /*
                         * Should be rare, but possible:
                         */
                        clock = remote_clock;
                        my_scd->clock = remote_clock;
                }

                __raw_spin_unlock(&my_scd->lock);
        } else {
                __raw_spin_lock(&scd->lock);
                clock = __update_sched_clock(scd, now);
        }

        __raw_spin_unlock(&scd->lock);

        return clock;
}

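/*
 * Called from the regular timer tick with irqs disabled; resamples both
 * the raw sched_clock() and GTOD baselines, which define the filter
 * window used by __update_sched_clock() until the next tick.
 */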
void sched_clock_tick(void)
{
        struct sched_clock_data *scd = this_scd();
        u64 now, now_gtod;

        if (unlikely(!sched_clock_running))
                return;

        WARN_ON_ONCE(!irqs_disabled());

        now_gtod = ktime_to_ns(ktime_get());
        now = sched_clock();

        __raw_spin_lock(&scd->lock);
        scd->tick_raw = now;
        scd->tick_gtod = now_gtod;
        __update_sched_clock(scd, now);
        __raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
        sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
        sched_clock_tick();
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
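
/*
 * The two idle hooks pair up: the sleep event snapshots the clock just
 * before the TSC stops, and the wakeup event resynchronizes against GTOD
 * via sched_clock_tick(), so time spent idle is accounted even though the
 * raw clock did not advance. (delta_ns is currently unused; the GTOD
 * resync covers the idle period.)
 */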

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
        sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
        if (unlikely(!sched_clock_running))
                return 0;

        return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

unsigned long long cpu_clock(int cpu)
{
        unsigned long long clock;
        unsigned long flags;

        local_irq_save(flags);
        clock = sched_clock_cpu(cpu);
        local_irq_restore(flags);

        return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);
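
/*
 * Example usage (an illustrative sketch, not part of this file; do_work()
 * is a hypothetical callee): timing a section of code in nanoseconds.
 * cpu_clock() disables interrupts around the snapshot itself, so it may
 * be called from any context.
 *
 *      unsigned long long t0, t1;
 *      int cpu = get_cpu();
 *
 *      t0 = cpu_clock(cpu);
 *      do_work();
 *      t1 = cpu_clock(cpu);
 *      put_cpu();
 *
 * t1 - t0 is then the elapsed time in ns, monotonic on this cpu.
 */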