/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and apply the unstable clock deltas on top. The
 * deltas are filtered to keep the clock monotonic and within an expected
 * window. This window is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than one jiffy of difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation; architectures and
 * sub-architectures can override it.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;	/* jiffies at the last tick */
	u64			prev_raw;	/* raw sched_clock() at the last update */
	u64			tick_raw;	/* raw sched_clock() at the last tick */
	u64			tick_gtod;	/* gtod value at the last tick */
	u64			clock;		/* the filtered, monotonic clock */
};

/*
 * Per-cpu clock state; cacheline-aligned so that a cpu updating its own
 * clock does not false-share with its neighbours.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * update the per-cpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min/max window to clip the raw values
 */
static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());
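	/*
	 * The lower edge of the window: gtod as of the last tick plus
	 * the ticks that have elapsed since. The clock must not fall
	 * behind this value.
	 */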
	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;

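	/*
	 * sched_clock() moved backwards; drop the delta and advance the
	 * clock by the smallest possible amount to keep it monotonic.
	 */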
	if (unlikely(delta < 0)) {
		clock++;
		goto out;
	}

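	/*
	 * The upper edge of the window is one tick past the lower edge;
	 * clip deltas that would push the clock beyond it.
	 */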
	max_clock = min_clock + TICK_NSEC;

	if (unlikely(clock + delta > max_clock)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

 out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	scd->prev_raw = now;
	scd->tick_jiffies = now_jiffies;
	scd->clock = clock;
}

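/*
 * Take both per-cpu clock locks in a fixed (address) order, so that two
 * cpus cross-referencing each other's clocks cannot ABBA-deadlock.
 */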
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}

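/*
 * Return the filtered clock for @cpu; the caller must have irqs disabled.
 * Reading a remote cpu's clock takes both that cpu's lock and our own, so
 * it is considerably more expensive than the local case.
 */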
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * In order to update a remote cpu's clock based on our
		 * unstable raw time, rebase it against:
		 *	tick_raw	(offset between raw counters)
		 *	tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
	}

	__update_sched_clock(scd, now);
	clock = scd->clock;

	__raw_spin_unlock(&scd->lock);

	return clock;
}

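/*
 * Called from the timer tick with irqs disabled; resynchronize the raw
 * and gtod timestamps that anchor the filtering window.
 */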
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock by 2 jiffies.
	 */
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled for delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#endif

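/*
 * IRQ-safe wrapper around sched_clock_cpu(): usable from any context,
 * since it disables interrupts itself before reading the clock.
 */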
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);