/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 *   Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and the unstable clock deltas. The deltas are
 * filtered, making the clock monotonic and keeping it within an expected
 * window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>

/*
 * Scheduler clock - returns current time in nanosecond units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}

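/*
 * Granularity example (HZ is a build-time constant; 1000 is only an assumed
 * value here): with HZ=1000, NSEC_PER_SEC / HZ == 1,000,000, so this
 * fallback only advances in 1,000,000 ns steps, once per timer tick.
 */
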
static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code, so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * min, max, except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}

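/*
 * Worked example (values chosen purely for illustration): if y has just
 * wrapped past 0, e.g. x = 5 and y = ULLONG_MAX - 2, then x - y == 8 and
 * (s64)(x - y) > 0, so wrap_max() correctly treats x as the later value
 * even though x < y when compared as plain u64s.
 */
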
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	s64 delta = now - scd->tick_raw;
	u64 clock, min_clock, max_clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (unlikely(delta < 0))
		delta = 0;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *                    max(scd->tick_gtod, scd->clock),
	 *                    scd->tick_gtod + TICK_NSEC);
	 */
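	/*
	 * Worked example (all numbers assumed for illustration): with
	 * TICK_NSEC = 1,000,000 (HZ=1000), tick_gtod = 1,000,000,000 and
	 * scd->clock = 1,000,000,500, a runaway delta of 5,000,000 would
	 * give clock = 1,005,000,000, which the window below clamps down
	 * to max_clock = 1,001,000,000; likewise a zero/backward delta can
	 * never drag clock below min_clock = 1,000,000,500.
	 */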

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, scd->clock);
	max_clock = scd->tick_gtod + TICK_NSEC;

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	scd->clock = clock;

	return scd->clock;
}

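/*
 * Lock two scd's. Always taking the locks in ascending address order gives
 * every cpu the same lock ordering and so avoids ABBA deadlock.
 */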
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}

u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock, this_clock, remote_clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		this_clock = __update_sched_clock(my_scd, now);
		remote_clock = scd->clock;

		/*
		 * Use the opportunity that we have both locks
		 * taken to couple the two clocks: we take the
		 * larger time as the latest time for both
		 * runqueues. (this creates monotonic movement)
		 */
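		/*
		 * E.g. (made-up values): this_clock = 1005 and
		 * remote_clock = 1000 -> the remote cpu's clock is pulled
		 * forward to 1005; it is never pulled backwards.
		 */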
		if (likely((s64)(remote_clock - this_clock) < 0)) {
			clock = this_clock;
			scd->clock = clock;
		} else {
			/*
			 * Should be rare, but possible:
			 */
			clock = remote_clock;
			my_scd->clock = remote_clock;
		}

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		clock = __update_sched_clock(scd, now);
	}

	__raw_spin_unlock(&scd->lock);

	return clock;
}

void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__update_sched_clock(scd, now);
	__raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif

unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);
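
/*
 * Typical usage sketch (illustrative only; do_work() is a made-up helper
 * and cpu is assumed to stay fixed between the two reads):
 *
 *	unsigned long long t0, t1;
 *
 *	t0 = cpu_clock(cpu);
 *	do_work();
 *	t1 = cpu_clock(cpu);
 *
 * t1 - t0 is then a nanosecond delta that is monotonic for that cpu.
 * cpu_clock() saves/restores irqs itself, so callers need no extra locking.
 */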