/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 * Ingo Molnar <mingo@redhat.com>
 * Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i) -- can be used from any context, including NMI.
 * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
 * local_clock() -- is cpu_clock() on the current cpu.
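 *
 * Illustrative sketch (do_work() is a hypothetical placeholder, not a
 * kernel API): timing a section on the current CPU, where the delta is
 * meaningful because both reads come from the same CPU:
 *
 *	u64 t0 = local_clock();
 *	do_work();
 *	u64 t1 = local_clock();
 *	pr_info("section took %llu ns\n", (unsigned long long)(t1 - t0));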
 *
 * How:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation simply uses
 * sched_clock(), which is then assumed to provide these properties on its
 * own (mostly it means the architecture provides a globally synchronized
 * highres time source).
 *
 * Otherwise it tries to create a semi-stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the clock within an
 * expected window around the GTOD base.
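 *
 * For example (illustrative numbers): if the last tick recorded
 * tick_gtod = 10,000,000 ns and sched_clock() then jumps ahead by
 * 40,000,000 ns, the filtered clock does not follow the jump; it is
 * clamped to at most tick_gtod + TICK_NSEC (11,000,000 ns with HZ=1000,
 * assuming the clock has not already run ahead of that bound).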
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 *
 * Notes:
 *
 * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
 * like cpufreq interrupts that can change the base clock (TSC) multiplier
 * and cause funny jumps in time -- although the filtering provided by
 * sched_clock_cpu() should mitigate serious artifacts, we cannot rely on it
 * in general, since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
 * sched_clock().
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
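 *
 * With jiffies granularity, resolution is one tick: NSEC_PER_SEC / HZ,
 * e.g. 1,000,000 ns with HZ=1000.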
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
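
/*
 * e.g. wrap_max(1, ULLONG_MAX) == 1: (s64)(1 - ULLONG_MAX) evaluates to
 * +2, so 1 is treated as lying just "after" a counter that wrapped.
 */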

/*
 * update the per-cpu scd from a fresh raw sched_clock() value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

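	/*
	 * Publish the new value; if another context (e.g. an NMI) updated
	 * scd->clock meanwhile, start over with a fresh sched_clock() read.
	 */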
	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Couple the two clocks: take the larger time as
	 * the latest time for both runqueues.
	 * (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

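	/* The clock we chose to advance may have moved concurrently. */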
	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}

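/*
 * Called from the periodic timer tick with IRQs disabled: record fresh
 * sched_clock() and GTOD samples as the base for the next filter window.
 */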
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
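 *
 * Note that delta_ns is not used directly; the clock is simply
 * resynchronized against GTOD via sched_clock_tick().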
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(smp_processor_id());
	local_irq_restore(flags);

	return clock;
}

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

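/*
 * In this configuration sched_clock_cpu() ignores its cpu argument, so
 * any value (here 0) works:
 */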
u64 local_clock(void)
{
	return sched_clock_cpu(0);
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);