/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data = {
	.lock = SEQLOCK_UNLOCKED,
};

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
   weak default version */
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#endif

#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(struct clocksource *cs)
{
	if (pv_time_ops.clocksource_resume)
		pv_time_ops.clocksource_resume();
}
#endif

static struct clocksource clocksource_itc = {
	.name		= "itc",
	.rating		= 350,
	.read		= itc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 0, /* to be calculated */
	.shift		= 16,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
	.resume		= paravirt_clocksource_resume,
#endif
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(next);
	cputime_t delta_stime, delta_utime;
	__u64 now;

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
	if (idle_task(smp_processor_id()) != prev)
		account_system_time(prev, 0, delta_stime, delta_stime);
	else
		account_idle_time(delta_stime);

	if (pi->ac_utime) {
		delta_utime = cycle_to_cputime(pi->ac_utime);
		account_user_time(prev, delta_utime, delta_utime);
	}

	pi->ac_stamp = ni->ac_stamp = now;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts enabled.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long flags;
	cputime_t delta_stime;
	__u64 now;

	local_irq_save(flags);

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
	if (irq_count() || idle_task(smp_processor_id()) != tsk)
		account_system_time(tsk, 0, delta_stime, delta_stime);
	else
		account_idle_time(delta_stime);
	ti->ac_stime = 0;

	ti->ac_stamp = now;

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

/*
 * Called from the timer interrupt handler to charge accumulated user time
 * to the current process.  Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	struct thread_info *ti = task_thread_info(p);
	cputime_t delta_utime;

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(p, delta_utime, delta_utime);
		ti->ac_utime = 0;
	}
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (unlikely(cpu_is_offline(smp_processor_id()))) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	if (paravirt_do_steal_accounting(&new_itm))
		goto skip_process_time_accounting;

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id) {
			/*
			 * Here we are in the timer irq handler.  We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU.  We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_seqlock(&xtime_lock);
			do_timer(1);
			local_cpu_data->itm_next = new_itm;
			write_sequnlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

skip_process_time_accounting:

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * do_timer() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
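
/*
 * Illustrative numbers for the tick-dropping logic above (not from the
 * source): with HZ=1000 and a 300 MHz ITC, itm_delta is ~300000 cycles.
 * If the next programmed match would land within itm_delta/2 (~150000
 * cycles) of the current ITC, whole ticks are skipped: new_itm is
 * advanced without calling do_timer(), so jiffies is never pushed ahead
 * of real time.
 */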

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}
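
/*
 * A worked example of the stagger formula above (illustrative values,
 * assuming ia64_fls() returns the bit position of the highest set bit):
 *   cpu 1: hi = 1, shift = delta/2
 *   cpu 2: hi = 2, shift = delta/4     cpu 3: hi = 2, shift = 3*delta/4
 *   cpu 4: hi = 4, shift = delta/8     cpu 5: hi = 4, shift = 3*delta/8
 * Each power-of-two group bisects the intervals left by the previous
 * group, spreading the per-CPU ticks evenly across one tick period.
 */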

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);


void __devinit
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
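
	/*
	 * Worked example with the fallback values above (illustrative): a
	 * 100 MHz base clock and a 3/1 ITC ratio give itc_freq = 300 MHz,
	 * so itm_delta = (300000000 + HZ/2)/HZ cycles per tick and
	 * cyc_per_usec = 300.  One cycle is then ~3.33 ns; nsec_per_cyc
	 * stores that as a fixed-point value scaled by
	 * 2^IA64_NSEC_PER_CYC_SHIFT, rounded to nearest by the itc_freq/2
	 * term.
	 */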

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing.  If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option.  However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * The clock should not be used for hrtimers, so mark the itc as
		 * only useful for boot and testing.
		 *
		 * Note that jitter compensation is off!  There is no point in
		 * compensating, since the ITCs may differ by large amounts
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs.  Until that time we have to avoid the ITC.
		 */
		clocksource_itc.rating = 50;

	paravirt_init_missing_ticks_accounting(smp_processor_id());

	/* avoid softlockup messages when a cpu is unplugged and plugged again. */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		/* Sort out mult/shift values: */
		clocksource_itc.mult =
			clocksource_hz2mult(local_cpu_data->itc_freq,
					    clocksource_itc.shift);
		clocksource_register(&clocksource_itc);
		itc_clocksource = &clocksource_itc;
	}
}
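
/*
 * The generic timekeeping code converts ITC cycles to nanoseconds as
 * ns = (cycles * mult) >> shift, with mult chosen by clocksource_hz2mult()
 * so that mult ~= (NSEC_PER_SEC << shift) / hz.  Illustrative numbers
 * (not from the source): at itc_freq = 300 MHz and shift = 16,
 * mult ~= (10^9 << 16) / (3*10^8) ~= 218453.
 */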

static cycle_t itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment we may lose the cmpxchg race; if so,
	 * cmpxchg returns the newer value that the winner of the race
	 * stored.  Use that value instead of our own.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}
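
/*
 * An illustrative two-CPU walkthrough of the clamp above (not from the
 * source): CPUs A and B both read itc_lastcycle = L, then read their
 * slightly unsynchronized ITCs as nA < nB.  Whichever cmpxchg runs first
 * wins; the loser's cmpxchg returns whatever the winner stored, and the
 * loser returns that value instead of its own read.  A successful
 * cmpxchg only ever replaces itc_lastcycle with a value that is not
 * behind it, so the clocksource never appears to run backward.
 */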


static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
	.name =		"timer"
};

static struct platform_device rtc_efi_dev = {
	.name = "rtc-efi",
	.id = -1,
};

static int __init rtc_init(void)
{
	if (platform_device_register(&rtc_efi_dev) < 0)
		printk(KERN_ERR "unable to register rtc device...\n");

	/* not necessarily an error */
	return 0;
}
module_init(rtc_init);

void read_persistent_clock(struct timespec *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	ia64_init_itm();
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}
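
/*
 * Illustrative arithmetic (not from the source): with cyc_per_usec = 300
 * (a 300 MHz ITC), udelay(10) computes end = start + 3000 and busy-waits
 * until the ITC has advanced past it; time_before() keeps the comparison
 * correct across counter wraparound.
 */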

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timespec *wall, struct timespec *wtm,
		     struct clocksource *c, u32 mult)
{
	unsigned long flags;

	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
						+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
						+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
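
/*
 * A minimal sketch of the reader side of the seqlock protocol above,
 * assuming the standard read_seqbegin()/read_seqretry() API (the real
 * ia64 fsyscall reader is hand-written assembly in fsys.S).  The example
 * function below is hypothetical and kept compiled out:
 */
#if 0
static void fsyscall_gtod_read_example(struct timespec *ts)
{
	unsigned seq;

	do {
		/* retry if the writer updated the data while we read it */
		seq = read_seqbegin(&fsyscall_gtod_data.lock);
		*ts = fsyscall_gtod_data.wall_time;
		/* a real reader would also fold in cycles elapsed since
		   clk_cycle_last using clk_mult/clk_shift */
	} while (read_seqretry(&fsyscall_gtod_data.lock, seq));
}
#endif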