/*
 * Copyright (C) 2004-2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/cpu.h>

#include <asm/sysreg.h>

#include <mach/pm.h>


static cycle_t read_cycle_count(struct clocksource *cs)
{
	return (cycle_t)sysreg_read(COUNT);
}

/*
 * The architectural cycle count registers are a fine clocksource unless
 * the system idle loop uses sleep states like "idle": the CPU cycles
 * measured by COUNT (and COMPARE) don't happen during sleep states.
 * Their duration also changes if cpufreq changes the CPU clock rate.
 * So we rate the clocksource using COUNT as very low quality.
 */
static struct clocksource counter = {
	.name		= "avr32_counter",
	.rating		= 50,
	.read		= read_cycle_count,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = dev_id;

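	/*
	 * The IRQ line is shared with oprofile (see timer_irqaction
	 * below), so return IRQ_NONE if our compare interrupt is not
	 * the one that is actually pending.
	 */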
	if (unlikely(!(intc_get_pending(0) & 1)))
		return IRQ_NONE;

	/*
	 * Disable the interrupt until the clockevent subsystem
	 * reprograms it.
	 */
	sysreg_write(COMPARE, 0);

	evdev->event_handler(evdev);
	return IRQ_HANDLED;
}

static struct irqaction timer_irqaction = {
	.handler	= timer_interrupt,
	/* Oprofile uses the same irq as the timer, so allow it to be shared */
	.flags		= IRQF_TIMER | IRQF_SHARED,
	.name		= "avr32_comparator",
};

static int comparator_next_event(unsigned long delta,
		struct clock_event_device *evdev)
{
	unsigned long flags;

	raw_local_irq_save(flags);

	/* The time to read COUNT then update COMPARE must be less
	 * than the min_delta_ns value for this clockevent source.
	 */
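	/*
	 * A COMPARE value of 0 disables the compare interrupt, so if
	 * COUNT + delta happens to wrap around to 0, write 1 instead.
	 */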
	sysreg_write(COMPARE, (sysreg_read(COUNT) + delta) ? : 1);

	raw_local_irq_restore(flags);

	return 0;
}

static void comparator_mode(enum clock_event_mode mode,
		struct clock_event_device *evdev)
{
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
		pr_debug("%s: start\n", evdev->name);
		/* FALLTHROUGH */
	case CLOCK_EVT_MODE_RESUME:
		/*
		 * If we're using the COUNT and COMPARE registers we
		 * need to force idle poll.
		 */
		cpu_idle_poll_ctrl(true);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
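		/* Writing 0 to COMPARE disables the compare interrupt */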
		sysreg_write(COMPARE, 0);
		pr_debug("%s: stop\n", evdev->name);
		if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
		    evdev->mode == CLOCK_EVT_MODE_RESUME) {
			/*
			 * Only disable idle poll if we have forced that
			 * in a previous call.
			 */
			cpu_idle_poll_ctrl(false);
		}
		break;
	default:
		BUG();
	}
}

static struct clock_event_device comparator = {
	.name		= "avr32_comparator",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 16,
	.rating		= 50,
	.set_next_event	= comparator_next_event,
	.set_mode	= comparator_mode,
};

void read_persistent_clock(struct timespec *ts)
{
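	/* No RTC is read here; just report a fixed date, 2007-01-01 00:00:00 */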
	ts->tv_sec = mktime(2007, 1, 1, 0, 0, 0);
	ts->tv_nsec = 0;
}

void __init time_init(void)
{
	unsigned long counter_hz;
	int ret;

	/* figure rate for counter */
	counter_hz = clk_get_rate(boot_cpu_data.clk);
	ret = clocksource_register_hz(&counter, counter_hz);
	if (ret)
		pr_debug("timer: could not register clocksource: %d\n", ret);

	/* setup COMPARE clockevent */
	comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
	comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
	comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
	comparator.cpumask = cpumask_of(0);

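	/* Start with the compare interrupt disabled (COMPARE == 0) */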
	sysreg_write(COMPARE, 0);
	timer_irqaction.dev_id = &comparator;

	ret = setup_irq(0, &timer_irqaction);
	if (ret)
		pr_debug("timer: could not request IRQ 0: %d\n", ret);
	else {
		clockevents_register_device(&comparator);

		pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
				((counter_hz + 500) / 1000) / 1000,
				((counter_hz + 500) / 1000) % 1000);
	}
}