/*
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * Licensed under the terms of the GNU GPL License version 2.
 *
 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/timex.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>

static struct cpufreq_driver longrun_driver;

/**
 * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
 * values into per cent values. In TMTA microcode, the following is valid:
 * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
 */
static unsigned int longrun_low_freq, longrun_high_freq;
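/*
 * Illustrative example (hypothetical values, not taken from TMTA
 * documentation): with low_freq = 300000 kHz and high_freq = 600000 kHz,
 * a current frequency of 450000 kHz corresponds to
 * performance_pctg = (450000 - 300000) / (600000 - 300000) = 50%.
 */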


/**
 * longrun_get_policy - get the current LongRun policy
 * @policy: struct cpufreq_policy where current policy is written into
 *
 * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
 * and MSR_TMTA_LONGRUN_CTRL
 */
static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
{
        u32 msr_lo, msr_hi;

        rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
        pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
        if (msr_lo & 0x01)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
        msr_lo &= 0x0000007F;
        msr_hi &= 0x0000007F;
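        /*
         * The low seven bits of LONGRUN_CTRL carry the lower (msr_lo) and
         * upper (msr_hi) performance percentage bounds; they are converted
         * back to kHz below.
         */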

        if (longrun_high_freq <= longrun_low_freq) {
                /* Assume degenerate Longrun table */
                policy->min = policy->max = longrun_high_freq;
        } else {
                policy->min = longrun_low_freq + msr_lo *
                        ((longrun_high_freq - longrun_low_freq) / 100);
                policy->max = longrun_low_freq + msr_hi *
                        ((longrun_high_freq - longrun_low_freq) / 100);
        }
        policy->cpu = 0;
}


/**
 * longrun_set_policy - sets a new CPUFreq policy
 * @policy: new policy
 *
 * Sets a new CPUFreq policy on LongRun-capable processors. This function
 * has to be called with cpufreq_driver locked.
 */
static int longrun_set_policy(struct cpufreq_policy *policy)
{
        u32 msr_lo, msr_hi;
        u32 pctg_lo, pctg_hi;

        if (!policy)
                return -EINVAL;

        if (longrun_high_freq <= longrun_low_freq) {
                /* Assume degenerate Longrun table */
                pctg_lo = pctg_hi = 100;
        } else {
                pctg_lo = (policy->min - longrun_low_freq) /
                        ((longrun_high_freq - longrun_low_freq) / 100);
                pctg_hi = (policy->max - longrun_low_freq) /
                        ((longrun_high_freq - longrun_low_freq) / 100);
        }
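        /*
         * Illustrative example (hypothetical values): with low_freq =
         * 300000 kHz, high_freq = 600000 kHz and policy->max = 450000 kHz,
         * pctg_hi = (450000 - 300000) / ((600000 - 300000) / 100) = 50.
         */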

        if (pctg_hi > 100)
                pctg_hi = 100;
        if (pctg_lo > pctg_hi)
                pctg_lo = pctg_hi;

        /* performance or economy mode */
        rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
        msr_lo &= 0xFFFFFFFE;
        switch (policy->policy) {
        case CPUFREQ_POLICY_PERFORMANCE:
                msr_lo |= 0x00000001;
                break;
        case CPUFREQ_POLICY_POWERSAVE:
                break;
        }
        wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);

        /* lower and upper boundary */
        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        msr_lo &= 0xFFFFFF80;
        msr_hi &= 0xFFFFFF80;
        msr_lo |= pctg_lo;
        msr_hi |= pctg_hi;
        wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);

        return 0;
}

/**
 * longrun_verify_policy - verifies a new CPUFreq policy
 * @policy: the policy to verify
 *
 * Validates a new CPUFreq policy. This function has to be called with
 * cpufreq_driver locked.
 */
static int longrun_verify_policy(struct cpufreq_policy *policy)
{
        if (!policy)
                return -EINVAL;

        policy->cpu = 0;
        cpufreq_verify_within_limits(policy,
                policy->cpuinfo.min_freq,
                policy->cpuinfo.max_freq);

        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
            (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
                return -EINVAL;

        return 0;
}

static unsigned int longrun_get(unsigned int cpu)
{
        u32 eax, ebx, ecx, edx;

        if (cpu)
                return 0;

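        /*
         * EAX of Transmeta CPUID leaf 0x80860007 reports the current core
         * frequency in MHz (see also longrun_determine_freqs() below);
         * scale it to kHz for the cpufreq core.
         */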
        cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
        pr_debug("cpuid eax is %u\n", eax);

        return eax * 1000;
}

/**
 * longrun_determine_freqs - determines the lowest and highest possible core frequency
 * @low_freq: an int to put the lowest frequency into
 * @high_freq: an int to put the highest frequency into
 *
 * Determines the lowest and highest possible core frequencies on this CPU.
 * This is necessary to calculate the performance percentage according to
 * TMTA rules:
 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
 */
static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
                                             unsigned int *high_freq)
{
        u32 msr_lo, msr_hi;
        u32 save_lo, save_hi;
        u32 eax, ebx, ecx, edx;
        u32 try_hi;
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (!low_freq || !high_freq)
                return -EINVAL;

        if (cpu_has(c, X86_FEATURE_LRTI)) {
                /* if the LongRun Table Interface is present, the
                 * detection is a bit easier:
                 * For minimum frequency, read out the maximum
                 * level (msr_hi), write that into "currently
                 * selected level", and read out the frequency.
                 * For maximum frequency, read out level zero.
                 */
                /* minimum */
                rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
                wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
                rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
                *low_freq = msr_lo * 1000; /* to kHz */

                /* maximum */
                wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
                rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
                *high_freq = msr_lo * 1000; /* to kHz */

                pr_debug("longrun table interface told %u - %u kHz\n",
                                *low_freq, *high_freq);

                if (*low_freq > *high_freq)
                        *low_freq = *high_freq;
                return 0;
        }

        /* set the upper border to the value determined during TSC init */
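        /* (cpu_khz / 1000) * 1000 rounds cpu_khz down to whole-MHz
         * granularity before using it as the upper bound in kHz. */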
        *high_freq = (cpu_khz / 1000);
        *high_freq = *high_freq * 1000;
        pr_debug("high frequency is %u kHz\n", *high_freq);

        /* get current borders */
        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        save_lo = msr_lo & 0x0000007F;
        save_hi = msr_hi & 0x0000007F;

        /* if current perf_pctg is larger than 90%, we need to decrease the
         * upper limit to make the calculation more accurate.
         */
        cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
        /* try decreasing in 10% steps, some processors react only
         * on some barrier values */
        for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
                /* set to 0 to try_hi perf_pctg */
                msr_lo &= 0xFFFFFF80;
                msr_hi &= 0xFFFFFF80;
                msr_hi |= try_hi;
                wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);

                /* read out current core MHz and current perf_pctg */
                cpuid(0x80860007, &eax, &ebx, &ecx, &edx);

                /* restore values */
                wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
        }
        pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax);

        /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
         * equals
         * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
         *
         * high_freq * perf_pctg is stored temporarily into "ebx".
         */
        ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */

        if ((ecx > 95) || (ecx == 0) || (eax < ebx))
                return -EIO;

        edx = ((eax - ebx) * 100) / (100 - ecx);
        *low_freq = edx * 1000; /* back to kHz */
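        /*
         * Worked example (hypothetical numbers): with cpu_khz = 600000,
         * ecx = 50 % and eax = 450 MHz, ebx = 600 * 50 / 100 = 300 MHz and
         * edx = (450 - 300) * 100 / (100 - 50) = 300 MHz, i.e. low_freq =
         * 300000 kHz, consistent with (450 - 300) / (600 - 300) = 50 %.
         */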

        pr_debug("low frequency is %u kHz\n", *low_freq);

        if (*low_freq > *high_freq)
                *low_freq = *high_freq;

        return 0;
}


static int __cpuinit longrun_cpu_init(struct cpufreq_policy *policy)
{
        int result = 0;

        /* capability check */
        if (policy->cpu != 0)
                return -ENODEV;

        /* detect low and high frequency */
        result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
        if (result)
                return result;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = longrun_low_freq;
        policy->cpuinfo.max_freq = longrun_high_freq;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        longrun_get_policy(policy);

        return 0;
}


static struct cpufreq_driver longrun_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = longrun_verify_policy,
        .setpolicy      = longrun_set_policy,
        .get            = longrun_get,
        .init           = longrun_cpu_init,
        .name           = "longrun",
        .owner          = THIS_MODULE,
};

static const struct x86_cpu_id longrun_ids[] = {
        { X86_VENDOR_TRANSMETA, X86_FAMILY_ANY, X86_MODEL_ANY,
          X86_FEATURE_LONGRUN },
        {}
};
MODULE_DEVICE_TABLE(x86cpu, longrun_ids);

/**
 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
 *
 * Initializes the LongRun support.
 */
static int __init longrun_init(void)
{
        if (!x86_match_cpu(longrun_ids))
                return -ENODEV;
        return cpufreq_register_driver(&longrun_driver);
}


/**
 * longrun_exit - unregisters LongRun support
 */
static void __exit longrun_exit(void)
{
        cpufreq_unregister_driver(&longrun_driver);
}


MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
                "Efficeon processors.");
MODULE_LICENSE("GPL");

module_init(longrun_init);
module_exit(longrun_exit);