/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tick.h>
#include <linux/types.h>

#include "cpufreq_governor.h"

/* On-demand governor macros (thresholds in percent, sample rates in usecs) */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static struct dbs_data od_dbs_data;
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

static struct od_dbs_tuners od_tuners = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
			    DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};

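/*
 * With the defaults above, adj_up_threshold starts at 80 - 10 = 70: the
 * governor picks a lower frequency only when the projected load at that
 * frequency stays at least 10 percentage points under up_threshold.
 */
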
static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known CPU series by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find the right frequency to set when powersave_bias is on.
 * Returns the freq_hi to use right now, and sets freq_hi_jiffies, freq_lo,
 * and freq_lo_jiffies in the per-CPU area so that alternating between the
 * two frequencies averages out to the biased target.
 */
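/*
 * Worked example with illustrative numbers (not from the source): with
 * powersave_bias = 100 (i.e. 10%) and a 2000000 kHz request, freq_avg is
 * 1800000 kHz.  If the nearest table entries are 1600000 and 2000000 kHz,
 * jiffies_hi = (1800000 - 1600000) * jiffies_total / (2000000 - 1600000),
 * so the CPU spends half of each sampling period at each frequency and
 * averages the requested 1800000 kHz.
 */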
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						      policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (od_tuners.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check: if current idle time is less than 20%
 * (default), then we try to increase the frequency.  Every sampling_rate we
 * also look for the lowest frequency which can sustain the load while keeping
 * idle time over 30%.  If such a frequency exists, we try to decrease to it.
 *
 * Any frequency increase takes the CPU to the maximum frequency; frequency
 * reduction happens in minimum steps of 5% (default) of the current frequency.
 */
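/*
 * Illustrative numbers (assumed, not from the source): dbs_check_cpu()
 * passes load_freq, the measured load scaled by the current (or average)
 * frequency.  With load = 50 at policy->cur = 2000000 kHz and
 * adj_up_threshold = 70, freq_next = 50 * 2000000 / 70, about 1428571 kHz,
 * and CPUFREQ_RELATION_L then selects the lowest table frequency at or
 * above that value.
 */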
static void od_check_cpu(int cpu, unsigned int load_freq)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load_freq > od_tuners.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support the
	 * current CPU usage without triggering the up policy.  To be safe, we
	 * stay 10 percentage points under the up threshold.
	 */
	if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / od_tuners.adj_up_threshold;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}

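/*
 * Timer callback.  When powersave_bias is active, each sampling period is
 * split in two: a NORMAL_SAMPLE phase at freq_hi followed by a SUB_SAMPLE
 * phase at freq_lo, using the jiffies split computed by
 * powersave_bias_target() above.
 */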
static void od_dbs_timer(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	int delay, sample_type = core_dbs_info->sample_type;
	bool eval_load;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	eval_load = need_load_eval(&core_dbs_info->cdbs,
			od_tuners.sampling_rate);

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = core_dbs_info->freq_lo_jiffies;
		if (eval_load)
			__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
					core_dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
	} else {
		if (eval_load)
			dbs_check_cpu(&od_dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		} else {
			delay = delay_for_sampling_rate(od_tuners.sampling_rate
						* core_dbs_info->rate_mult);
		}
	}

	schedule_delayed_work_on(smp_processor_id(), dw, delay);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
}

/**
 * update_sampling_rate - update sampling rate immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old one, simply updating
 * od_tuners.sampling_rate might not be appropriate.  For example, suppose the
 * original sampling_rate was 1 second and the requested new rate is 10 ms
 * because the user wants an immediate reaction from the ondemand governor.
 * If the governor only picks up the new value at the next sample, the change
 * may take effect up to 1 second too late.  Thus, when reducing the sampling
 * rate, we need to make the new value effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	od_tuners.sampling_rate = new_rate = max(new_rate,
			od_dbs_data.min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {
			/*
			 * Drop the mutex before cancelling: the work item
			 * itself takes timer_mutex, so cancelling
			 * synchronously while holding it could deadlock.
			 */
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
					usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	update_sampling_rate(input);
	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners.io_is_busy = !!input;
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	/* Calculate the new adj_up_threshold, preserving the differential */
	od_tuners.adj_up_threshold += input;
	od_tuners.adj_up_threshold -= od_tuners.up_threshold;

	od_tuners.up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
		struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	od_tuners.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
		const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == od_tuners.ignore_nice) { /* nothing to do */
		return count;
	}
	od_tuners.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
				&dbs_info->cdbs.prev_cpu_wall);
		if (od_tuners.ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners.powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

show_one(od, sampling_rate, sampling_rate);
show_one(od, io_is_busy, io_is_busy);
show_one(od, up_threshold, up_threshold);
show_one(od, sampling_down_factor, sampling_down_factor);
show_one(od, ignore_nice_load, ignore_nice);
show_one(od, powersave_bias, powersave_bias);

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
define_one_global_ro(sampling_rate_min);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group od_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
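
/*
 * These attributes appear under /sys/devices/system/cpu/cpufreq/ondemand/
 * while the governor is in use.  Illustrative shell usage (not part of
 * this file):
 *
 *	echo 95 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *	echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
 */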

/************************** sysfs end ************************/

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
	.io_busy = should_io_be_busy,
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

static struct dbs_data od_dbs_data = {
	.governor = GOV_ONDEMAND,
	.attr_group = &od_attr_group,
	.tuners = &od_tuners,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
};

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(&od_dbs_data, policy, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
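
/*
 * Illustrative shell usage (not part of this file): a policy is switched
 * to this governor through the cpufreq sysfs interface, e.g.
 *
 *	echo ondemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */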

static int __init cpufreq_gov_dbs_init(void)
{
	u64 idle_time;
	int cpu = get_cpu();

	mutex_init(&od_dbs_data.mutex);
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
					     MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}
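
	/*
	 * Illustrative arithmetic, assuming MIN_SAMPLING_RATE_RATIO is 2 as
	 * defined in cpufreq_governor.h: with HZ=100 the tick-based fallback
	 * above yields 2 * 100000 us = 200 ms, whereas the micro-accounting
	 * path allows sampling as often as every 10 ms.
	 */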

	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
555 |