Root/target/linux/xburst/patches-3.0/0028-Add-cpufreq-support.patch

1From b0b7794018b75ce33be133664fb58ab3e6efc2e8 Mon Sep 17 00:00:00 2001
2From: Xiangfu Liu <xiangfu@macbook.openmobilefree.net>
3Date: Wed, 14 Sep 2011 14:29:52 +0800
4Subject: [PATCH 28/32] Add cpufreq support
5
6---
7 drivers/cpufreq/cpufreq_stats.c | 161 ++++++++++++++++++++-------------------
8 drivers/mmc/host/jz4740_mmc.c | 69 ++++++++++++++++-
9 2 files changed, 150 insertions(+), 80 deletions(-)
10
11diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
12index faf7c52..86d032c 100644
13--- a/drivers/cpufreq/cpufreq_stats.c
14+++ b/drivers/cpufreq/cpufreq_stats.c
15@@ -20,6 +20,7 @@
16 #include <linux/kobject.h>
17 #include <linux/spinlock.h>
18 #include <linux/notifier.h>
19+#include <linux/string.h>
20 #include <asm/cputime.h>
21 
22 static spinlock_t cpufreq_stats_lock;
23@@ -36,7 +37,7 @@ struct cpufreq_stats {
24     unsigned long long last_time;
25     unsigned int max_state;
26     unsigned int state_num;
27- unsigned int last_index;
28+ int last_index;
29     cputime64_t *time_in_state;
30     unsigned int *freq_table;
31 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
32@@ -59,7 +60,7 @@ static int cpufreq_stats_update(unsigned int cpu)
33     cur_time = get_jiffies_64();
34     spin_lock(&cpufreq_stats_lock);
35     stat = per_cpu(cpufreq_stats_table, cpu);
36- if (stat->time_in_state)
37+ if (stat->time_in_state && stat->last_index != -1)
38         stat->time_in_state[stat->last_index] =
39             cputime64_add(stat->time_in_state[stat->last_index],
40                       cputime_sub(cur_time, stat->last_time));
41@@ -82,7 +83,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
42     ssize_t len = 0;
43     int i;
44     struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
45- if (!stat)
46+ if (!stat || !stat->time_in_state)
47         return 0;
48     cpufreq_stats_update(stat->cpu);
49     for (i = 0; i < stat->state_num; i++) {
50@@ -100,7 +101,7 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
51     int i, j;
52 
53     struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
54- if (!stat)
55+ if (!stat || !stat->trans_table)
56         return 0;
57     cpufreq_stats_update(stat->cpu);
58     len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
59@@ -159,63 +160,35 @@ static struct attribute_group stats_attr_group = {
60 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
61 {
62     int index;
63- for (index = 0; index < stat->max_state; index++)
64- if (stat->freq_table[index] == freq)
65- return index;
66+ if (stat->freq_table)
67+ for (index = 0; index < stat->max_state; index++)
68+ if (stat->freq_table[index] == freq)
69+ return index;
70     return -1;
71 }
72 
73-/* should be called late in the CPU removal sequence so that the stats
74- * memory is still available in case someone tries to use it.
75- */
76 static void cpufreq_stats_free_table(unsigned int cpu)
77 {
78     struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
79+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
80+ if (policy && policy->cpu == cpu)
81+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
82     if (stat) {
83         kfree(stat->time_in_state);
84         kfree(stat);
85     }
86     per_cpu(cpufreq_stats_table, cpu) = NULL;
87-}
88-
89-/* must be called early in the CPU removal sequence (before
90- * cpufreq_remove_dev) so that policy is still valid.
91- */
92-static void cpufreq_stats_free_sysfs(unsigned int cpu)
93-{
94- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
95- if (policy && policy->cpu == cpu)
96- sysfs_remove_group(&policy->kobj, &stats_attr_group);
97     if (policy)
98         cpufreq_cpu_put(policy);
99 }
100 
101-static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
102+static int cpufreq_stats_update_table(struct cpufreq_policy *policy,
103         struct cpufreq_frequency_table *table)
104 {
105- unsigned int i, j, count = 0, ret = 0;
106- struct cpufreq_stats *stat;
107- struct cpufreq_policy *data;
108+ unsigned int i, j, count = 0;
109     unsigned int alloc_size;
110     unsigned int cpu = policy->cpu;
111- if (per_cpu(cpufreq_stats_table, cpu))
112- return -EBUSY;
113- stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
114- if ((stat) == NULL)
115- return -ENOMEM;
116-
117- data = cpufreq_cpu_get(cpu);
118- if (data == NULL) {
119- ret = -EINVAL;
120- goto error_get_fail;
121- }
122-
123- ret = sysfs_create_group(&data->kobj, &stats_attr_group);
124- if (ret)
125- goto error_out;
126-
127- stat->cpu = cpu;
128- per_cpu(cpufreq_stats_table, cpu) = stat;
129+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
130 
131     for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
132         unsigned int freq = table[i].frequency;
133@@ -224,40 +197,73 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
134         count++;
135     }
136 
137+ if (stat->max_state != count) {
138+ stat->max_state = count;
139+ kfree(stat->time_in_state);
140+ stat->time_in_state = NULL;
141+ }
142     alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
143-
144 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
145     alloc_size += count * count * sizeof(int);
146 #endif
147- stat->max_state = count;
148- stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
149- if (!stat->time_in_state) {
150- ret = -ENOMEM;
151- goto error_out;
152- }
153- stat->freq_table = (unsigned int *)(stat->time_in_state + count);
154-
155+ if (stat->time_in_state) {
156+ memset(stat->time_in_state, 0, alloc_size);
157+ } else {
158+ stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
159+ if (!stat->time_in_state)
160+ return -ENOMEM;
161+ stat->freq_table = (unsigned int *)(
162+ stat->time_in_state + count);
163 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
164- stat->trans_table = stat->freq_table + count;
165+ stat->trans_table = stat->freq_table + count;
166 #endif
167+ }
168+
169     j = 0;
170- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
171- unsigned int freq = table[i].frequency;
172- if (freq == CPUFREQ_ENTRY_INVALID)
173- continue;
174- if (freq_table_get_index(stat, freq) == -1)
175- stat->freq_table[j++] = freq;
176+ if (stat->freq_table) {
177+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
178+ unsigned int freq = table[i].frequency;
179+ if (freq == CPUFREQ_ENTRY_INVALID)
180+ continue;
181+ if (freq_table_get_index(stat, freq) == -1)
182+ stat->freq_table[j++] = freq;
183+ }
184     }
185     stat->state_num = j;
186     spin_lock(&cpufreq_stats_lock);
187     stat->last_time = get_jiffies_64();
188     stat->last_index = freq_table_get_index(stat, policy->cur);
189     spin_unlock(&cpufreq_stats_lock);
190+ return 0;
191+}
192+
193+static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
194+ struct cpufreq_frequency_table *table)
195+{
196+ unsigned int ret = 0;
197+ struct cpufreq_stats *stat;
198+ struct cpufreq_policy *data;
199+ unsigned int cpu = policy->cpu;
200+
201+ stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
202+ if ((stat) == NULL)
203+ return -ENOMEM;
204+
205+ data = cpufreq_cpu_get(cpu);
206+ if (data == NULL) {
207+ ret = -EINVAL;
208+ goto error_out;
209+ }
210+ ret = sysfs_create_group(&data->kobj, &stats_attr_group);
211     cpufreq_cpu_put(data);
212+ if (ret)
213+ goto error_out;
214+
215+ stat->cpu = cpu;
216+ per_cpu(cpufreq_stats_table, cpu) = stat;
217+
218     return 0;
219 error_out:
220- cpufreq_cpu_put(data);
221-error_get_fail:
222     kfree(stat);
223     per_cpu(cpufreq_stats_table, cpu) = NULL;
224     return ret;
225@@ -275,10 +281,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
226     table = cpufreq_frequency_get_table(cpu);
227     if (!table)
228         return 0;
229- ret = cpufreq_stats_create_table(policy, table);
230- if (ret)
231- return ret;
232- return 0;
233+ if (!per_cpu(cpufreq_stats_table, cpu)) {
234+ ret = cpufreq_stats_create_table(policy, table);
235+ if (ret)
236+ return ret;
237+ }
238+ return cpufreq_stats_update_table(policy, table);
239 }
240 
241 static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
242@@ -298,21 +306,23 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
243     old_index = stat->last_index;
244     new_index = freq_table_get_index(stat, freq->new);
245 
246- /* We can't do stat->time_in_state[-1]= .. */
247- if (old_index == -1 || new_index == -1)
248- return 0;
249-
250     cpufreq_stats_update(freq->cpu);
251-
252     if (old_index == new_index)
253         return 0;
254 
255+ if (new_index == -1)
256+ return 0;
257+
258     spin_lock(&cpufreq_stats_lock);
259     stat->last_index = new_index;
260+ if (old_index != -1) {
261 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
262- stat->trans_table[old_index * stat->max_state + new_index]++;
263+ if (stat->trans_table)
264+ stat->trans_table[old_index * stat->max_state +
265+ new_index]++;
266 #endif
267- stat->total_trans++;
268+ stat->total_trans++;
269+ }
270     spin_unlock(&cpufreq_stats_lock);
271     return 0;
272 }
273@@ -328,9 +338,6 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
274     case CPU_ONLINE_FROZEN:
275         cpufreq_update_policy(cpu);
276         break;
277- case CPU_DOWN_PREPARE:
278- cpufreq_stats_free_sysfs(cpu);
279- break;
280     case CPU_DEAD:
281     case CPU_DEAD_FROZEN:
282         cpufreq_stats_free_table(cpu);
283@@ -339,10 +346,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
284     return NOTIFY_OK;
285 }
286 
287-/* priority=1 so this will get called before cpufreq_remove_dev */
288-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
289+static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
290+{
291     .notifier_call = cpufreq_stat_cpu_callback,
292- .priority = 1,
293 };
294 
295 static struct notifier_block notifier_policy_block = {
296@@ -389,7 +395,6 @@ static void __exit cpufreq_stats_exit(void)
297     unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
298     for_each_online_cpu(cpu) {
299         cpufreq_stats_free_table(cpu);
300- cpufreq_stats_free_sysfs(cpu);
301     }
302 }
303 
304diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
305index 74218ad..6e40f1b 100644
306--- a/drivers/mmc/host/jz4740_mmc.c
307+++ b/drivers/mmc/host/jz4740_mmc.c
308@@ -23,6 +23,7 @@
309 #include <linux/delay.h>
310 #include <linux/scatterlist.h>
311 #include <linux/clk.h>
312+#include <linux/cpufreq.h>
313 
314 #include <linux/bitops.h>
315 #include <linux/gpio.h>
316@@ -685,6 +686,60 @@ static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
317     jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
318 }
319 
320+#ifdef CONFIG_CPU_FREQ
321+
322+static struct jz4740_mmc_host *cpufreq_host;
323+
324+static int jz4740_mmc_cpufreq_transition(struct notifier_block *nb,
325+ unsigned long val, void *data)
326+{
327+ /* TODO: We only have to take action when the PLL freq changes:
328+ the main dividers have no influence on the MSC device clock. */
329+
330+ if (val == CPUFREQ_PRECHANGE) {
331+ mmc_claim_host(cpufreq_host->mmc);
332+ clk_disable(cpufreq_host->clk);
333+ } else if (val == CPUFREQ_POSTCHANGE) {
334+ struct mmc_ios *ios = &cpufreq_host->mmc->ios;
335+ if (ios->clock)
336+ jz4740_mmc_set_clock_rate(cpufreq_host, ios->clock);
337+ if (ios->power_mode != MMC_POWER_OFF)
338+ clk_enable(cpufreq_host->clk);
339+ mmc_release_host(cpufreq_host->mmc);
340+ }
341+ return 0;
342+}
343+
344+static struct notifier_block jz4740_mmc_cpufreq_nb = {
345+ .notifier_call = jz4740_mmc_cpufreq_transition,
346+};
347+
348+static inline int jz4740_mmc_cpufreq_register(struct jz4740_mmc_host *host)
349+{
350+ cpufreq_host = host;
351+ return cpufreq_register_notifier(&jz4740_mmc_cpufreq_nb,
352+ CPUFREQ_TRANSITION_NOTIFIER);
353+}
354+
355+static inline void jz4740_mmc_cpufreq_unregister(void)
356+{
357+ cpufreq_unregister_notifier(&jz4740_mmc_cpufreq_nb,
358+ CPUFREQ_TRANSITION_NOTIFIER);
359+}
360+
361+#else
362+
363+static inline int jz4740_mmc_cpufreq_register(struct jz4740_mmc_host *host)
364+{
365+ return 0;
366+}
367+
368+static inline void jz4740_mmc_cpufreq_unregister(void)
369+{
370+}
371+
372+#endif
373+
374 static const struct mmc_host_ops jz4740_mmc_ops = {
375     .request = jz4740_mmc_request,
376     .set_ios = jz4740_mmc_set_ios,
377@@ -834,11 +889,18 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
378         goto err_free_host;
379     }
380 
381+ ret = jz4740_mmc_cpufreq_register(host);
382+ if (ret) {
383+ dev_err(&pdev->dev,
384+ "Failed to register cpufreq transition notifier\n");
385+ goto err_clk_put;
386+ }
387+
388     host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
389     if (!host->mem) {
390         ret = -ENOENT;
391         dev_err(&pdev->dev, "Failed to get base platform memory\n");
392- goto err_clk_put;
393+ goto err_cpufreq_unreg;
394     }
395 
396     host->mem = request_mem_region(host->mem->start,
397@@ -846,7 +908,7 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
398     if (!host->mem) {
399         ret = -EBUSY;
400         dev_err(&pdev->dev, "Failed to request base memory region\n");
401- goto err_clk_put;
402+ goto err_cpufreq_unreg;
403     }
404 
405     host->base = ioremap_nocache(host->mem->start, resource_size(host->mem));
406@@ -929,6 +991,8 @@ err_iounmap:
407     iounmap(host->base);
408 err_release_mem_region:
409     release_mem_region(host->mem->start, resource_size(host->mem));
410+err_cpufreq_unreg:
411+ jz4740_mmc_cpufreq_unregister();
412 err_clk_put:
413     clk_put(host->clk);
414 err_free_host:
415@@ -958,6 +1022,7 @@ static int __devexit jz4740_mmc_remove(struct platform_device *pdev)
416     iounmap(host->base);
417     release_mem_region(host->mem->start, resource_size(host->mem));
418 
419+ jz4740_mmc_cpufreq_unregister();
420     clk_put(host->clk);
421 
422     platform_set_drvdata(pdev, NULL);
423--
4241.7.4.1
425
426
