Date:    2011-08-02 10:26:09 (11 years 9 months ago)
Author:  Maarten ter Huurne
Commit:  2485a5469be6d50b84e03c05654de70f64d4c346
Message: cpufreq_stats: Support runtime changes to frequency table.
Files:   drivers/cpufreq/cpufreq_stats.c (12 diffs)
Change Details
drivers/cpufreq/cpufreq_stats.c
(diff columns below: old line number | new line number | line content; a blank
old column marks an added line, a blank new column marks a removed line)
20 | 20 | #include <linux/kobject.h> |
21 | 21 | #include <linux/spinlock.h> |
22 | 22 | #include <linux/notifier.h> |
23 | #include <linux/string.h> | |
23 | 24 | #include <asm/cputime.h> |
24 | 25 | |
25 | 26 | static spinlock_t cpufreq_stats_lock; |
... | ... | |
36 | 37 | unsigned long long last_time; |
37 | 38 | unsigned int max_state; |
38 | 39 | unsigned int state_num; |
39 | unsigned int last_index; | |
40 | int last_index; | |
40 | 41 | cputime64_t *time_in_state; |
41 | 42 | unsigned int *freq_table; |
42 | 43 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
... | ... | |
59 | 60 | cur_time = get_jiffies_64(); |
60 | 61 | spin_lock(&cpufreq_stats_lock); |
61 | 62 | stat = per_cpu(cpufreq_stats_table, cpu); |
62 | if (stat->time_in_state) | |
63 | if (stat->time_in_state && stat->last_index != -1) | |
63 | 64 | stat->time_in_state[stat->last_index] = |
64 | 65 | cputime64_add(stat->time_in_state[stat->last_index], |
65 | 66 | cputime_sub(cur_time, stat->last_time)); |
... | ... | |
82 | 83 | ssize_t len = 0; |
83 | 84 | int i; |
84 | 85 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); |
85 | if (!stat) | |
86 | if (!stat || !stat->time_in_state) | |
86 | 87 | return 0; |
87 | 88 | cpufreq_stats_update(stat->cpu); |
88 | 89 | for (i = 0; i < stat->state_num; i++) { |
... | ... | |
100 | 101 | int i, j; |
101 | 102 | |
102 | 103 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); |
103 | if (!stat) | |
104 | if (!stat || !stat->trans_table) | |
104 | 105 | return 0; |
105 | 106 | cpufreq_stats_update(stat->cpu); |
106 | 107 | len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); |
... | ... | |
159 | 160 | static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) |
160 | 161 | { |
161 | 162 | int index; |
162 | for (index = 0; index < stat->max_state; index++) | |
163 | if (stat->freq_table[index] == freq) | |
164 | return index; | |
163 | if (stat->freq_table) | |
164 | for (index = 0; index < stat->max_state; index++) | |
165 | if (stat->freq_table[index] == freq) | |
166 | return index; | |
165 | 167 | return -1; |
166 | 168 | } |
167 | 169 | |
168 | /* should be called late in the CPU removal sequence so that the stats | |
169 | * memory is still available in case someone tries to use it. | |
170 | */ | |
171 | 170 | static void cpufreq_stats_free_table(unsigned int cpu) |
172 | 171 | { |
173 | 172 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); |
173 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | |
174 | if (policy && policy->cpu == cpu) | |
175 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | |
174 | 176 | if (stat) { |
175 | 177 | kfree(stat->time_in_state); |
176 | 178 | kfree(stat); |
177 | 179 | } |
178 | 180 | per_cpu(cpufreq_stats_table, cpu) = NULL; |
179 | } | |
180 | ||
181 | /* must be called early in the CPU removal sequence (before | |
182 | * cpufreq_remove_dev) so that policy is still valid. | |
183 | */ | |
184 | static void cpufreq_stats_free_sysfs(unsigned int cpu) | |
185 | { | |
186 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | |
187 | if (policy && policy->cpu == cpu) | |
188 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | |
189 | 181 | if (policy) |
190 | 182 | cpufreq_cpu_put(policy); |
191 | 183 | } |
192 | 184 | |
193 | static int cpufreq_stats_create_table(struct cpufreq_policy *policy, | |
185 | static int cpufreq_stats_update_table(struct cpufreq_policy *policy, | |
194 | 186 | struct cpufreq_frequency_table *table) |
195 | 187 | { |
196 | unsigned int i, j, count = 0, ret = 0; | |
197 | struct cpufreq_stats *stat; | |
198 | struct cpufreq_policy *data; | |
188 | unsigned int i, j, count = 0; | |
199 | 189 | unsigned int alloc_size; |
200 | 190 | unsigned int cpu = policy->cpu; |
201 | if (per_cpu(cpufreq_stats_table, cpu)) | |
202 | return -EBUSY; | |
203 | stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL); | |
204 | if ((stat) == NULL) | |
205 | return -ENOMEM; | |
206 | ||
207 | data = cpufreq_cpu_get(cpu); | |
208 | if (data == NULL) { | |
209 | ret = -EINVAL; | |
210 | goto error_get_fail; | |
211 | } | |
212 | ||
213 | ret = sysfs_create_group(&data->kobj, &stats_attr_group); | |
214 | if (ret) | |
215 | goto error_out; | |
216 | ||
217 | stat->cpu = cpu; | |
218 | per_cpu(cpufreq_stats_table, cpu) = stat; | |
191 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); | |
219 | 192 | |
220 | 193 | for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { |
221 | 194 | unsigned int freq = table[i].frequency; |
... | ... | |
224 | 197 | count++; |
225 | 198 | } |
226 | 199 | |
200 | if (stat->max_state != count) { | |
201 | stat->max_state = count; | |
202 | kfree(stat->time_in_state); | |
203 | stat->time_in_state = NULL; | |
204 | } | |
227 | 205 | alloc_size = count * sizeof(int) + count * sizeof(cputime64_t); |
228 | ||
229 | 206 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
230 | 207 | alloc_size += count * count * sizeof(int); |
231 | 208 | #endif |
232 | stat->max_state = count; | |
233 | stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); | |
234 | if (!stat->time_in_state) { | |
235 | ret = -ENOMEM; | |
236 | goto error_out; | |
237 | } | |
238 | stat->freq_table = (unsigned int *)(stat->time_in_state + count); | |
239 | ||
209 | if (stat->time_in_state) { | |
210 | memset(stat->time_in_state, 0, alloc_size); | |
211 | } else { | |
212 | stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); | |
213 | if (!stat->time_in_state) | |
214 | return -ENOMEM; | |
215 | stat->freq_table = (unsigned int *)( | |
216 | stat->time_in_state + count); | |
240 | 217 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
241 | stat->trans_table = stat->freq_table + count; | |
218 | stat->trans_table = stat->freq_table + count; | |
242 | 219 | #endif |
220 | } | |
221 | ||
243 | 222 | j = 0; |
244 | for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { | |
245 | unsigned int freq = table[i].frequency; | |
246 | if (freq == CPUFREQ_ENTRY_INVALID) | |
247 | continue; | |
248 | if (freq_table_get_index(stat, freq) == -1) | |
249 | stat->freq_table[j++] = freq; | |
223 | if (stat->freq_table) { | |
224 | for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { | |
225 | unsigned int freq = table[i].frequency; | |
226 | if (freq == CPUFREQ_ENTRY_INVALID) | |
227 | continue; | |
228 | if (freq_table_get_index(stat, freq) == -1) | |
229 | stat->freq_table[j++] = freq; | |
230 | } | |
250 | 231 | } |
251 | 232 | stat->state_num = j; |
252 | 233 | spin_lock(&cpufreq_stats_lock); |
253 | 234 | stat->last_time = get_jiffies_64(); |
254 | 235 | stat->last_index = freq_table_get_index(stat, policy->cur); |
255 | 236 | spin_unlock(&cpufreq_stats_lock); |
237 | return 0; | |
238 | } | |
239 | ||
240 | static int cpufreq_stats_create_table(struct cpufreq_policy *policy, | |
241 | struct cpufreq_frequency_table *table) | |
242 | { | |
243 | unsigned int ret = 0; | |
244 | struct cpufreq_stats *stat; | |
245 | struct cpufreq_policy *data; | |
246 | unsigned int cpu = policy->cpu; | |
247 | ||
248 | stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL); | |
249 | if ((stat) == NULL) | |
250 | return -ENOMEM; | |
251 | ||
252 | data = cpufreq_cpu_get(cpu); | |
253 | if (data == NULL) { | |
254 | ret = -EINVAL; | |
255 | goto error_out; | |
256 | } | |
257 | ret = sysfs_create_group(&data->kobj, &stats_attr_group); | |
256 | 258 | cpufreq_cpu_put(data); |
259 | if (ret) | |
260 | goto error_out; | |
261 | ||
262 | stat->cpu = cpu; | |
263 | per_cpu(cpufreq_stats_table, cpu) = stat; | |
264 | ||
257 | 265 | return 0; |
258 | 266 | error_out: |
259 | cpufreq_cpu_put(data); | |
260 | error_get_fail: | |
261 | 267 | kfree(stat); |
262 | 268 | per_cpu(cpufreq_stats_table, cpu) = NULL; |
263 | 269 | return ret; |
... | ... | |
275 | 281 | table = cpufreq_frequency_get_table(cpu); |
276 | 282 | if (!table) |
277 | 283 | return 0; |
278 | ret = cpufreq_stats_create_table(policy, table); | |
279 | if (ret) | |
280 | return ret; | |
281 | return 0; | |
284 | if (!per_cpu(cpufreq_stats_table, cpu)) { | |
285 | ret = cpufreq_stats_create_table(policy, table); | |
286 | if (ret) | |
287 | return ret; | |
288 | } | |
289 | return cpufreq_stats_update_table(policy, table); | |
282 | 290 | } |
283 | 291 | |
284 | 292 | static int cpufreq_stat_notifier_trans(struct notifier_block *nb, |
... | ... | |
298 | 306 | old_index = stat->last_index; |
299 | 307 | new_index = freq_table_get_index(stat, freq->new); |
300 | 308 | |
301 | /* We can't do stat->time_in_state[-1]= .. */ | |
302 | if (old_index == -1 || new_index == -1) | |
303 | return 0; | |
304 | ||
305 | 309 | cpufreq_stats_update(freq->cpu); |
306 | ||
307 | 310 | if (old_index == new_index) |
308 | 311 | return 0; |
309 | 312 | |
313 | if (new_index == -1) | |
314 | return 0; | |
315 | ||
310 | 316 | spin_lock(&cpufreq_stats_lock); |
311 | 317 | stat->last_index = new_index; |
318 | if (old_index != -1) { | |
312 | 319 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
313 | stat->trans_table[old_index * stat->max_state + new_index]++; | |
320 | if (stat->trans_table) | |
321 | stat->trans_table[old_index * stat->max_state + | |
322 | new_index]++; | |
314 | 323 | #endif |
315 | stat->total_trans++; | |
324 | stat->total_trans++; | |
325 | } | |
316 | 326 | spin_unlock(&cpufreq_stats_lock); |
317 | 327 | return 0; |
318 | 328 | } |
... | ... | |
328 | 338 | case CPU_ONLINE_FROZEN: |
329 | 339 | cpufreq_update_policy(cpu); |
330 | 340 | break; |
331 | case CPU_DOWN_PREPARE: | |
332 | cpufreq_stats_free_sysfs(cpu); | |
333 | break; | |
334 | 341 | case CPU_DEAD: |
335 | 342 | case CPU_DEAD_FROZEN: |
336 | 343 | cpufreq_stats_free_table(cpu); |
... | ... | |
339 | 346 | return NOTIFY_OK; |
340 | 347 | } |
341 | 348 | |
342 | /* priority=1 so this will get called before cpufreq_remove_dev */ | |
343 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { | |
349 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = | |
350 | { | |
344 | 351 | .notifier_call = cpufreq_stat_cpu_callback, |
345 | .priority = 1, | |
346 | 352 | }; |
347 | 353 | |
348 | 354 | static struct notifier_block notifier_policy_block = { |
... | ... | |
389 | 395 | unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); |
390 | 396 | for_each_online_cpu(cpu) { |
391 | 397 | cpufreq_stats_free_table(cpu); |
392 | cpufreq_stats_free_sysfs(cpu); | |
393 | 398 | } |
394 | 399 | } |
395 | 400 |
Branches:
ben-wpan
ben-wpan-stefan
5396a9238205f20f811ea57898980d3ca82df0b6
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9