/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
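
/*
 * Illustrative sizing, not part of the original file: with a common
 * PAGE_SIZE of 4096 bytes and the 8-byte struct profile_hit above,
 * each per-cpu hashtable page holds NR_PROFILE_HIT = 4096/8 = 512
 * entries, arranged as NR_PROFILE_GRP = 512/8 = 64 groups of
 * PROFILE_GRPSZ = 8 slots; each group is one probe bucket for the
 * open-addressed hash in do_profile_hits() below.
 */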

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %lu)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %lu)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %lu)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %lu)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
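
/*
 * Example boot parameters accepted by profile_setup() above
 * (illustrative; the value after the comma is the shift, so each
 * profile slot covers 2^shift bytes of kernel text):
 *
 *	profile=2		CPU-time profiling, 4-byte slots
 *	profile=schedule,1	profile schedule() call points
 *	profile=sleep,0		sleep profiling (needs CONFIG_SCHEDSTATS)
 *	profile=kvm,0		KVM profiling (KVM_PROFILING mode)
 */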

int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
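
/*
 * Worked sizing example, illustrative only: with roughly 8 MB of
 * kernel text and profile=2, prof_len = 8 MB >> 2 = 2M slots and
 * buffer_bytes = 2M * sizeof(atomic_t) = 8 MB for 4-byte atomic_t.
 * An allocation that large can exceed what kzalloc() will satisfy,
 * which is why profile_init() falls back to alloc_pages_exact() and
 * finally vzalloc().
 */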

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;

	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);
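
/*
 * Illustrative consumer of the notifier API above; the callback and
 * its names are hypothetical, not part of this file.  The void *data
 * handed to a PROFILE_TASK_EXIT callback is the exiting task:
 *
 *	static int my_exit_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		struct task_struct *task = data;
 *
 *		pr_debug("task %d exiting\n", task->pid);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_exit_nb = {
 *		.notifier_call = my_exit_notify,
 *	};
 *
 *	err = profile_event_register(PROFILE_TASK_EXIT, &my_exit_nb);
 */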

#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPIs all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of having a second hashtable is to avoid the cacheline
 * contention that would otherwise occur when pending profile hits
 * are flushed (which is required for the accuracy of reported
 * profile hits) and that would resurrect the interrupt livelock
 * issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
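
/*
 * Worked probe-sequence example, illustrative only: with
 * NR_PROFILE_GRP = 64 and PROFILE_GRPSHIFT = 3, a profile slot
 * pc = 0x1234 gets primary index (0x1234 & 63) << 3 = 0x1a0 and
 * secondary stride (~(0x1234 << 1) & 63) << 3 = 0xb8.
 * do_profile_hits() below scans the 8 entries of each group for a
 * matching or empty slot, then steps by the secondary stride modulo
 * NR_PROFILE_HIT until it returns to the primary group.
 */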
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, prof_cpu_mask);
	seq_putc(m, '\n');
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use the readprofile program to get meaningful information
 * out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
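
/*
 * Illustrative layout of the data returned above, not part of the
 * original file: the first sizeof(unsigned int) bytes hold
 * sample_step (1 << prof_shift), followed by prof_len raw atomic_t
 * counters, one per text slot.  A typical consumer invocation might
 * look like:
 *
 *	readprofile -m /boot/System.map-`uname -r` | sort -nr | head
 */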

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also resets the
 * profiling interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
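
/*
 * Illustrative usage, not part of the original file: any write resets
 * the accumulated counters, e.g. from a shell:
 *
 *	echo > /proc/profile
 *
 * while writing exactly sizeof(int) bytes of binary data additionally
 * passes that value to setup_profiling_timer() on SMP kernels.
 */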

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */