/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
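
/*
 * Sizing note (illustrative, assuming a 4 KiB PAGE_SIZE): each
 * struct profile_hit is 8 bytes, so one page holds NR_PROFILE_HIT =
 * 4096 / 8 = 512 entries, organized as NR_PROFILE_GRP = 512 / 8 = 64
 * groups of PROFILE_GRPSZ = 8 consecutive slots each.
 */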

/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
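
/*
 * Usage sketch (per Documentation/kernel-parameters.txt): boot with
 * e.g. "profile=2" for clock-tick profiling at one counter per
 * 2^2 = 4 bytes of kernel text, or "profile=schedule,4" to count
 * schedule() invocations at 16-byte granularity instead.
 */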

int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
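
/*
 * Rough sizing example (hypothetical numbers): with 8 MiB of kernel
 * text and prof_shift == 2, prof_len is 2M counters, i.e. an 8 MiB
 * buffer with 4-byte atomic_t. A physically contiguous request that
 * large can easily fail under kzalloc() and alloc_pages_exact(),
 * hence the final vzalloc() fallback, which only needs virtually
 * contiguous memory.
 */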

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}
EXPORT_SYMBOL_GPL(register_timer_hook);

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(unregister_timer_hook);


#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
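/*
 * Worked probe example (illustrative, using the 4 KiB-page constants
 * above, so NR_PROFILE_HIT == 512 and NR_PROFILE_GRP == 64): for a
 * buffer slot index pc, do_profile_hits() below starts probing at
 * group
 *	primary = (pc & 63) << 3
 * and advances by
 *	secondary = (~(pc << 1) & 63) << 3
 * slots per step. Since pc << 1 is even, ~(pc << 1) is odd, so
 * secondary is an odd multiple of PROFILE_GRPSZ; being coprime to the
 * 64 group positions, the probe visits every group exactly once
 * before wrapping back to primary, i.e. the whole table is searched
 * before the overflow flush is taken.
 */
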
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int __cpuinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
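
/*
 * Note: profile_hit() is the single-hit convenience wrapper declared
 * in <linux/profile.h>; it calls profile_hits(type, pc, 1), so every
 * path above ultimately funnels into do_profile_hits().
 */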

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, prof_cpu_mask);
	seq_putc(m, '\n');
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("prof_cpu_mask", 0600, root_irq_dir, &prof_cpu_mask_proc_fops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
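
/*
 * File layout seen by readers (a sketch; the pointer arithmetic above
 * relies on sizeof(atomic_t) matching sizeof(unsigned int)):
 *
 *	offset 0:			sample_step = 1 << prof_shift
 *	offset sizeof(unsigned int):	prof_len hit counters, counter N
 *					covering the kernel text range
 *					[_stext + N*step, + step)
 *
 * so the file is (prof_len + 1) * sizeof(unsigned int) bytes long.
 */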

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
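
/*
 * From user space, any write resets the counters; a write of exactly
 * sizeof(int) bytes is additionally interpreted as a multiplier.
 * readprofile(8) wraps both: "readprofile -r" resets the buffer and
 * "readprofile -M <n>" sets the multiplier (the latter only on
 * architectures providing setup_profiling_timer()).
 */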

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1+prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */