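/*
 * builtin-kmem.c - 'perf kmem': record kernel SLAB allocation/free
 * tracepoints and report per-callsite and per-pointer statistics,
 * internal fragmentation, and cross-CPU/cross-node behavior.
 */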
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static const char *input_name;

static int alloc_flag;
static int caller_flag;

static int alloc_lines = -1;
static int caller_lines = -1;

static bool raw_ip;

static char default_sort_order[] = "frag,hit,bytes";

static int *cpunode_map;
static int max_cpu_num;

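/*
 * One accounting record per unique pointer (alloc tree) or per unique
 * call site (caller tree), kept in an rbtree and re-sorted for output.
 */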
struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE	"/sys/devices/system/node"

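/*
 * Size the cpu -> NUMA-node map from sysfs 'kernel_max' (falling back
 * to 4096 CPUs) and mark every slot as node-unknown (-1).
 */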
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
	} else {
		if (fscanf(fp, "%d", &max_cpu_num) < 1)
			die("Failed to read 'kernel_max' from sysfs");
		max_cpu_num++;
		fclose(fp);
	}

	/* Always allocate the map, even on the fallback path. */
	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map)
		die("calloc");
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
}

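/*
 * Walk the /sys/devices/system/node/nodeN/cpuM links to record which
 * NUMA node each CPU belongs to; used to detect cross-node allocations.
 */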
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;

	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
}

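/*
 * Account an allocation in the per-pointer tree, keyed by the returned
 * pointer; repeated events on the same pointer accumulate into one node.
 */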
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}

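/* Account an allocation in the per-call-site tree, keyed by call_site. */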
static void insert_caller_stat(unsigned long call_site,
			       int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}
}

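/*
 * Handle one kmalloc/kmem_cache_alloc (or *_node) tracepoint: update
 * both trees and the global totals, and count a cross-node allocation
 * when the event carries a 'node' field that differs from the CPU's node.
 */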
static void process_alloc_event(void *data,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;
	int node1, node2;

	ptr = raw_field_value(event, "ptr", data);
	call_site = raw_field_value(event, "call_site", data);
	bytes_req = raw_field_value(event, "bytes_req", data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

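/*
 * Look up an existing stat entry using the same comparison function the
 * tree was ordered with (ptr_cmp or callsite_cmp).
 */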
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

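/*
 * Handle a kfree/kmem_cache_free tracepoint: a free on a different CPU
 * than the allocation counts as a ping-pong for both the pointer and
 * its call site.
 */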
static void process_free_event(void *data,
			       struct event *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", data);

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		assert(s_caller);
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;
}

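/* Dispatch a raw tracepoint sample to the right kmem handler by event name. */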
static void process_raw_event(union perf_event *raw_event __used, void *data,
			      int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);
	if (!event)
		return;	/* not a tracepoint type we know about */

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(data, event, cpu, timestamp, thread);
		return;
	}
}

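/* perf_tool sample callback: resolve the thread, then parse the raw data. */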
static int process_sample_event(struct perf_tool *tool __used,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel __used,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, sample->raw_data, sample->cpu,
			  sample->time, thread);

	return 0;
}

static struct perf_tool perf_kmem = {
	.sample			= process_sample_event,
	.comm			= perf_event__process_comm,
	.ordered_samples	= true,
};

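/* Internal fragmentation: percentage of allocated bytes not requested. */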
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

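/*
 * Print one sorted result table (caller or alloc view), resolving call
 * sites to kernel symbols unless --raw-ip was given.
 */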
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite" : "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("__print_result: couldn't find kernel information\n");
		return;
	}
	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}

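/* A named sort key; --sort assembles an ordered list of these per view. */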
struct sort_dimension {
	const char	name[20];
	sort_fn_t	cmp;
	struct list_head list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);

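/*
 * Re-insert an entry into the sorted tree, trying each sort key in
 * order until one differentiates the two entries; entries with larger
 * keys go left, so rb_first() yields descending order.
 */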
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}

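/* 'perf kmem stat': process the recorded perf.data, then sort and print. */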
static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &perf_kmem);
	if (session == NULL)
		return -ENOMEM;

	if (perf_session__create_kernel_maps(session) < 0)
		goto out_delete;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}

static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record|stat}",
	NULL
};

static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	\
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))

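/* Append a copy of the sort dimension named 'tok' to the given sort list. */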
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			if (!sort)
				die("malloc");
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

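/* Parse a comma-separated --sort key list, e.g. "frag,hit,bytes". */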
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;	/* strsep() advances pos; free str itself */

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

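/*
 * --caller and --alloc each bump their flag above the other's, so the
 * option seen most recently decides which view subsequent --sort and
 * --line options apply to.
 */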
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}

static int parse_caller_opt(const struct option *opt __used,
			    const char *arg __used, int unset __used)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __used,
			   const char *arg __used, int unset __used)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics",
			   parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics",
			   parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};

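/* Arguments for 'perf record' implied by 'perf kmem record'. */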
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};

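/*
 * 'perf kmem record': prepend the canned record arguments to the user's
 * and hand off to cmd_record().
 */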
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

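/* Command entry point: dispatch to the record or stat subcommand. */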
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		setup_cpunode_map();

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}