#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>

static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS 12
#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
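
/*
 * Illustrative sketch only: looking up the stats for one lock instance
 * means walking a single hash bucket, as lock_stat_findnew() does below:
 *
 *        struct lock_stat *st;
 *        list_for_each_entry(st, lockhashentry(addr), hash_entry)
 *                if (st->addr == addr)
 *                        return st;  // found the stat for this lock
 */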

struct lock_stat {
        struct list_head hash_entry;
        struct rb_node rb;              /* used for sorting */

        /*
         * FIXME: raw_field_value() returns unsigned long long,
         * so the address of the lockdep_map has to be handled as 64 bit.
         * Is there a better solution?
         */
        void *addr;                     /* address of lockdep_map, used as ID */
        char *name;                     /* for strcpy(), we cannot use const */

        unsigned int nr_acquire;
        unsigned int nr_acquired;
        unsigned int nr_contended;
        unsigned int nr_release;

        unsigned int nr_readlock;
        unsigned int nr_trylock;
        /* these times are in nanoseconds */
        u64 wait_time_total;
        u64 wait_time_min;
        u64 wait_time_max;

        int discard;                    /* flag of blacklist */
};

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first acquire event.
 * By the nature of lock events, there is no guarantee that the first
 * event for a lock is acquire; it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED 0       /* initial state */
#define SEQ_STATE_RELEASED      1
#define SEQ_STATE_ACQUIRING     2
#define SEQ_STATE_ACQUIRED      3
#define SEQ_STATE_READ_ACQUIRED 4
#define SEQ_STATE_CONTENDED     5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Holds the state of one lock sequence:
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
        struct list_head list;
        int state;
        u64 prev_event_time;
        void *addr;

        int read_count;
};

struct thread_stat {
        struct rb_node rb;

        u32 tid;
        struct list_head seq_list;
};

static struct rb_root thread_stats;
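
/*
 * Data-structure overview (as implemented below): thread_stats is an
 * rb-tree keyed by tid; each thread_stat carries a seq_list of the lock
 * sequences currently in flight for that thread, looked up by lock
 * address via get_seq().
 */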

static struct thread_stat *thread_stat_find(u32 tid)
{
        struct rb_node *node;
        struct thread_stat *st;

        node = thread_stats.rb_node;
        while (node) {
                st = container_of(node, struct thread_stat, rb);
                if (st->tid == tid)
                        return st;
                else if (tid < st->tid)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }

        return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
        struct rb_node **rb = &thread_stats.rb_node;
        struct rb_node *parent = NULL;
        struct thread_stat *p;

        while (*rb) {
                p = container_of(*rb, struct thread_stat, rb);
                parent = *rb;

                if (new->tid < p->tid)
                        rb = &(*rb)->rb_left;
                else if (new->tid > p->tid)
                        rb = &(*rb)->rb_right;
                else
                        BUG_ON("inserting invalid thread_stat\n");
        }

        rb_link_node(&new->rb, parent, rb);
        rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
        struct thread_stat *st;

        st = thread_stat_find(tid);
        if (st)
                return st;

        st = zalloc(sizeof(struct thread_stat));
        if (!st)
                die("memory allocation failed\n");

        st->tid = tid;
        INIT_LIST_HEAD(&st->seq_list);

        thread_stat_insert(st);

        return st;
}

static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
        thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
        struct thread_stat *st;

        st = zalloc(sizeof(struct thread_stat));
        if (!st)
                die("memory allocation failed\n");
        st->tid = tid;
        INIT_LIST_HEAD(&st->seq_list);

        rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
        rb_insert_color(&st->rb, &thread_stats);

        thread_stat_findnew = thread_stat_findnew_after_first;
        return st;
}

/* simple key-comparison functions: return true if 'one' is bigger than 'two' */
#define SINGLE_KEY(member)                                              \
        static int lock_stat_key_ ## member(struct lock_stat *one,     \
                                            struct lock_stat *two)     \
        {                                                               \
                return one->member > two->member;                       \
        }

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)

static int lock_stat_key_wait_time_min(struct lock_stat *one,
                                       struct lock_stat *two)
{
        u64 s1 = one->wait_time_min;
        u64 s2 = two->wait_time_min;
        if (s1 == ULLONG_MAX)
                s1 = 0;
        if (s2 == ULLONG_MAX)
                s2 = 0;
        return s1 > s2;
}
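
/*
 * Note: wait_time_min stays at its ULLONG_MAX initial value (set in
 * lock_stat_findnew() below) until a lock sees its first contention,
 * so the sentinel is mapped to 0 here to keep never-contended locks
 * from sorting above everything else.
 */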

struct lock_key {
        /*
         * name: the key name the user specifies with -k;
         * it should be simpler than the raw name of the member,
         * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
         */
        const char *name;
        int (*key)(struct lock_stat*, struct lock_stat*);
};

static const char *sort_key = "acquired";

static int (*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root result;   /* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)   \
        { #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
        DEF_KEY_LOCK(acquired, nr_acquired),
        DEF_KEY_LOCK(contended, nr_contended),
        DEF_KEY_LOCK(wait_total, wait_time_total),
        DEF_KEY_LOCK(wait_min, wait_time_min),
        DEF_KEY_LOCK(wait_max, wait_time_max),

        /* extra, more complicated comparison keys would go here */

        { NULL, NULL }
};
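
/*
 * Example: 'perf lock report -k wait_total' makes select_key() pick
 * lock_stat_key_wait_time_total() as the comparison function, so the
 * report is sorted by total wait time.
 */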

static void select_key(void)
{
        int i;

        for (i = 0; keys[i].name; i++) {
                if (!strcmp(keys[i].name, sort_key)) {
                        compare = keys[i].key;
                        return;
                }
        }

        die("Unknown compare key: %s\n", sort_key);
}

static void insert_to_result(struct lock_stat *st,
                             int (*bigger)(struct lock_stat *, struct lock_stat *))
{
        struct rb_node **rb = &result.rb_node;
        struct rb_node *parent = NULL;
        struct lock_stat *p;

        while (*rb) {
                p = container_of(*rb, struct lock_stat, rb);
                parent = *rb;

                if (bigger(st, p))
                        rb = &(*rb)->rb_left;
                else
                        rb = &(*rb)->rb_right;
        }

        rb_link_node(&st->rb, parent, rb);
        rb_insert_color(&st->rb, &result);
}

/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
        struct rb_node *node = result.rb_node;

        if (!node)
                return NULL;

        while (node->rb_left)
                node = node->rb_left;

        rb_erase(node, &result);
        return container_of(node, struct lock_stat, rb);
}
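
/*
 * Because insert_to_result() sends "bigger" entries to the left,
 * repeatedly popping the leftmost node drains the tree in descending
 * order of the selected key; print_result() below relies on this.
 */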

static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
        struct list_head *entry = lockhashentry(addr);
        struct lock_stat *ret, *new;

        list_for_each_entry(ret, entry, hash_entry) {
                if (ret->addr == addr)
                        return ret;
        }

        new = zalloc(sizeof(struct lock_stat));
        if (!new)
                goto alloc_failed;

        new->addr = addr;
        new->name = zalloc(strlen(name) + 1);
        if (!new->name)
                goto alloc_failed;
        strcpy(new->name, name);

        new->wait_time_min = ULLONG_MAX;

        list_add(&new->hash_entry, entry);
        return new;

alloc_failed:
        die("memory allocation failed\n");
}

static const char *input_name;

struct raw_event_sample {
        u32 size;
        char data[0];
};

struct trace_acquire_event {
        void *addr;
        const char *name;
        int flag;
};

struct trace_acquired_event {
        void *addr;
        const char *name;
};

struct trace_contended_event {
        void *addr;
        const char *name;
};

struct trace_release_event {
        void *addr;
        const char *name;
};

struct trace_lock_handler {
        void (*acquire_event)(struct trace_acquire_event *,
                              struct event *,
                              int cpu,
                              u64 timestamp,
                              struct thread *thread);

        void (*acquired_event)(struct trace_acquired_event *,
                               struct event *,
                               int cpu,
                               u64 timestamp,
                               struct thread *thread);

        void (*contended_event)(struct trace_contended_event *,
                                struct event *,
                                int cpu,
                                u64 timestamp,
                                struct thread *thread);

        void (*release_event)(struct trace_release_event *,
                              struct event *,
                              int cpu,
                              u64 timestamp,
                              struct thread *thread);
};

static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
        struct lock_seq_stat *seq;

        list_for_each_entry(seq, &ts->seq_list, list) {
                if (seq->addr == addr)
                        return seq;
        }

        seq = zalloc(sizeof(struct lock_seq_stat));
        if (!seq)
                die("Not enough memory\n");
        seq->state = SEQ_STATE_UNINITIALIZED;
        seq->addr = addr;

        list_add(&seq->list, &ts->seq_list);
        return seq;
}

enum broken_state {
        BROKEN_ACQUIRE,
        BROKEN_ACQUIRED,
        BROKEN_CONTENDED,
        BROKEN_RELEASE,
        BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
        TRY_LOCK = 1,
        READ_LOCK = 2,
};
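
/*
 * These bits are tested against the 'flag' field that
 * process_lock_acquire_event() extracts from the lock_acquire
 * tracepoint; a zero flag means a plain write acquisition.
 */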

static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
                          struct event *__event __used,
                          int cpu __used,
                          u64 timestamp __used,
                          struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, acquire_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
        case SEQ_STATE_RELEASED:
                if (!acquire_event->flag) {
                        seq->state = SEQ_STATE_ACQUIRING;
                } else {
                        if (acquire_event->flag & TRY_LOCK)
                                ls->nr_trylock++;
                        if (acquire_event->flag & READ_LOCK)
                                ls->nr_readlock++;
                        seq->state = SEQ_STATE_READ_ACQUIRED;
                        seq->read_count = 1;
                        ls->nr_acquired++;
                }
                break;
        case SEQ_STATE_READ_ACQUIRED:
                if (acquire_event->flag & READ_LOCK) {
                        seq->read_count++;
                        ls->nr_acquired++;
                        goto end;
                } else {
                        goto broken;
                }
                break;
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_ACQUIRING:
        case SEQ_STATE_CONTENDED:
broken:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_ACQUIRE]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        ls->nr_acquire++;
        seq->prev_event_time = timestamp;
end:
        return;
}

static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
                           struct event *__event __used,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;
        u64 contended_term;

        ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, acquired_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                /* orphan event, do nothing */
                return;
        case SEQ_STATE_ACQUIRING:
                break;
        case SEQ_STATE_CONTENDED:
                contended_term = timestamp - seq->prev_event_time;
                ls->wait_time_total += contended_term;
                if (contended_term < ls->wait_time_min)
                        ls->wait_time_min = contended_term;
                if (ls->wait_time_max < contended_term)
                        ls->wait_time_max = contended_term;
                break;
        case SEQ_STATE_RELEASED:
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_READ_ACQUIRED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_ACQUIRED]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;

        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        seq->state = SEQ_STATE_ACQUIRED;
        ls->nr_acquired++;
        seq->prev_event_time = timestamp;
end:
        return;
}
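
/*
 * The contended -> acquired transition above is where wait time is
 * accounted: the gap between the lock_contended and lock_acquired
 * timestamps is added to wait_time_total and folded into min/max.
 */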

static void
report_lock_contended_event(struct trace_contended_event *contended_event,
                            struct event *__event __used,
                            int cpu __used,
                            u64 timestamp __used,
                            struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(contended_event->addr, contended_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, contended_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                /* orphan event, do nothing */
                return;
        case SEQ_STATE_ACQUIRING:
                break;
        case SEQ_STATE_RELEASED:
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_READ_ACQUIRED:
        case SEQ_STATE_CONTENDED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_CONTENDED]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        seq->state = SEQ_STATE_CONTENDED;
        ls->nr_contended++;
        seq->prev_event_time = timestamp;
end:
        return;
}

static void
report_lock_release_event(struct trace_release_event *release_event,
                          struct event *__event __used,
                          int cpu __used,
                          u64 timestamp __used,
                          struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(release_event->addr, release_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, release_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                goto end;
                break;
        case SEQ_STATE_ACQUIRED:
                break;
        case SEQ_STATE_READ_ACQUIRED:
                seq->read_count--;
                BUG_ON(seq->read_count < 0);
                if (seq->read_count) {
                        /* other read locks are still held; keep the sequence */
                        ls->nr_release++;
                        goto end;
                }
                break;
        case SEQ_STATE_ACQUIRING:
        case SEQ_STATE_CONTENDED:
        case SEQ_STATE_RELEASED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_RELEASE]++;
                goto free_seq;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        ls->nr_release++;
free_seq:
        list_del(&seq->list);
        free(seq);
end:
        return;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
        .acquire_event          = report_lock_acquire_event,
        .acquired_event         = report_lock_acquired_event,
        .contended_event        = report_lock_contended_event,
        .release_event          = report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;

static void
process_lock_acquire_event(void *data,
                           struct event *event __used,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct trace_acquire_event acquire_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&acquire_event.addr, &tmp, sizeof(void *));
        acquire_event.name = (char *)raw_field_ptr(event, "name", data);
        acquire_event.flag = (int)raw_field_value(event, "flag", data);

        if (trace_handler->acquire_event)
                trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}

static void
process_lock_acquired_event(void *data,
                            struct event *event __used,
                            int cpu __used,
                            u64 timestamp __used,
                            struct thread *thread __used)
{
        struct trace_acquired_event acquired_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&acquired_event.addr, &tmp, sizeof(void *));
        acquired_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->acquired_event)
                trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}

static void
process_lock_contended_event(void *data,
                             struct event *event __used,
                             int cpu __used,
                             u64 timestamp __used,
                             struct thread *thread __used)
{
        struct trace_contended_event contended_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&contended_event.addr, &tmp, sizeof(void *));
        contended_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->contended_event)
                trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}

static void
process_lock_release_event(void *data,
                           struct event *event __used,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct trace_release_event release_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&release_event.addr, &tmp, sizeof(void *));
        release_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->release_event)
                trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}

static void
process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
{
        struct event *event;
        int type;

        type = trace_parse_common_type(data);
        event = trace_find_event(type);
        if (!event)     /* guard against unknown event types */
                return;

        if (!strcmp(event->name, "lock_acquire"))
                process_lock_acquire_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_acquired"))
                process_lock_acquired_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_contended"))
                process_lock_contended_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_release"))
                process_lock_release_event(data, event, cpu, timestamp, thread);
}

static void print_bad_events(int bad, int total)
{
        /* Output for debugging; this has to be removed eventually */
        int i;
        const char *name[BROKEN_MAX] =
                { "acquire", "acquired", "contended", "release" };

        if (!total)     /* nothing processed; avoid dividing by zero */
                return;

        pr_info("\n=== output for debug ===\n\n");
        pr_info("bad: %d, total: %d\n", bad, total);
        pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
        pr_info("histogram of events that caused bad sequences\n");
        for (i = 0; i < BROKEN_MAX; i++)
                pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}

/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
        struct lock_stat *st;
        char cut_name[20];
        int bad, total;

        pr_info("%20s ", "Name");
        pr_info("%10s ", "acquired");
        pr_info("%10s ", "contended");

        pr_info("%15s ", "total wait (ns)");
        pr_info("%15s ", "max wait (ns)");
        pr_info("%15s ", "min wait (ns)");

        pr_info("\n\n");

        bad = total = 0;
        while ((st = pop_from_result())) {
                total++;
                if (st->discard) {
                        bad++;
                        continue;
                }
                bzero(cut_name, 20);

                if (strlen(st->name) < 16) {
                        /* output raw name */
                        pr_info("%20s ", st->name);
                } else {
                        strncpy(cut_name, st->name, 16);
                        cut_name[16] = '.';
                        cut_name[17] = '.';
                        cut_name[18] = '.';
                        cut_name[19] = '\0';
                        /* cut off the name to preserve the output format */
                        pr_info("%20s ", cut_name);
                }

                pr_info("%10u ", st->nr_acquired);
                pr_info("%10u ", st->nr_contended);

                pr_info("%15" PRIu64 " ", st->wait_time_total);
                pr_info("%15" PRIu64 " ", st->wait_time_max);
                pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
                        0 : st->wait_time_min);
                pr_info("\n");
        }

        print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
        struct thread_stat *st;
        struct rb_node *node;
        struct thread *t;

        pr_info("%10s: comm\n", "Thread ID");

        node = rb_first(&thread_stats);
        while (node) {
                st = container_of(node, struct thread_stat, rb);
                t = perf_session__findnew(session, st->tid);
                pr_info("%10d: %s\n", st->tid, t->comm);
                node = rb_next(node);
        }
}

static void dump_map(void)
{
        unsigned int i;
        struct lock_stat *st;

        pr_info("Address of instance: name of class\n");
        for (i = 0; i < LOCKHASH_SIZE; i++) {
                list_for_each_entry(st, &lockhash_table[i], hash_entry) {
                        pr_info(" %p: %s\n", st->addr, st->name);
                }
        }
}

static void dump_info(void)
{
        if (info_threads)
                dump_threads();
        else if (info_map)
                dump_map();
        else
                die("Unknown type of information\n");
}

static int process_sample_event(struct perf_tool *tool __used,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct perf_evsel *evsel __used,
                                struct machine *machine)
{
        struct thread *thread = machine__findnew_thread(machine, sample->tid);

        if (thread == NULL) {
                pr_debug("problem processing %d event, skipping it.\n",
                         event->header.type);
                return -1;
        }

        process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);

        return 0;
}

static struct perf_tool eops = {
        .sample                 = process_sample_event,
        .comm                   = perf_event__process_comm,
        .ordered_samples        = true,
};

static int read_events(void)
{
        session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
        if (!session)
                die("Initializing perf session failed\n");

        return perf_session__process_events(session, &eops);
}
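
/*
 * Note: eops sets .ordered_samples = true, so samples are delivered in
 * timestamp order; the per-lock state machine above depends on seeing
 * acquire/acquired/contended/release events in causal order.
 */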

static void sort_result(void)
{
        unsigned int i;
        struct lock_stat *st;

        for (i = 0; i < LOCKHASH_SIZE; i++) {
                list_for_each_entry(st, &lockhash_table[i], hash_entry) {
                        insert_to_result(st, compare);
                }
        }
}

static void __cmd_report(void)
{
        setup_pager();
        select_key();
        read_events();
        sort_result();
        print_result();
}

static const char * const report_usage[] = {
        "perf lock report [<options>]",
        NULL
};

static const struct option report_options[] = {
        OPT_STRING('k', "key", &sort_key, "acquired",
                   "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
        /* TODO: type */
        OPT_END()
};

static const char * const info_usage[] = {
        "perf lock info [<options>]",
        NULL
};

static const struct option info_options[] = {
        OPT_BOOLEAN('t', "threads", &info_threads,
                    "dump the thread list in perf.data"),
        OPT_BOOLEAN('m', "map", &info_map,
                    "dump the map of lock instances (address:name table)"),
        OPT_END()
};

static const char * const lock_usage[] = {
        "perf lock [<options>] {record|report|script|info}",
        NULL
};

static const struct option lock_options[] = {
        OPT_STRING('i', "input", &input_name, "file", "input file name"),
        OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
        OPT_END()
};

static const char *record_args[] = {
        "record",
        "-R",
        "-f",
        "-m", "1024",
        "-c", "1",
        "-e", "lock:lock_acquire",
        "-e", "lock:lock_acquired",
        "-e", "lock:lock_contended",
        "-e", "lock:lock_release",
};
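
/*
 * 'perf lock record <cmd>' therefore expands to roughly:
 *
 *        perf record -R -f -m 1024 -c 1 \
 *                -e lock:lock_acquire -e lock:lock_acquired \
 *                -e lock:lock_contended -e lock:lock_release <cmd>
 */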

static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        const char **rec_argv;

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));

        if (rec_argv == NULL)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        BUG_ON(i != rec_argc);

        return cmd_record(i, rec_argv, NULL);
}

int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
        unsigned int i;

        symbol__init();
        for (i = 0; i < LOCKHASH_SIZE; i++)
                INIT_LIST_HEAD(lockhash_table + i);

        argc = parse_options(argc, argv, lock_options, lock_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc)
                usage_with_options(lock_usage, lock_options);

        if (!strncmp(argv[0], "rec", 3)) {
                return __cmd_record(argc, argv);
        } else if (!strncmp(argv[0], "report", 6)) {
                trace_handler = &report_lock_ops;
                if (argc) {
                        argc = parse_options(argc, argv,
                                             report_options, report_usage, 0);
                        if (argc)
                                usage_with_options(report_usage, report_options);
                }
                __cmd_report();
        } else if (!strcmp(argv[0], "script")) {
                /* Aliased to 'perf script' */
                return cmd_script(argc, argv, prefix);
        } else if (!strcmp(argv[0], "info")) {
                if (argc) {
                        argc = parse_options(argc, argv,
                                             info_options, info_usage, 0);
                        if (argc)
                                usage_with_options(info_usage, info_options);
                }
                /* recycling report_lock_ops */
                trace_handler = &report_lock_ops;
                setup_pager();
                read_events();
                dump_info();
        } else {
                usage_with_options(lock_usage, lock_options);
        }

        return 0;
}
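
/*
 * Typical usage (illustrative):
 *
 *        perf lock record -- <workload>        # writes perf.data
 *        perf lock report -k contended         # sort locks by contention count
 *        perf lock info -t                     # dump the thread list
 */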