tools/perf/util/session.c
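/*
 * perf.data session handling: opening and validating the file, byte-swapping
 * records written on a host of the opposite endianness, and walking the
 * on-disk event stream, dispatching each record to per-type handlers.
 */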

#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

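/*
 * Open self->filename read-only and sanity check it: it must be owned by the
 * current user or root (unless force is set), be non-empty, and carry a
 * readable perf header. On success self->fd and self->size are set up.
 */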
static int perf_session__open(struct perf_session *self, bool force)
{
    struct stat input_stat;

    self->fd = open(self->filename, O_RDONLY);
    if (self->fd < 0) {
        pr_err("failed to open file: %s", self->filename);
        if (!strcmp(self->filename, "perf.data"))
            pr_err(" (try 'perf record' first)");
        pr_err("\n");
        return -errno;
    }

    if (fstat(self->fd, &input_stat) < 0)
        goto out_close;

    if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
        pr_err("file %s not owned by current user or root\n",
               self->filename);
        goto out_close;
    }

    if (!input_stat.st_size) {
        pr_info("zero-sized file (%s), nothing to do!\n",
            self->filename);
        goto out_close;
    }

    if (perf_header__read(&self->header, self->fd) < 0) {
        pr_err("incompatible file format");
        goto out_close;
    }

    self->size = input_stat.st_size;
    return 0;

out_close:
    close(self->fd);
    self->fd = -1;
    return -1;
}

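/* Thin wrapper: create the kernel maps in this session's map groups. */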
static inline int perf_session__create_kernel_maps(struct perf_session *self)
{
    return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps);
}

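/*
 * Allocate and initialize a session. In O_RDONLY mode the perf.data file is
 * opened and its header read; in O_WRONLY mode the kernel maps are created
 * up front instead. Typical read-side usage (sketch only, error handling and
 * the perf_event_ops setup elided):
 *
 *     struct perf_session *session = perf_session__new("perf.data", O_RDONLY, false);
 *     if (session != NULL) {
 *         perf_session__process_events(session, &ops);
 *         perf_session__delete(session);
 *     }
 */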
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
    size_t len = filename ? strlen(filename) + 1 : 0;
    struct perf_session *self = zalloc(sizeof(*self) + len);

    if (self == NULL)
        goto out;

    if (perf_header__init(&self->header) < 0)
        goto out_free;

    memcpy(self->filename, filename, len);
    self->threads = RB_ROOT;
    self->stats_by_id = RB_ROOT;
    self->last_match = NULL;
    self->mmap_window = 32;
    self->cwd = NULL;
    self->cwdlen = 0;
    self->unknown_events = 0;
    map_groups__init(&self->kmaps);

    if (mode == O_RDONLY) {
        if (perf_session__open(self, force) < 0)
            goto out_delete;
    } else if (mode == O_WRONLY) {
        /*
         * In O_RDONLY mode this will be performed when reading the
         * kernel MMAP event, in event__process_mmap().
         */
        if (perf_session__create_kernel_maps(self) < 0)
            goto out_delete;
    }

    self->sample_type = perf_header__sample_type(&self->header);
out:
    return self;
out_free:
    free(self);
    return NULL;
out_delete:
    perf_session__delete(self);
    return NULL;
}

void perf_session__delete(struct perf_session *self)
{
    perf_header__exit(&self->header);
    close(self->fd);
    free(self->cwd);
    free(self);
}

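/* Return true if the symbol name matches the global parent_regex set up by the sort code. */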
static bool symbol__match_parent_regex(struct symbol *sym)
{
    if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
        return 1;

    return 0;
}

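/*
 * Resolve every ip in a callchain to a symbol. PERF_CONTEXT_* markers in the
 * chain switch the cpumode used for the entries that follow them. When
 * symbol_conf.use_callchain is set, a calloc'ed array of chain->nr symbol
 * pointers is returned and the caller owns it; otherwise the walk stops at
 * the first resolved symbol and NULL is returned. *parent is set to the
 * first symbol matching the parent regex when sorting by parent is enabled.
 */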
struct symbol **perf_session__resolve_callchain(struct perf_session *self,
                        struct thread *thread,
                        struct ip_callchain *chain,
                        struct symbol **parent)
{
    u8 cpumode = PERF_RECORD_MISC_USER;
    struct symbol **syms = NULL;
    unsigned int i;

    if (symbol_conf.use_callchain) {
        syms = calloc(chain->nr, sizeof(*syms));
        if (!syms) {
            fprintf(stderr, "Can't allocate memory for symbols\n");
            exit(-1);
        }
    }

    for (i = 0; i < chain->nr; i++) {
        u64 ip = chain->ips[i];
        struct addr_location al;

        if (ip >= PERF_CONTEXT_MAX) {
            switch (ip) {
            case PERF_CONTEXT_HV:
                cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
            case PERF_CONTEXT_KERNEL:
                cpumode = PERF_RECORD_MISC_KERNEL; break;
            case PERF_CONTEXT_USER:
                cpumode = PERF_RECORD_MISC_USER; break;
            default:
                break;
            }
            continue;
        }

        thread__find_addr_location(thread, self, cpumode,
                       MAP__FUNCTION, ip, &al, NULL);
        if (al.sym != NULL) {
            if (sort__has_parent && !*parent &&
                symbol__match_parent_regex(al.sym))
                *parent = al.sym;
            if (!symbol_conf.use_callchain)
                break;
            syms[i] = al.sym;
        }
    }

    return syms;
}

static int process_event_stub(event_t *event __used,
                  struct perf_session *session __used)
{
    dump_printf(": unhandled!\n");
    return 0;
}

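/* Point every handler the caller left NULL at the no-op process_event_stub. */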
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
    if (handler->sample == NULL)
        handler->sample = process_event_stub;
    if (handler->mmap == NULL)
        handler->mmap = process_event_stub;
    if (handler->comm == NULL)
        handler->comm = process_event_stub;
    if (handler->fork == NULL)
        handler->fork = process_event_stub;
    if (handler->exit == NULL)
        handler->exit = process_event_stub;
    if (handler->lost == NULL)
        handler->lost = process_event_stub;
    if (handler->read == NULL)
        handler->read = process_event_stub;
    if (handler->throttle == NULL)
        handler->throttle = process_event_stub;
    if (handler->unthrottle == NULL)
        handler->unthrottle = process_event_stub;
}

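/* Human-readable names and global counters per record type, dumped by event__print_totals(). */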
static const char *event__name[] = {
    [0] = "TOTAL",
    [PERF_RECORD_MMAP] = "MMAP",
    [PERF_RECORD_LOST] = "LOST",
    [PERF_RECORD_COMM] = "COMM",
    [PERF_RECORD_EXIT] = "EXIT",
    [PERF_RECORD_THROTTLE] = "THROTTLE",
    [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
    [PERF_RECORD_FORK] = "FORK",
    [PERF_RECORD_READ] = "READ",
    [PERF_RECORD_SAMPLE] = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_MAX];

void event__print_totals(void)
{
    int i;
    for (i = 0; i < PERF_RECORD_MAX; ++i)
        pr_info("%10s events: %10ld\n",
            event__name[i], event__total[i]);
}

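/*
 * Byte-swap a buffer in place as a sequence of u64 words. Callers are
 * expected to pass a byte_size that is a multiple of sizeof(u64).
 */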
void mem_bswap_64(void *src, int byte_size)
{
    u64 *m = src;

    while (byte_size > 0) {
        *m = bswap_64(*m);
        byte_size -= sizeof(u64);
        ++m;
    }
}

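/*
 * Per-record-type byte-swap helpers for files recorded on a host of the
 * other endianness. event__all64_swap() treats everything after the header
 * as an array of u64s, which assumes the record body is made up of
 * u64-sized fields.
 */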
static void event__all64_swap(event_t *self)
{
    struct perf_event_header *hdr = &self->header;
    mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
    self->comm.pid = bswap_32(self->comm.pid);
    self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
    self->mmap.pid = bswap_32(self->mmap.pid);
    self->mmap.tid = bswap_32(self->mmap.tid);
    self->mmap.start = bswap_64(self->mmap.start);
    self->mmap.len = bswap_64(self->mmap.len);
    self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
    self->fork.pid = bswap_32(self->fork.pid);
    self->fork.tid = bswap_32(self->fork.tid);
    self->fork.ppid = bswap_32(self->fork.ppid);
    self->fork.ptid = bswap_32(self->fork.ptid);
    self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
    self->read.pid = bswap_32(self->read.pid);
    self->read.tid = bswap_32(self->read.tid);
    self->read.value = bswap_64(self->read.value);
    self->read.time_enabled = bswap_64(self->read.time_enabled);
    self->read.time_running = bswap_64(self->read.time_running);
    self->read.id = bswap_64(self->read.id);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
    [PERF_RECORD_MMAP] = event__mmap_swap,
    [PERF_RECORD_COMM] = event__comm_swap,
    [PERF_RECORD_FORK] = event__task_swap,
    [PERF_RECORD_EXIT] = event__task_swap,
    [PERF_RECORD_LOST] = event__all64_swap,
    [PERF_RECORD_READ] = event__read_swap,
    [PERF_RECORD_SAMPLE] = event__all64_swap,
    [PERF_RECORD_MAX] = NULL,
};

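/*
 * Central dispatcher: count the record, byte-swap it if the file header says
 * the data needs it, then hand it to the matching callback in ops. Unknown
 * record types bump self->unknown_events and return -1.
 */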
static int perf_session__process_event(struct perf_session *self,
                       event_t *event,
                       struct perf_event_ops *ops,
                       u64 offset, u64 head)
{
    trace_event(event);

    if (event->header.type < PERF_RECORD_MAX) {
        dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
                offset + head, event->header.size,
                event__name[event->header.type]);
        ++event__total[0];
        ++event__total[event->header.type];
    }

    if (self->header.needs_swap && event__swap_ops[event->header.type])
        event__swap_ops[event->header.type](event);

    switch (event->header.type) {
    case PERF_RECORD_SAMPLE:
        return ops->sample(event, self);
    case PERF_RECORD_MMAP:
        return ops->mmap(event, self);
    case PERF_RECORD_COMM:
        return ops->comm(event, self);
    case PERF_RECORD_FORK:
        return ops->fork(event, self);
    case PERF_RECORD_EXIT:
        return ops->exit(event, self);
    case PERF_RECORD_LOST:
        return ops->lost(event, self);
    case PERF_RECORD_READ:
        return ops->read(event, self);
    case PERF_RECORD_THROTTLE:
        return ops->throttle(event, self);
    case PERF_RECORD_UNTHROTTLE:
        return ops->unthrottle(event, self);
    default:
        self->unknown_events++;
        return -1;
    }
}

void perf_event_header__bswap(struct perf_event_header *self)
{
    self->type = bswap_32(self->type);
    self->misc = bswap_16(self->misc);
    self->size = bswap_16(self->size);
}

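/*
 * Read the build-id table in [offset, offset + size) of fd 'input': a
 * sequence of build_id_event records, each followed by the DSO filename.
 * Each build id is attached to the corresponding DSO on the kernel or user
 * list, depending on the misc bits in the record header.
 */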
int perf_header__read_build_ids(struct perf_header *self,
                int input, u64 offset, u64 size)
{
    struct build_id_event bev;
    char filename[PATH_MAX];
    u64 limit = offset + size;
    int err = -1;

    while (offset < limit) {
        struct dso *dso;
        ssize_t len;
        struct list_head *head = &dsos__user;

        if (read(input, &bev, sizeof(bev)) != sizeof(bev))
            goto out;

        if (self->needs_swap)
            perf_event_header__bswap(&bev.header);

        len = bev.header.size - sizeof(bev);
        if (read(input, filename, len) != len)
            goto out;

        if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
            head = &dsos__kernel;

        dso = __dsos__findnew(head, filename);
        if (dso != NULL) {
            dso__set_build_id(dso, &bev.build_id);
            if (head == &dsos__kernel && filename[0] == '[')
                dso->kernel = 1;
        }

        offset += bev.header.size;
    }
    err = 0;
out:
    return err;
}

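/* Make sure pid 0 exists in the session's thread tree and is named "swapper". */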
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
    struct thread *thread = perf_session__findnew(self, 0);

    if (thread == NULL || thread__set_comm(thread, "swapper")) {
        pr_err("problem inserting idle task.\n");
        thread = NULL;
    }

    return thread;
}

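/*
 * Walk the event stream starting at data_offset by mmapping the file one
 * window of self->mmap_window pages at a time, remapping whenever a record
 * would cross the end of the current window. When the data needs
 * byte-swapping, the window is mapped MAP_PRIVATE and writable so records
 * can be swapped in place without touching the file. Records that fail to
 * parse are skipped by re-aligning and advancing 8 bytes in the hope of
 * resynchronizing with the stream.
 */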
int __perf_session__process_events(struct perf_session *self,
                   u64 data_offset, u64 data_size,
                   u64 file_size, struct perf_event_ops *ops)
{
    int err, mmap_prot, mmap_flags;
    u64 head, shift;
    u64 offset = 0;
    size_t page_size;
    event_t *event;
    uint32_t size;
    char *buf;

    perf_event_ops__fill_defaults(ops);

    page_size = sysconf(_SC_PAGESIZE);

    head = data_offset;
    shift = page_size * (head / page_size);
    offset += shift;
    head -= shift;

    mmap_prot = PROT_READ;
    mmap_flags = MAP_SHARED;

    if (self->header.needs_swap) {
        mmap_prot |= PROT_WRITE;
        mmap_flags = MAP_PRIVATE;
    }
remap:
    buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
           mmap_flags, self->fd, offset);
    if (buf == MAP_FAILED) {
        pr_err("failed to mmap file\n");
        err = -errno;
        goto out_err;
    }

more:
    event = (event_t *)(buf + head);

    if (self->header.needs_swap)
        perf_event_header__bswap(&event->header);
    size = event->header.size;
    if (size == 0)
        size = 8;

    if (head + event->header.size >= page_size * self->mmap_window) {
        int munmap_ret;

        shift = page_size * (head / page_size);

        munmap_ret = munmap(buf, page_size * self->mmap_window);
        assert(munmap_ret == 0);

        offset += shift;
        head -= shift;
        goto remap;
    }

    size = event->header.size;

    dump_printf("\n%#Lx [%#x]: event: %d\n",
            offset + head, event->header.size, event->header.type);

    if (size == 0 ||
        perf_session__process_event(self, event, ops, offset, head) < 0) {
        dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
                offset + head, event->header.size,
                event->header.type);
        /*
         * assume we lost track of the stream, check alignment, and
         * increment a single u64 in the hope to catch on again 'soon'.
         */
        if (unlikely(head & 7))
            head &= ~7ULL;

        size = 8;
    }

    head += size;

    if (offset + head >= data_offset + data_size)
        goto done;

    if (offset + head < file_size)
        goto more;
done:
    err = 0;
out_err:
    return err;
}

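/*
 * High-level entry point: register the idle thread, stash the current
 * working directory (unless full paths were requested), then process the
 * data section described by the file header.
 */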
int perf_session__process_events(struct perf_session *self,
                 struct perf_event_ops *ops)
{
    int err;

    if (perf_session__register_idle_thread(self) == NULL)
        return -ENOMEM;

    if (!symbol_conf.full_paths) {
        char bf[PATH_MAX];

        if (getcwd(bf, sizeof(bf)) == NULL) {
            err = -errno;
out_getcwd_err:
            pr_err("failed to get the current directory\n");
            goto out_err;
        }
        self->cwd = strdup(bf);
        if (self->cwd == NULL) {
            err = -ENOMEM;
            goto out_getcwd_err;
        }
        self->cwdlen = strlen(self->cwd);
    }

    err = __perf_session__process_events(self, self->header.data_offset,
                         self->header.data_size,
                         self->size, ops);
out_err:
    return err;
}

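/* Check that the file contains raw trace samples (PERF_SAMPLE_RAW); msg names the subcommand to suggest in the error. */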
bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
    if (!(self->sample_type & PERF_SAMPLE_RAW)) {
        pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
        return false;
    }

    return true;
}

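/*
 * Record a reference symbol and its kallsyms address for every kernel map
 * type; the name is duplicated and anything from a trailing ']' onwards is
 * chopped off. The symbol loading code can later compare this address with
 * the one found in the vmlinux image to compute the kernel relocation (see
 * map__reloc_vmlinux() below).
 */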
int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
                         const char *symbol_name,
                         u64 addr)
{
    char *bracket;
    enum map_type i;

    self->ref_reloc_sym.name = strdup(symbol_name);
    if (self->ref_reloc_sym.name == NULL)
        return -ENOMEM;

    bracket = strchr(self->ref_reloc_sym.name, ']');
    if (bracket)
        *bracket = '\0';

    self->ref_reloc_sym.addr = addr;

    for (i = 0; i < MAP__NR_TYPES; ++i) {
        struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
        kmap->ref_reloc_sym = &self->ref_reloc_sym;
    }

    return 0;
}

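/*
 * map_ip/unmap_ip replacements installed once a relocation offset is known:
 * the map's pgoff field is reused to hold the signed relocation delta.
 */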
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
    return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
    return ip - (s64)map->pgoff;
}

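/*
 * If the reference symbol resolved to different addresses in kallsyms and in
 * the unrelocated vmlinux image, switch this map over to the relocation
 * helpers above and store the delta in pgoff.
 */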
void map__reloc_vmlinux(struct map *self)
{
    struct kmap *kmap = map__kmap(self);
    s64 reloc;

    if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
        return;

    reloc = (kmap->ref_reloc_sym->unrelocated_addr -
         kmap->ref_reloc_sym->addr);

    if (!reloc)
        return;

    self->map_ip = map__reloc_map_ip;
    self->unmap_ip = map__reloc_unmap_ip;
    self->pgoff = reloc;
}
