/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/time.h>

#include <asm/atomic.h>

#include "trace.h"
#include "trace_output.h"

struct header_iter {
	struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
static atomic_t dropped_count;

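/*
 * Reset tracer state: clear the overrun bookkeeping and wipe the
 * per-CPU ring buffers so a new trace run starts clean.
 */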
static void mmio_reset_data(struct trace_array *tr)
{
	overrun_detected = false;
	prev_overruns = 0;

	tracing_reset_online_cpus(tr);
}

static int mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;

	mmio_reset_data(tr);
	enable_mmiotrace();
	return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);

	disable_mmiotrace();
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_reset_data(tr);
}

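/*
 * Emit one PCIDEV header line: bus number and devfn, vendor and device
 * IDs, IRQ, then the seven resource base addresses (with the region
 * flag bits OR'ed into the low bits), the seven resource sizes (0 for
 * unused BARs), and finally the bound driver's name, if any.
 */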
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int ret = 0;
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	/* XXX: incomplete checks for trace_seq_printf() return value */
	ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
				dev->bus->number, dev->devfn,
				dev->vendor, dev->device, dev->irq);
	/*
	 * XXX: is pci_resource_to_user() appropriate, since we are
	 * supposed to interpret the __ioremap() phys_addr argument based on
	 * these printed values?
	 */
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		ret += trace_seq_printf(s, " %s\n", drv->name);
	else
		ret += trace_seq_printf(s, " \n");
	return ret;
}

static void destroy_header_iter(struct header_iter *hiter)
{
	if (!hiter)
		return;
	pci_dev_put(hiter->dev);
	kfree(hiter);
}

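/*
 * Called when trace_pipe is opened: print the VERSION header and start
 * iterating over all PCI devices, so that mmio_read() can emit one
 * PCIDEV line per device ahead of the actual trace events.
 */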
static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);
	iter->private = NULL;
}

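/*
 * Total number of events lost since the last read: events we dropped
 * ourselves when the ring buffer reservation failed (dropped_count,
 * reset to zero by the xchg), plus any new ring buffer overruns since
 * the previous call.
 */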
static unsigned long count_overruns(struct trace_iterator *iter)
{
	unsigned long cnt = atomic_xchg(&dropped_count, 0);
	unsigned long over = ring_buffer_overruns(iter->tr->buffer);

	if (over > prev_overruns)
		cnt += over - prev_overruns;
	prev_overruns = over;
	return cnt;
}

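/*
 * trace_pipe read hook. A "MARK ... Lost N events." line takes
 * priority whenever overruns are detected; otherwise one PCIDEV
 * header line is produced per call until the device list is
 * exhausted. Once this returns 0, the core pipe code falls back to
 * formatting events through mmio_print_line().
 */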
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
				char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warning("mmiotrace has lost events.\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}

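/*
 * Format a read/write event as a text line: "R" or "W", access width,
 * timestamp in seconds.microseconds, map id, physical address, value,
 * the program counter of the access, and a literal 0 filler. An
 * unrecognized opcode is dumped byte by byte on an "UNKNOWN" line.
 */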
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned secs = (unsigned long)t;
	int ret = 1;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		ret = trace_seq_printf(s,
			"R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		ret = trace_seq_printf(s,
			"W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		ret = trace_seq_printf(s,
			"UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
			"%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		ret = trace_seq_printf(s, "rw what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}

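/*
 * Format an ioremap/iounmap event: a MAP line carries the map id,
 * physical address, virtual address and length; an UNMAP line only
 * the map id. The trailing zeros are fillers for the output format.
 */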
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned secs = (unsigned long)t;
	int ret;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		ret = trace_seq_printf(s,
			"MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		ret = trace_seq_printf(s,
			"UNMAP %u.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		ret = trace_seq_printf(s, "map what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}

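/*
 * Replay a user-injected marker (a TRACE_PRINT entry) on a MARK line,
 * prefixed with its timestamp.
 */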
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg = print->buf;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned secs = (unsigned long)t;
	int ret;

	/* The trailing newline must be in the message. */
	ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
	switch (iter->ent->type) {
	case TRACE_MMIO_RW:
		return mmio_print_rw(iter);
	case TRACE_MMIO_MAP:
		return mmio_print_map(iter);
	case TRACE_PRINT:
		return mmio_print_mark(iter);
	default:
		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
	}
}

static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.start		= mmio_trace_start,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.print_line	= mmio_print_line,
};

__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);

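/*
 * Record one MMIO read/write in the ring buffer. If the reservation
 * fails, the event is counted in dropped_count so mmio_read() can
 * report the loss later.
 */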
static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_rw *rw)
{
	struct ftrace_event_call *call = &event_mmiotrace_rw;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	entry->rw = *rw;

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, pc);
}

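/*
 * Entry point from the mmiotrace access hooks. No explicit preemption
 * protection around smp_processor_id() here, presumably because the
 * callers run in the page fault/trap path with preemption already
 * disabled (contrast with mmio_trace_mapping() below).
 */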
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = tr->data[smp_processor_id()];
	__trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_map *map)
{
	struct ftrace_event_call *call = &event_mmiotrace_map;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	entry->map = *map;

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, pc);
}

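/*
 * Record an ioremap/iounmap event. Unlike mmio_trace_rw(), this may
 * be reached from preemptible context, so preemption is disabled
 * around the smp_processor_id() lookup and the buffer commit.
 */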
void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = tr->data[smp_processor_id()];
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}

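/*
 * Inject a user marker into the trace; it is stored as a TRACE_PRINT
 * entry and printed by mmio_print_mark() as a MARK line.
 */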
int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}