Root/
1 | /* |
2 | * trace_events_filter - generic event filtering |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or |
7 | * (at your option) any later version. |
8 | * |
9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
17 | * |
18 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> |
19 | */ |
20 | |
21 | #include <linux/module.h> |
22 | #include <linux/ctype.h> |
23 | #include <linux/mutex.h> |
24 | #include <linux/perf_event.h> |
25 | #include <linux/slab.h> |
26 | |
27 | #include "trace.h" |
28 | #include "trace_output.h" |
29 | |
/* Placeholder text shown when a subsystem has no filter string set. */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
35 | |
/*
 * Operator ids of the filter language.  The values index nothing
 * directly, but filter_ops[] below must list one entry per id.
 */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_NONE,
	OP_OPEN_PAREN,
};

/* One operator table entry: id, textual form and binding precedence. */
struct filter_op {
	int id;
	char *string;
	int precedence;
};

/*
 * Operator table scanned linearly by is_op_char()/infix_get_op().
 * Those scans stop at the "OP_NONE" string, which therefore acts as
 * the end-of-scan sentinel; the OP_OPEN_PAREN entry after it is only
 * reached by id.  Higher precedence binds tighter.
 */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
70 | |
/*
 * Parse/apply error codes.  Each value indexes the matching message
 * in err_text[] below, so the two lists must stay in the same order.
 */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
};

/* Human-readable messages, indexed by the FILT_ERR_* values above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
};
102 | |
/* Operator stack node used while converting infix input to postfix. */
struct opstack_op {
	int op;
	struct list_head list;
};

/* One postfix token: an operator id, or OP_NONE plus an operand string. */
struct postfix_elt {
	int op;
	char *operand;
	struct list_head list;
};

/* All state carried through parsing one filter string. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* pending operators (LIFO) */
	struct list_head postfix;	/* resulting postfix token list */
	int lasterr;			/* FILT_ERR_* of the last failure */
	int lasterr_pos;		/* input position of that failure */

	struct {
		char *string;		/* raw infix input */
		unsigned int cnt;	/* characters left to consume */
		unsigned int tail;	/* index of next character to read */
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL];	/* operand being built */
		int pos;
		unsigned int tail;	/* next free slot in string[] */
	} operand;
};

/* Fixed-capacity stack of predicate pointers, filled from the top down. */
struct pred_stack {
	struct filter_pred **preds;
	int index;
};
138 | |
/*
 * Generate filter_pred_<type>(): an ordered comparison (<, <=, >, >=)
 * of the event field at pred->offset against pred->val.  Any other op
 * leaves match at 0.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}

/*
 * Generate filter_pred_<size>(): equality test of a <size>-bit field
 * against pred->val, inverted when pred->not is set (used for OP_NE).
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
191 | |
192 | /* Filter predicate for fixed sized arrays of characters */ |
193 | static int filter_pred_string(struct filter_pred *pred, void *event) |
194 | { |
195 | char *addr = (char *)(event + pred->offset); |
196 | int cmp, match; |
197 | |
198 | cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len); |
199 | |
200 | match = cmp ^ pred->not; |
201 | |
202 | return match; |
203 | } |
204 | |
205 | /* Filter predicate for char * pointers */ |
206 | static int filter_pred_pchar(struct filter_pred *pred, void *event) |
207 | { |
208 | char **addr = (char **)(event + pred->offset); |
209 | int cmp, match; |
210 | int len = strlen(*addr) + 1; /* including tailing '\0' */ |
211 | |
212 | cmp = pred->regex.match(*addr, &pred->regex, len); |
213 | |
214 | match = cmp ^ pred->not; |
215 | |
216 | return match; |
217 | } |
218 | |
219 | /* |
220 | * Filter predicate for dynamic sized arrays of characters. |
221 | * These are implemented through a list of strings at the end |
222 | * of the entry. |
223 | * Also each of these strings have a field in the entry which |
224 | * contains its offset from the beginning of the entry. |
225 | * We have then first to get this field, dereference it |
226 | * and add it to the address of the entry, and at last we have |
227 | * the address of the string. |
228 | */ |
229 | static int filter_pred_strloc(struct filter_pred *pred, void *event) |
230 | { |
231 | u32 str_item = *(u32 *)(event + pred->offset); |
232 | int str_loc = str_item & 0xffff; |
233 | int str_len = str_item >> 16; |
234 | char *addr = (char *)(event + str_loc); |
235 | int cmp, match; |
236 | |
237 | cmp = pred->regex.match(addr, &pred->regex, str_len); |
238 | |
239 | match = cmp ^ pred->not; |
240 | |
241 | return match; |
242 | } |
243 | |
/* Placeholder predicate that never matches; the default for unset preds. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
248 | |
249 | /* |
250 | * regex_match_foo - Basic regex callbacks |
251 | * |
252 | * @str: the string to be searched |
253 | * @r: the regex structure containing the pattern string |
254 | * @len: the length of the string to be searched (including '\0') |
255 | * |
256 | * Note: |
257 | * - @str might not be NULL-terminated if it's of type DYN_STRING |
258 | * or STATIC_STRING |
259 | */ |
260 | |
261 | static int regex_match_full(char *str, struct regex *r, int len) |
262 | { |
263 | if (strncmp(str, r->pattern, len) == 0) |
264 | return 1; |
265 | return 0; |
266 | } |
267 | |
268 | static int regex_match_front(char *str, struct regex *r, int len) |
269 | { |
270 | if (strncmp(str, r->pattern, r->len) == 0) |
271 | return 1; |
272 | return 0; |
273 | } |
274 | |
275 | static int regex_match_middle(char *str, struct regex *r, int len) |
276 | { |
277 | if (strnstr(str, r->pattern, len)) |
278 | return 1; |
279 | return 0; |
280 | } |
281 | |
282 | static int regex_match_end(char *str, struct regex *r, int len) |
283 | { |
284 | int strlen = len - 1; |
285 | |
286 | if (strlen >= r->len && |
287 | memcmp(str + strlen - r->len, r->pattern, r->len) == 0) |
288 | return 1; |
289 | return 0; |
290 | } |
291 | |
/**
 * filter_parse_regex - parse a basic regex
 * @buff: the raw regex
 * @len: length of the regex
 * @search: will point to the beginning of the string to compare
 * @not: tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 * search returns the pointer to use for comparison.
 * not returns 1 if buff started with a '!'
 * 0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* A leading '!' inverts the match and is not part of the pattern. */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* Leading '*': match the end of the string. */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/*
				 * Later '*': front match, or middle match if
				 * a leading '*' was already seen.  The '*' is
				 * overwritten with '\0' to terminate the
				 * pattern; scanning stops at this first
				 * non-leading '*'.
				 */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
341 | |
/*
 * Select the regex match() callback for @pred.  For OP_GLOB the
 * pattern is first parsed for '*' wildcards (stripped in place by
 * filter_parse_regex()); any other op keeps MATCH_FULL.  A '!'
 * prefix in the pattern toggles pred->not.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* Shift the stripped pattern (and its '\0') to the front. */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	pred->not ^= not;
}
372 | |
/* Direction of the current step in the iterative tree walk. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning to parent from its left child */
	MOVE_UP_FROM_RIGHT	/* returning to parent from its right child */
};
378 | |
379 | static struct filter_pred * |
380 | get_pred_parent(struct filter_pred *pred, struct filter_pred *preds, |
381 | int index, enum move_type *move) |
382 | { |
383 | if (pred->parent & FILTER_PRED_IS_RIGHT) |
384 | *move = MOVE_UP_FROM_RIGHT; |
385 | else |
386 | *move = MOVE_UP_FROM_LEFT; |
387 | pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT]; |
388 | |
389 | return pred; |
390 | } |
391 | |
/* Verdicts a walk_pred_tree() callback may return. */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, return the cb's *err */
	WALK_PRED_PARENT,	/* skip children, climb to the parent */
	WALK_PRED_DEFAULT,	/* continue the normal traversal */
};

/* Callback invoked at every visit of a predicate during the walk. */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
401 | |
/*
 * Iteratively walk the predicate tree rooted at @root, invoking @cb on
 * the way down and when returning from either child.  No recursion is
 * used: the parent links embedded in each pred allow climbing back up.
 * Returns 0 on completion, or the *err set by an aborting callback.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* Leaves are marked by an invalid left index. */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* Left side done; descend into the right child. */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			/* Climbing past the root means the walk is done. */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
448 | |
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 *  if ((match && op->op == OP_OR) ||
	 *      (!match && op->op == OP_AND))
	 *	  return match;
	 */
	type = op->op == OP_OR;

	/* op->val holds the number of folded children in op->ops[]. */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		/* A pred without a match function would be a bug. */
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			return match;
	}
	return match;
}
485 | |
/* Per-walk context passed from filter_match_preds() to its callback. */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array of the filter */
	int match;			/* running match result */
	void *rec;			/* event record being tested */
};
491 | |
/*
 * walk_pred_tree() callback that evaluates the tree against d->rec.
 * On the way down, folded AND/OR runs are evaluated via process_ops()
 * and plain leaves via pred->fn(); partial results are short-circuited
 * when returning from a left child.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
531 | |
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* The walk can only fail if preds is NULL, checked above. */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
565 | |
/* Record the last parse error and its position for append_filter_err(). */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
571 | |
572 | static void remove_filter_string(struct event_filter *filter) |
573 | { |
574 | if (!filter) |
575 | return; |
576 | |
577 | kfree(filter->filter_string); |
578 | filter->filter_string = NULL; |
579 | } |
580 | |
581 | static int replace_filter_string(struct event_filter *filter, |
582 | char *filter_string) |
583 | { |
584 | kfree(filter->filter_string); |
585 | filter->filter_string = kstrdup(filter_string, GFP_KERNEL); |
586 | if (!filter->filter_string) |
587 | return -ENOMEM; |
588 | |
589 | return 0; |
590 | } |
591 | |
592 | static int append_filter_string(struct event_filter *filter, |
593 | char *string) |
594 | { |
595 | int newlen; |
596 | char *new_filter_string; |
597 | |
598 | BUG_ON(!filter->filter_string); |
599 | newlen = strlen(filter->filter_string) + strlen(string) + 1; |
600 | new_filter_string = kmalloc(newlen, GFP_KERNEL); |
601 | if (!new_filter_string) |
602 | return -ENOMEM; |
603 | |
604 | strcpy(new_filter_string, filter->filter_string); |
605 | strcat(new_filter_string, string); |
606 | kfree(filter->filter_string); |
607 | filter->filter_string = new_filter_string; |
608 | |
609 | return 0; |
610 | } |
611 | |
/*
 * Append a "^\nparse_error: <msg>" marker to the filter string, with
 * the '^' positioned under lasterr_pos of the previously shown input.
 * A whole page of spaces is used as scratch; positions too close to
 * the end of the page are clamped to 0 so the sprintf cannot overrun.
 * NOTE(review): append_filter_string() return values are ignored here,
 * so on -ENOMEM the error text is silently dropped.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	/* sprintf supplies the '\0' that terminates the page buffer. */
	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
633 | |
634 | void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) |
635 | { |
636 | struct event_filter *filter; |
637 | |
638 | mutex_lock(&event_mutex); |
639 | filter = call->filter; |
640 | if (filter && filter->filter_string) |
641 | trace_seq_printf(s, "%s\n", filter->filter_string); |
642 | else |
643 | trace_seq_printf(s, "none\n"); |
644 | mutex_unlock(&event_mutex); |
645 | } |
646 | |
647 | void print_subsystem_event_filter(struct event_subsystem *system, |
648 | struct trace_seq *s) |
649 | { |
650 | struct event_filter *filter; |
651 | |
652 | mutex_lock(&event_mutex); |
653 | filter = system->filter; |
654 | if (filter && filter->filter_string) |
655 | trace_seq_printf(s, "%s\n", filter->filter_string); |
656 | else |
657 | trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); |
658 | mutex_unlock(&event_mutex); |
659 | } |
660 | |
661 | static struct ftrace_event_field * |
662 | __find_event_field(struct list_head *head, char *name) |
663 | { |
664 | struct ftrace_event_field *field; |
665 | |
666 | list_for_each_entry(field, head, link) { |
667 | if (!strcmp(field->name, name)) |
668 | return field; |
669 | } |
670 | |
671 | return NULL; |
672 | } |
673 | |
674 | static struct ftrace_event_field * |
675 | find_event_field(struct ftrace_event_call *call, char *name) |
676 | { |
677 | struct ftrace_event_field *field; |
678 | struct list_head *head; |
679 | |
680 | field = __find_event_field(&ftrace_common_fields, name); |
681 | if (field) |
682 | return field; |
683 | |
684 | head = trace_get_fields(call); |
685 | return __find_event_field(head, name); |
686 | } |
687 | |
688 | static int __alloc_pred_stack(struct pred_stack *stack, int n_preds) |
689 | { |
690 | stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL); |
691 | if (!stack->preds) |
692 | return -ENOMEM; |
693 | stack->index = n_preds; |
694 | return 0; |
695 | } |
696 | |
/* Release the stack storage and reset the push/pop cursor. */
static void __free_pred_stack(struct pred_stack *stack)
{
	kfree(stack->preds);
	stack->index = 0;
}
702 | |
703 | static int __push_pred_stack(struct pred_stack *stack, |
704 | struct filter_pred *pred) |
705 | { |
706 | int index = stack->index; |
707 | |
708 | if (WARN_ON(index == 0)) |
709 | return -ENOSPC; |
710 | |
711 | stack->preds[--index] = pred; |
712 | stack->index = index; |
713 | return 0; |
714 | } |
715 | |
716 | static struct filter_pred * |
717 | __pop_pred_stack(struct pred_stack *stack) |
718 | { |
719 | struct filter_pred *pred; |
720 | int index = stack->index; |
721 | |
722 | pred = stack->preds[index++]; |
723 | if (!pred) |
724 | return NULL; |
725 | |
726 | stack->index = index; |
727 | return pred; |
728 | } |
729 | |
/*
 * Copy @src into slot @idx of the filter's flat predicate array and
 * wire it into the tree.  Logical ops (AND/OR) pop their two children
 * off @stack and link them via index-based left/right/parent fields;
 * leaves are marked by an invalid left index.  The new predicate is
 * then pushed back for its future parent.  The FILTER_PRED_FOLD bit
 * travels in the index so runs of identical ops can later be
 * collapsed into an ops[] array (see process_ops()).
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    (left->op == dest->op ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    (right->op == dest->op ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* Strip the fold bit: left/right/parent are plain indices. */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
777 | |
778 | static void __free_preds(struct event_filter *filter) |
779 | { |
780 | if (filter->preds) { |
781 | kfree(filter->preds); |
782 | filter->preds = NULL; |
783 | } |
784 | filter->a_preds = 0; |
785 | filter->n_preds = 0; |
786 | } |
787 | |
/* Clear the FILTERED flag so this event's records are no longer filtered. */
static void filter_disable(struct ftrace_event_call *call)
{
	call->flags &= ~TRACE_EVENT_FL_FILTERED;
}
792 | |
793 | static void __free_filter(struct event_filter *filter) |
794 | { |
795 | if (!filter) |
796 | return; |
797 | |
798 | __free_preds(filter); |
799 | kfree(filter->filter_string); |
800 | kfree(filter); |
801 | } |
802 | |
/*
 * Called when destroying the ftrace_event_call.
 * The call is being freed, so we do not need to worry about
 * the call being currently used. This is for module code removing
 * the tracepoints from within it.
 */
void destroy_preds(struct ftrace_event_call *call)
{
	/* __free_filter() accepts a NULL filter. */
	__free_filter(call->filter);
	call->filter = NULL;
}
814 | |
/* Allocate a zeroed event_filter; returns NULL on allocation failure. */
static struct event_filter *__alloc_filter(void)
{
	struct event_filter *filter;

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	return filter;
}
822 | |
823 | static int __alloc_preds(struct event_filter *filter, int n_preds) |
824 | { |
825 | struct filter_pred *pred; |
826 | int i; |
827 | |
828 | if (filter->preds) |
829 | __free_preds(filter); |
830 | |
831 | filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL); |
832 | |
833 | if (!filter->preds) |
834 | return -ENOMEM; |
835 | |
836 | filter->a_preds = n_preds; |
837 | filter->n_preds = 0; |
838 | |
839 | for (i = 0; i < n_preds; i++) { |
840 | pred = &filter->preds[i]; |
841 | pred->fn = filter_pred_none; |
842 | } |
843 | |
844 | return 0; |
845 | } |
846 | |
847 | static void filter_free_subsystem_preds(struct event_subsystem *system) |
848 | { |
849 | struct ftrace_event_call *call; |
850 | |
851 | list_for_each_entry(call, &ftrace_events, list) { |
852 | if (strcmp(call->class->system, system->name) != 0) |
853 | continue; |
854 | |
855 | filter_disable(call); |
856 | remove_filter_string(call->filter); |
857 | } |
858 | } |
859 | |
860 | static void filter_free_subsystem_filters(struct event_subsystem *system) |
861 | { |
862 | struct ftrace_event_call *call; |
863 | |
864 | list_for_each_entry(call, &ftrace_events, list) { |
865 | if (strcmp(call->class->system, system->name) != 0) |
866 | continue; |
867 | __free_filter(call->filter); |
868 | call->filter = NULL; |
869 | } |
870 | } |
871 | |
872 | static int filter_add_pred(struct filter_parse_state *ps, |
873 | struct event_filter *filter, |
874 | struct filter_pred *pred, |
875 | struct pred_stack *stack) |
876 | { |
877 | int err; |
878 | |
879 | if (WARN_ON(filter->n_preds == filter->a_preds)) { |
880 | parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); |
881 | return -ENOSPC; |
882 | } |
883 | |
884 | err = filter_set_pred(filter, filter->n_preds, stack, pred); |
885 | if (err) |
886 | return err; |
887 | |
888 | filter->n_preds++; |
889 | |
890 | return 0; |
891 | } |
892 | |
893 | int filter_assign_type(const char *type) |
894 | { |
895 | if (strstr(type, "__data_loc") && strstr(type, "char")) |
896 | return FILTER_DYN_STRING; |
897 | |
898 | if (strchr(type, '[') && strstr(type, "char")) |
899 | return FILTER_STATIC_STRING; |
900 | |
901 | return FILTER_OTHER; |
902 | } |
903 | |
/* True for the special function-trace field type. */
static bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
908 | |
909 | static bool is_string_field(struct ftrace_event_field *field) |
910 | { |
911 | return field->filter_type == FILTER_DYN_STRING || |
912 | field->filter_type == FILTER_STATIC_STRING || |
913 | field->filter_type == FILTER_PTR_STRING; |
914 | } |
915 | |
916 | static int is_legal_op(struct ftrace_event_field *field, int op) |
917 | { |
918 | if (is_string_field(field) && |
919 | (op != OP_EQ && op != OP_NE && op != OP_GLOB)) |
920 | return 0; |
921 | if (!is_string_field(field) && op == OP_GLOB) |
922 | return 0; |
923 | |
924 | return 1; |
925 | } |
926 | |
/*
 * Pick the integer predicate for @op on a @field_size-byte field.
 * Equality ops use the size-only filter_pred_<N>() helpers (sign is
 * irrelevant for ==/!=); relational ops pick the signed or unsigned
 * variant.  Returns NULL for unsupported field sizes.
 */
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
					     int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = filter_pred_s64;
		else
			fn = filter_pred_u64;
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = filter_pred_s32;
		else
			fn = filter_pred_u32;
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = filter_pred_s16;
		else
			fn = filter_pred_u16;
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = filter_pred_s8;
		else
			fn = filter_pred_u8;
		break;
	}

	return fn;
}
969 | |
/*
 * Bind a parsed predicate to its event field: validate the op for the
 * field type, build the regex or parse the integer value, and select
 * the match function.  Returns 0, or -EINVAL with a parse error set.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* Function tracing: only the "ip" field may be filtered. */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		if (field->is_signed)
			ret = strict_strtoll(pred->regex.pattern, 0, &val);
		else
			ret = strict_strtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* OP_NE is implemented as an inverted equality match. */
	if (pred->op == OP_NE)
		pred->not = 1;

	pred->fn = fn;
	return 0;
}
1026 | |
1027 | static void parse_init(struct filter_parse_state *ps, |
1028 | struct filter_op *ops, |
1029 | char *infix_string) |
1030 | { |
1031 | memset(ps, '\0', sizeof(*ps)); |
1032 | |
1033 | ps->infix.string = infix_string; |
1034 | ps->infix.cnt = strlen(infix_string); |
1035 | ps->ops = ops; |
1036 | |
1037 | INIT_LIST_HEAD(&ps->opstack); |
1038 | INIT_LIST_HEAD(&ps->postfix); |
1039 | } |
1040 | |
1041 | static char infix_next(struct filter_parse_state *ps) |
1042 | { |
1043 | ps->infix.cnt--; |
1044 | |
1045 | return ps->infix.string[ps->infix.tail++]; |
1046 | } |
1047 | |
1048 | static char infix_peek(struct filter_parse_state *ps) |
1049 | { |
1050 | if (ps->infix.tail == strlen(ps->infix.string)) |
1051 | return 0; |
1052 | |
1053 | return ps->infix.string[ps->infix.tail]; |
1054 | } |
1055 | |
/* Consume one input character without returning it (cf. infix_next()). */
static void infix_advance(struct filter_parse_state *ps)
{
	ps->infix.cnt--;
	ps->infix.tail++;
}
1061 | |
/* True when op @a binds less tightly than op @b. */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}
1067 | |
1068 | static inline int is_op_char(struct filter_parse_state *ps, char c) |
1069 | { |
1070 | int i; |
1071 | |
1072 | for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) { |
1073 | if (ps->ops[i].string[0] == c) |
1074 | return 1; |
1075 | } |
1076 | |
1077 | return 0; |
1078 | } |
1079 | |
/*
 * Having consumed @firstc, try to form a two-character operator with
 * the peeked next character (advancing past it on success), then fall
 * back to a one-character operator.  The table scan stops at the
 * "OP_NONE" sentinel string.  Returns the op id, or OP_NONE if the
 * character(s) form no known operator.
 */
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
	char nextc = infix_peek(ps);
	char opstr[3];
	int i;

	opstr[0] = firstc;
	opstr[1] = nextc;
	opstr[2] = '\0';

	/* Two-character operators first, so "<=" is not read as "<". */
	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string)) {
			infix_advance(ps);
			return ps->ops[i].id;
		}
	}

	opstr[1] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string))
			return ps->ops[i].id;
	}

	return OP_NONE;
}
1106 | |
1107 | static inline void clear_operand_string(struct filter_parse_state *ps) |
1108 | { |
1109 | memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL); |
1110 | ps->operand.tail = 0; |
1111 | } |
1112 | |
1113 | static inline int append_operand_char(struct filter_parse_state *ps, char c) |
1114 | { |
1115 | if (ps->operand.tail == MAX_FILTER_STR_VAL - 1) |
1116 | return -EINVAL; |
1117 | |
1118 | ps->operand.string[ps->operand.tail++] = c; |
1119 | |
1120 | return 0; |
1121 | } |
1122 | |
1123 | static int filter_opstack_push(struct filter_parse_state *ps, int op) |
1124 | { |
1125 | struct opstack_op *opstack_op; |
1126 | |
1127 | opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL); |
1128 | if (!opstack_op) |
1129 | return -ENOMEM; |
1130 | |
1131 | opstack_op->op = op; |
1132 | list_add(&opstack_op->list, &ps->opstack); |
1133 | |
1134 | return 0; |
1135 | } |
1136 | |
/* Nonzero when no operators are pending on the stack */
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}
1141 | |
1142 | static int filter_opstack_top(struct filter_parse_state *ps) |
1143 | { |
1144 | struct opstack_op *opstack_op; |
1145 | |
1146 | if (filter_opstack_empty(ps)) |
1147 | return OP_NONE; |
1148 | |
1149 | opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list); |
1150 | |
1151 | return opstack_op->op; |
1152 | } |
1153 | |
1154 | static int filter_opstack_pop(struct filter_parse_state *ps) |
1155 | { |
1156 | struct opstack_op *opstack_op; |
1157 | int op; |
1158 | |
1159 | if (filter_opstack_empty(ps)) |
1160 | return OP_NONE; |
1161 | |
1162 | opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list); |
1163 | op = opstack_op->op; |
1164 | list_del(&opstack_op->list); |
1165 | |
1166 | kfree(opstack_op); |
1167 | |
1168 | return op; |
1169 | } |
1170 | |
/* Pop (and free) every operator still on the stack */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1176 | |
/* Return the operand text accumulated so far (NUL-terminated) */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
1181 | |
1182 | static int postfix_append_operand(struct filter_parse_state *ps, char *operand) |
1183 | { |
1184 | struct postfix_elt *elt; |
1185 | |
1186 | elt = kmalloc(sizeof(*elt), GFP_KERNEL); |
1187 | if (!elt) |
1188 | return -ENOMEM; |
1189 | |
1190 | elt->op = OP_NONE; |
1191 | elt->operand = kstrdup(operand, GFP_KERNEL); |
1192 | if (!elt->operand) { |
1193 | kfree(elt); |
1194 | return -ENOMEM; |
1195 | } |
1196 | |
1197 | list_add_tail(&elt->list, &ps->postfix); |
1198 | |
1199 | return 0; |
1200 | } |
1201 | |
1202 | static int postfix_append_op(struct filter_parse_state *ps, int op) |
1203 | { |
1204 | struct postfix_elt *elt; |
1205 | |
1206 | elt = kmalloc(sizeof(*elt), GFP_KERNEL); |
1207 | if (!elt) |
1208 | return -ENOMEM; |
1209 | |
1210 | elt->op = op; |
1211 | elt->operand = NULL; |
1212 | |
1213 | list_add_tail(&elt->list, &ps->postfix); |
1214 | |
1215 | return 0; |
1216 | } |
1217 | |
1218 | static void postfix_clear(struct filter_parse_state *ps) |
1219 | { |
1220 | struct postfix_elt *elt; |
1221 | |
1222 | while (!list_empty(&ps->postfix)) { |
1223 | elt = list_first_entry(&ps->postfix, struct postfix_elt, list); |
1224 | list_del(&elt->list); |
1225 | kfree(elt->operand); |
1226 | kfree(elt); |
1227 | } |
1228 | } |
1229 | |
/*
 * Convert the infix filter string into postfix (RPN) form using the
 * shunting-yard algorithm: operands go straight to ps->postfix while
 * operators are staged on ps->opstack until an operator of lower
 * precedence, a closing parenthesis, or end-of-input flushes them.
 * Returns 0 on success, -EINVAL (after parse_error()) on bad input.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		/*
		 * Double quotes toggle string mode; the quotes themselves
		 * are dropped and everything inside becomes operand text.
		 */
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* finish the operand accumulated so far */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* flush stacked ops of greater-or-equal precedence */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			/* finish the operand accumulated so far */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* pop operators until the matching open paren */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			/* ran out of stack: ')' without a matching '(' */
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* trailing operand, then any operators still stacked */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		/* a '(' left on the stack never got its ')' */
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1321 | |
/*
 * Build a filter_pred for one operator of the postfix expression.
 *
 * NOTE: the returned pred points at a function-local *static* buffer.
 * The caller must copy it before create_pred() is called again, so
 * only one result may be live at a time and this is not reentrant.
 * (Callers run under event_mutex — presumably that is the protection;
 * verify before calling from any other context.)
 *
 * Returns NULL (after recording a parse error) when operands are
 * missing, the field is unknown, or init_pred() rejects the value.
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	/* logical ops carry no operands; the bare op is enough */
	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	/*
	 * NOTE(review): unbounded strcpy — assumes operand2 came from the
	 * parser's operand buffer, which is capped at MAX_FILTER_STR_VAL,
	 * and that regex.pattern is at least that large; verify.
	 */
	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1351 | |
1352 | static int check_preds(struct filter_parse_state *ps) |
1353 | { |
1354 | int n_normal_preds = 0, n_logical_preds = 0; |
1355 | struct postfix_elt *elt; |
1356 | |
1357 | list_for_each_entry(elt, &ps->postfix, list) { |
1358 | if (elt->op == OP_NONE) |
1359 | continue; |
1360 | |
1361 | if (elt->op == OP_AND || elt->op == OP_OR) { |
1362 | n_logical_preds++; |
1363 | continue; |
1364 | } |
1365 | n_normal_preds++; |
1366 | } |
1367 | |
1368 | if (!n_normal_preds || n_logical_preds >= n_normal_preds) { |
1369 | parse_error(ps, FILT_ERR_INVALID_FILTER, 0); |
1370 | return -EINVAL; |
1371 | } |
1372 | |
1373 | return 0; |
1374 | } |
1375 | |
1376 | static int count_preds(struct filter_parse_state *ps) |
1377 | { |
1378 | struct postfix_elt *elt; |
1379 | int n_preds = 0; |
1380 | |
1381 | list_for_each_entry(elt, &ps->postfix, list) { |
1382 | if (elt->op == OP_NONE) |
1383 | continue; |
1384 | n_preds++; |
1385 | } |
1386 | |
1387 | return n_preds; |
1388 | } |
1389 | |
/* Bookkeeping for check_pred_tree_cb(): abort after @max node visits */
struct check_pred_data {
	int count;	/* nodes visited so far */
	int max;	/* visit bound beyond which the tree is deemed broken */
};
1394 | |
1395 | static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred, |
1396 | int *err, void *data) |
1397 | { |
1398 | struct check_pred_data *d = data; |
1399 | |
1400 | if (WARN_ON(d->count++ > d->max)) { |
1401 | *err = -EINVAL; |
1402 | return WALK_PRED_ABORT; |
1403 | } |
1404 | return WALK_PRED_DEFAULT; |
1405 | } |
1406 | |
1407 | /* |
1408 | * The tree is walked at filtering of an event. If the tree is not correctly |
1409 | * built, it may cause an infinite loop. Check here that the tree does |
1410 | * indeed terminate. |
1411 | */ |
1412 | static int check_pred_tree(struct event_filter *filter, |
1413 | struct filter_pred *root) |
1414 | { |
1415 | struct check_pred_data data = { |
1416 | /* |
1417 | * The max that we can hit a node is three times. |
1418 | * Once going down, once coming up from left, and |
1419 | * once coming up from right. This is more than enough |
1420 | * since leafs are only hit a single time. |
1421 | */ |
1422 | .max = 3 * filter->n_preds, |
1423 | .count = 0, |
1424 | }; |
1425 | |
1426 | return walk_pred_tree(filter->preds, root, |
1427 | check_pred_tree_cb, &data); |
1428 | } |
1429 | |
1430 | static int count_leafs_cb(enum move_type move, struct filter_pred *pred, |
1431 | int *err, void *data) |
1432 | { |
1433 | int *count = data; |
1434 | |
1435 | if ((move == MOVE_DOWN) && |
1436 | (pred->left == FILTER_PRED_INVALID)) |
1437 | (*count)++; |
1438 | |
1439 | return WALK_PRED_DEFAULT; |
1440 | } |
1441 | |
1442 | static int count_leafs(struct filter_pred *preds, struct filter_pred *root) |
1443 | { |
1444 | int count = 0, ret; |
1445 | |
1446 | ret = walk_pred_tree(preds, root, count_leafs_cb, &count); |
1447 | WARN_ON(ret); |
1448 | return count; |
1449 | } |
1450 | |
/* State shared with fold_pred_cb() while flattening one subtree */
struct fold_pred_data {
	struct filter_pred *root;	/* subtree root receiving the ops array */
	int count;			/* leaves recorded so far */
	int children;			/* capacity of root->ops */
};
1456 | |
1457 | static int fold_pred_cb(enum move_type move, struct filter_pred *pred, |
1458 | int *err, void *data) |
1459 | { |
1460 | struct fold_pred_data *d = data; |
1461 | struct filter_pred *root = d->root; |
1462 | |
1463 | if (move != MOVE_DOWN) |
1464 | return WALK_PRED_DEFAULT; |
1465 | if (pred->left != FILTER_PRED_INVALID) |
1466 | return WALK_PRED_DEFAULT; |
1467 | |
1468 | if (WARN_ON(d->count == d->children)) { |
1469 | *err = -EINVAL; |
1470 | return WALK_PRED_ABORT; |
1471 | } |
1472 | |
1473 | pred->index &= ~FILTER_PRED_FOLD; |
1474 | root->ops[d->count++] = pred->index; |
1475 | return WALK_PRED_DEFAULT; |
1476 | } |
1477 | |
1478 | static int fold_pred(struct filter_pred *preds, struct filter_pred *root) |
1479 | { |
1480 | struct fold_pred_data data = { |
1481 | .root = root, |
1482 | .count = 0, |
1483 | }; |
1484 | int children; |
1485 | |
1486 | /* No need to keep the fold flag */ |
1487 | root->index &= ~FILTER_PRED_FOLD; |
1488 | |
1489 | /* If the root is a leaf then do nothing */ |
1490 | if (root->left == FILTER_PRED_INVALID) |
1491 | return 0; |
1492 | |
1493 | /* count the children */ |
1494 | children = count_leafs(preds, &preds[root->left]); |
1495 | children += count_leafs(preds, &preds[root->right]); |
1496 | |
1497 | root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL); |
1498 | if (!root->ops) |
1499 | return -ENOMEM; |
1500 | |
1501 | root->val = children; |
1502 | data.children = children; |
1503 | return walk_pred_tree(preds, root, fold_pred_cb, &data); |
1504 | } |
1505 | |
/* Walk callback: flatten every subtree whose root carries FILTER_PRED_FOLD */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
1523 | |
1524 | /* |
1525 | * To optimize the processing of the ops, if we have several "ors" or |
1526 | * "ands" together, we can put them in an array and process them all |
1527 | * together speeding up the filter logic. |
1528 | */ |
1529 | static int fold_pred_tree(struct event_filter *filter, |
1530 | struct filter_pred *root) |
1531 | { |
1532 | return walk_pred_tree(filter->preds, root, fold_pred_tree_cb, |
1533 | filter->preds); |
1534 | } |
1535 | |
1536 | static int replace_preds(struct ftrace_event_call *call, |
1537 | struct event_filter *filter, |
1538 | struct filter_parse_state *ps, |
1539 | char *filter_string, |
1540 | bool dry_run) |
1541 | { |
1542 | char *operand1 = NULL, *operand2 = NULL; |
1543 | struct filter_pred *pred; |
1544 | struct filter_pred *root; |
1545 | struct postfix_elt *elt; |
1546 | struct pred_stack stack = { }; /* init to NULL */ |
1547 | int err; |
1548 | int n_preds = 0; |
1549 | |
1550 | n_preds = count_preds(ps); |
1551 | if (n_preds >= MAX_FILTER_PRED) { |
1552 | parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); |
1553 | return -ENOSPC; |
1554 | } |
1555 | |
1556 | err = check_preds(ps); |
1557 | if (err) |
1558 | return err; |
1559 | |
1560 | if (!dry_run) { |
1561 | err = __alloc_pred_stack(&stack, n_preds); |
1562 | if (err) |
1563 | return err; |
1564 | err = __alloc_preds(filter, n_preds); |
1565 | if (err) |
1566 | goto fail; |
1567 | } |
1568 | |
1569 | n_preds = 0; |
1570 | list_for_each_entry(elt, &ps->postfix, list) { |
1571 | if (elt->op == OP_NONE) { |
1572 | if (!operand1) |
1573 | operand1 = elt->operand; |
1574 | else if (!operand2) |
1575 | operand2 = elt->operand; |
1576 | else { |
1577 | parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0); |
1578 | err = -EINVAL; |
1579 | goto fail; |
1580 | } |
1581 | continue; |
1582 | } |
1583 | |
1584 | if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) { |
1585 | parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); |
1586 | err = -ENOSPC; |
1587 | goto fail; |
1588 | } |
1589 | |
1590 | pred = create_pred(ps, call, elt->op, operand1, operand2); |
1591 | if (!pred) { |
1592 | err = -EINVAL; |
1593 | goto fail; |
1594 | } |
1595 | |
1596 | if (!dry_run) { |
1597 | err = filter_add_pred(ps, filter, pred, &stack); |
1598 | if (err) |
1599 | goto fail; |
1600 | } |
1601 | |
1602 | operand1 = operand2 = NULL; |
1603 | } |
1604 | |
1605 | if (!dry_run) { |
1606 | /* We should have one item left on the stack */ |
1607 | pred = __pop_pred_stack(&stack); |
1608 | if (!pred) |
1609 | return -EINVAL; |
1610 | /* This item is where we start from in matching */ |
1611 | root = pred; |
1612 | /* Make sure the stack is empty */ |
1613 | pred = __pop_pred_stack(&stack); |
1614 | if (WARN_ON(pred)) { |
1615 | err = -EINVAL; |
1616 | filter->root = NULL; |
1617 | goto fail; |
1618 | } |
1619 | err = check_pred_tree(filter, root); |
1620 | if (err) |
1621 | goto fail; |
1622 | |
1623 | /* Optimize the tree */ |
1624 | err = fold_pred_tree(filter, root); |
1625 | if (err) |
1626 | goto fail; |
1627 | |
1628 | /* We don't set root until we know it works */ |
1629 | barrier(); |
1630 | filter->root = root; |
1631 | } |
1632 | |
1633 | err = 0; |
1634 | fail: |
1635 | __free_pred_stack(&stack); |
1636 | return err; |
1637 | } |
1638 | |
/* Links a newly built per-event filter into a temporary local list */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
1643 | |
/*
 * Apply one filter string to every event of @system.
 *
 * Two passes over ftrace_events: a dry run first marks which events
 * can take the filter at all (events lacking a referenced field are
 * flagged TRACE_EVENT_FL_NO_SET_FILTER and skipped, not failed); the
 * second pass builds a real filter per remaining event and swaps it in
 * with rcu_assign_pointer().  The *old* filters are parked on a local
 * list and freed only after synchronize_sched(), so tracers racing
 * with the update never touch freed memory.
 *
 * Returns 0 if at least one event took the filter, -EINVAL if none
 * did, -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct event_subsystem *system,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_call *call;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	list_for_each_entry(call, &ftrace_events, list) {

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(call, NULL, ps, filter_string, true);
		if (err)
			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
		else
			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
	}

	list_for_each_entry(call, &ftrace_events, list) {
		struct event_filter *filter;

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(call, filter, ps, filter_string, false);
		if (err) {
			filter_disable(call);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			call->flags |= TRACE_EVENT_FL_FILTERED;
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = call->filter;
		rcu_assign_pointer(call->filter, filter_item->filter);
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1748 | |
/*
 * Allocate a filter and a parse state, then run the tokenizer.
 *
 * @set_str: when true, remember @filter_str inside the filter so
 *	     later errors can be reported next to the text.
 *
 * If any allocation fails, everything is freed and -ENOMEM returned
 * with *psp/*filterp untouched.  Once allocation succeeds, *psp and
 * *filterp are always set ("committed") — even if parsing then fails —
 * and the caller must release them via create_filter_finish() and
 * __free_filter().
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	/* replace_filter_string() can only fail on OOM, so -ENOMEM
	 * covers every failure reachable here */
	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1782 | |
/* Tear down the parse state allocated by create_filter_start() */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1791 | |
1792 | /** |
1793 | * create_filter - create a filter for a ftrace_event_call |
1794 | * @call: ftrace_event_call to create a filter for |
1795 | * @filter_str: filter string |
1796 | * @set_str: remember @filter_str and enable detailed error in filter |
1797 | * @filterp: out param for created filter (always updated on return) |
1798 | * |
1799 | * Creates a filter for @call with @filter_str. If @set_str is %true, |
1800 | * @filter_str is copied and recorded in the new filter. |
1801 | * |
1802 | * On success, returns 0 and *@filterp points to the new filter. On |
1803 | * failure, returns -errno and *@filterp may point to %NULL or to a new |
1804 | * filter. In the latter case, the returned filter contains error |
1805 | * information if @set_str is %true and the caller is responsible for |
1806 | * freeing it. |
1807 | */ |
1808 | static int create_filter(struct ftrace_event_call *call, |
1809 | char *filter_str, bool set_str, |
1810 | struct event_filter **filterp) |
1811 | { |
1812 | struct event_filter *filter = NULL; |
1813 | struct filter_parse_state *ps = NULL; |
1814 | int err; |
1815 | |
1816 | err = create_filter_start(filter_str, set_str, &ps, &filter); |
1817 | if (!err) { |
1818 | err = replace_preds(call, filter, ps, filter_str, false); |
1819 | if (err && set_str) |
1820 | append_filter_err(ps, filter); |
1821 | } |
1822 | create_filter_finish(ps); |
1823 | |
1824 | *filterp = filter; |
1825 | return err; |
1826 | } |
1827 | |
1828 | /** |
1829 | * create_system_filter - create a filter for an event_subsystem |
1830 | * @system: event_subsystem to create a filter for |
1831 | * @filter_str: filter string |
1832 | * @filterp: out param for created filter (always updated on return) |
1833 | * |
1834 | * Identical to create_filter() except that it creates a subsystem filter |
1835 | * and always remembers @filter_str. |
1836 | */ |
1837 | static int create_system_filter(struct event_subsystem *system, |
1838 | char *filter_str, struct event_filter **filterp) |
1839 | { |
1840 | struct event_filter *filter = NULL; |
1841 | struct filter_parse_state *ps = NULL; |
1842 | int err; |
1843 | |
1844 | err = create_filter_start(filter_str, true, &ps, &filter); |
1845 | if (!err) { |
1846 | err = replace_system_preds(system, ps, filter_str); |
1847 | if (!err) { |
1848 | /* System filters just show a default message */ |
1849 | kfree(filter->filter_string); |
1850 | filter->filter_string = NULL; |
1851 | } else { |
1852 | append_filter_err(ps, filter); |
1853 | } |
1854 | } |
1855 | create_filter_finish(ps); |
1856 | |
1857 | *filterp = filter; |
1858 | return err; |
1859 | } |
1860 | |
/*
 * apply_event_filter - set or clear the filter of one event.
 *
 * A filter string of "0" (after stripping whitespace) removes the
 * current filter.  Otherwise a new filter is built and swapped in
 * under event_mutex; the old one is freed only after
 * synchronize_sched() so concurrent tracers never see freed memory.
 */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(call);
		filter = call->filter;
		if (!filter)
			goto out_unlock;
		RCU_INIT_POINTER(call->filter, NULL);
		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp = call->filter;

		if (!err)
			call->flags |= TRACE_EVENT_FL_FILTERED;
		else
			filter_disable(call);

		rcu_assign_pointer(call->filter, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
1909 | |
/*
 * apply_subsystem_event_filter - set or clear a subsystem-wide filter.
 *
 * "0" removes the system filter and every per-event filter derived
 * from it.  Otherwise the string is applied to each event of the
 * subsystem that has the referenced fields (replace_system_preds()).
 */
int apply_subsystem_event_filter(struct event_subsystem *system,
				 char *filter_string)
{
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!system->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(system);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(system);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(system, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
1950 | |
1951 | #ifdef CONFIG_PERF_EVENTS |
1952 | |
/*
 * Detach and free the filter attached to a perf event.
 * NOTE(review): event->filter may be NULL here — assumes
 * __free_filter(NULL) is a safe no-op; verify against its definition.
 */
void ftrace_profile_free_filter(struct perf_event *event)
{
	struct event_filter *filter = event->filter;

	event->filter = NULL;
	__free_filter(filter);
}
1960 | |
/* Per-walk state for programming function filters into an ftrace_ops */
struct function_filter_data {
	struct ftrace_ops *ops;	/* ops being programmed */
	int first_filter;	/* reset flag: first filter pattern clears old ones */
	int first_notrace;	/* reset flag: first notrace pattern clears old ones */
};
1966 | |
1967 | #ifdef CONFIG_FUNCTION_TRACER |
1968 | static char ** |
1969 | ftrace_function_filter_re(char *buf, int len, int *count) |
1970 | { |
1971 | char *str, *sep, **re; |
1972 | |
1973 | str = kstrndup(buf, len, GFP_KERNEL); |
1974 | if (!str) |
1975 | return NULL; |
1976 | |
1977 | /* |
1978 | * The argv_split function takes white space |
1979 | * as a separator, so convert ',' into spaces. |
1980 | */ |
1981 | while ((sep = strchr(str, ','))) |
1982 | *sep = ' '; |
1983 | |
1984 | re = argv_split(GFP_KERNEL, str, count); |
1985 | kfree(str); |
1986 | return re; |
1987 | } |
1988 | |
/* Route the pattern to either the filter or the notrace list of @ops */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
2001 | |
2002 | static int __ftrace_function_set_filter(int filter, char *buf, int len, |
2003 | struct function_filter_data *data) |
2004 | { |
2005 | int i, re_cnt, ret; |
2006 | int *reset; |
2007 | char **re; |
2008 | |
2009 | reset = filter ? &data->first_filter : &data->first_notrace; |
2010 | |
2011 | /* |
2012 | * The 'ip' field could have multiple filters set, separated |
2013 | * either by space or comma. We first cut the filter and apply |
2014 | * all pieces separatelly. |
2015 | */ |
2016 | re = ftrace_function_filter_re(buf, len, &re_cnt); |
2017 | if (!re) |
2018 | return -EINVAL; |
2019 | |
2020 | for (i = 0; i < re_cnt; i++) { |
2021 | ret = ftrace_function_set_regexp(data->ops, filter, *reset, |
2022 | re[i], strlen(re[i])); |
2023 | if (ret) |
2024 | break; |
2025 | |
2026 | if (*reset) |
2027 | *reset = 0; |
2028 | } |
2029 | |
2030 | argv_free(re); |
2031 | return ret; |
2032 | } |
2033 | |
2034 | static int ftrace_function_check_pred(struct filter_pred *pred, int leaf) |
2035 | { |
2036 | struct ftrace_event_field *field = pred->field; |
2037 | |
2038 | if (leaf) { |
2039 | /* |
2040 | * Check the leaf predicate for function trace, verify: |
2041 | * - only '==' and '!=' is used |
2042 | * - the 'ip' field is used |
2043 | */ |
2044 | if ((pred->op != OP_EQ) && (pred->op != OP_NE)) |
2045 | return -EINVAL; |
2046 | |
2047 | if (strcmp(field->name, "ip")) |
2048 | return -EINVAL; |
2049 | } else { |
2050 | /* |
2051 | * Check the non leaf predicate for function trace, verify: |
2052 | * - only '||' is used |
2053 | */ |
2054 | if (pred->op != OP_OR) |
2055 | return -EINVAL; |
2056 | } |
2057 | |
2058 | return 0; |
2059 | } |
2060 | |
2061 | static int ftrace_function_set_filter_cb(enum move_type move, |
2062 | struct filter_pred *pred, |
2063 | int *err, void *data) |
2064 | { |
2065 | /* Checking the node is valid for function trace. */ |
2066 | if ((move != MOVE_DOWN) || |
2067 | (pred->left != FILTER_PRED_INVALID)) { |
2068 | *err = ftrace_function_check_pred(pred, 0); |
2069 | } else { |
2070 | *err = ftrace_function_check_pred(pred, 1); |
2071 | if (*err) |
2072 | return WALK_PRED_ABORT; |
2073 | |
2074 | *err = __ftrace_function_set_filter(pred->op == OP_EQ, |
2075 | pred->regex.pattern, |
2076 | pred->regex.len, |
2077 | data); |
2078 | } |
2079 | |
2080 | return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT; |
2081 | } |
2082 | |
2083 | static int ftrace_function_set_filter(struct perf_event *event, |
2084 | struct event_filter *filter) |
2085 | { |
2086 | struct function_filter_data data = { |
2087 | .first_filter = 1, |
2088 | .first_notrace = 1, |
2089 | .ops = &event->ftrace_ops, |
2090 | }; |
2091 | |
2092 | return walk_pred_tree(filter->preds, filter->root, |
2093 | ftrace_function_set_filter_cb, &data); |
2094 | } |
2095 | #else |
/* CONFIG_FUNCTION_TRACER is off: function filters cannot be installed */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2101 | #endif /* CONFIG_FUNCTION_TRACER */ |
2102 | |
/*
 * ftrace_profile_set_filter - attach a filter string to a perf event.
 *
 * Fails with -EEXIST if the event already has a filter.  For function
 * events the patterns are compiled straight into the event's
 * ftrace_ops and the event_filter itself is freed again; for other
 * events the filter is kept in event->filter (later released by
 * ftrace_profile_free_filter()).
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/* function events never keep the event_filter; errors drop it too */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2140 | |
2141 | #endif /* CONFIG_PERF_EVENTS */ |
2142 | |
2143 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
2144 | |
2145 | #include <linux/types.h> |
2146 | #include <linux/tracepoint.h> |
2147 | |
2148 | #define CREATE_TRACE_POINTS |
2149 | #include "trace_events_filter_test.h" |
2150 | |
2151 | #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \ |
2152 | { \ |
2153 | .filter = FILTER, \ |
2154 | .rec = { .a = va, .b = vb, .c = vc, .d = vd, \ |
2155 | .e = ve, .f = vf, .g = vg, .h = vh }, \ |
2156 | .match = m, \ |
2157 | .not_visited = nvisit, \ |
2158 | } |
2159 | #define YES 1 |
2160 | #define NO 0 |
2161 | |
/*
 * Self-test table: each entry pairs a filter expression with one
 * synthetic event record, the expected filter_match_preds() result,
 * and the fields whose predicates must be skipped (short-circuited)
 * when that record is evaluated.
 */
static struct test_filter_data_t {
	char *filter;		/* filter expression fed to create_filter() */
	struct ftrace_raw_ftrace_test_filter rec; /* synthetic event record */
	int match;		/* expected filter_match_preds() result */
	char *not_visited;	/* field letters whose preds must not run */
} test_filter_data[] = {
/* Plain AND chain: first zero field short-circuits the rest. */
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO,  0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO,  1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
/* Plain OR chain: first nonzero field short-circuits the rest. */
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
/* AND of parenthesized OR pairs. */
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO,  0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO,  1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
/* OR of parenthesized AND pairs. */
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
/* Mixed && / || without full parenthesization. */
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
/* Nested OR group ANDed with a trailing OR pair. */
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
/* Deeply left-nested alternating &&/|| (AND outermost). */
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO,  0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO,  1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
/* Deeply left-nested alternating ||/&& (OR outermost). */
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};
2217 | |
#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/*
 * Number of entries in test_filter_data[].  Derived from the array
 * expression itself (sizeof(a)/sizeof(a[0])) rather than from the
 * element type name, so the count cannot silently drift if the element
 * type is ever renamed or changed.
 */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(test_filter_data[0]))
2224 | |
2225 | static int test_pred_visited; |
2226 | |
2227 | static int test_pred_visited_fn(struct filter_pred *pred, void *event) |
2228 | { |
2229 | struct ftrace_event_field *field = pred->field; |
2230 | |
2231 | test_pred_visited = 1; |
2232 | printk(KERN_INFO "\npred visited %s\n", field->name); |
2233 | return 1; |
2234 | } |
2235 | |
2236 | static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred, |
2237 | int *err, void *data) |
2238 | { |
2239 | char *fields = data; |
2240 | |
2241 | if ((move == MOVE_DOWN) && |
2242 | (pred->left == FILTER_PRED_INVALID)) { |
2243 | struct ftrace_event_field *field = pred->field; |
2244 | |
2245 | if (!field) { |
2246 | WARN(1, "all leafs should have field defined"); |
2247 | return WALK_PRED_DEFAULT; |
2248 | } |
2249 | if (!strchr(fields, *field->name)) |
2250 | return WALK_PRED_DEFAULT; |
2251 | |
2252 | WARN_ON(!pred->fn); |
2253 | pred->fn = test_pred_visited_fn; |
2254 | } |
2255 | return WALK_PRED_DEFAULT; |
2256 | } |
2257 | |
2258 | static __init int ftrace_test_event_filter(void) |
2259 | { |
2260 | int i; |
2261 | |
2262 | printk(KERN_INFO "Testing ftrace filter: "); |
2263 | |
2264 | for (i = 0; i < DATA_CNT; i++) { |
2265 | struct event_filter *filter = NULL; |
2266 | struct test_filter_data_t *d = &test_filter_data[i]; |
2267 | int err; |
2268 | |
2269 | err = create_filter(&event_ftrace_test_filter, d->filter, |
2270 | false, &filter); |
2271 | if (err) { |
2272 | printk(KERN_INFO |
2273 | "Failed to get filter for '%s', err %d\n", |
2274 | d->filter, err); |
2275 | __free_filter(filter); |
2276 | break; |
2277 | } |
2278 | |
2279 | /* |
2280 | * The preemption disabling is not really needed for self |
2281 | * tests, but the rcu dereference will complain without it. |
2282 | */ |
2283 | preempt_disable(); |
2284 | if (*d->not_visited) |
2285 | walk_pred_tree(filter->preds, filter->root, |
2286 | test_walk_pred_cb, |
2287 | d->not_visited); |
2288 | |
2289 | test_pred_visited = 0; |
2290 | err = filter_match_preds(filter, &d->rec); |
2291 | preempt_enable(); |
2292 | |
2293 | __free_filter(filter); |
2294 | |
2295 | if (test_pred_visited) { |
2296 | printk(KERN_INFO |
2297 | "Failed, unwanted pred visited for filter %s\n", |
2298 | d->filter); |
2299 | break; |
2300 | } |
2301 | |
2302 | if (err != d->match) { |
2303 | printk(KERN_INFO |
2304 | "Failed to match filter '%s', expected %d\n", |
2305 | d->filter, d->match); |
2306 | break; |
2307 | } |
2308 | } |
2309 | |
2310 | if (i == DATA_CNT) |
2311 | printk(KERN_CONT "OK\n"); |
2312 | |
2313 | return 0; |
2314 | } |
2315 | |
2316 | late_initcall(ftrace_test_event_filter); |
2317 | |
2318 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
2319 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9