#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

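/*
 * tree_list holds all live trees (protected by audit_filter_mutex);
 * prune_list holds trees queued for destruction by the pruning thread.
 */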
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */
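/*
 * For example, a chunk with three owners normally has owners[2].index == 2;
 * once that tagging is marked "might need reverting", it becomes
 * 2 | (1U << 31). find_chunk() below masks the MSB off to recover the
 * slot number.
 */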

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

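/*
 * Allocate a chunk with room for @count owners; the embedded mark still
 * has to be attached to an inode by the caller.
 */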
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

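/*
 * Bucket choice effectively hashes the middle bits of the inode address
 * (see the design comment above); dividing by L1_CACHE_BYTES discards the
 * uninformative low bits.
 */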
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->i.inode)
		return;
	list = chunk_hash(entry->i.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.i.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

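/*
 * Recover the chunk containing a given owners[] slot; node.index (sans
 * the "will prune" bit) is the slot number.
 */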
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

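/*
 * Drop the tagging represented by *p: replace the chunk with a copy one
 * slot smaller, or remove it outright if *p was the last owner. Called
 * with hash_lock held; the lock is dropped while we work and retaken
 * before returning, so callers must refetch their list position.
 */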
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->i.inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

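/* attach a fresh, single-owner chunk to @inode on behalf of @tree */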
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->i.inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could never have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pairs with fsnotify_find_inode_mark() */
	return 0;
}

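/* emit a CONFIG_CHANGE record noting that a rule went away with its tree */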
static void audit_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=");
	audit_log_string(ab, "remove rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

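/*
 * Detach every rule that refers to @tree and queue the rules themselves
 * for destruction; the caller holds audit_filter_mutex.
 */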
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_log_remove_rule(rule);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move marked chunks to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

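/* iterate_mounts() callback: is @arg the root inode of @mnt? */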
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

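/*
 * Walk all trees; mark each chunk whose inode is no longer present among
 * the mounts under its tree's path, then untag the marked ones.
 */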
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.i.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

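/*
 * Validate and allocate a tree for a new rule: the path must be absolute,
 * and only exit-filter rules using plain equality may carry trees.
 */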
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

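/*
 * Used by the AUDIT_MAKE_EQUIV command: tag the mounts collected at @new
 * for every tree whose path covers @old.
 */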
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

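/*
 * The chunk's inode is going away: unhash the chunk and kill (or queue
 * for killing) every tree that owned it.
 */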
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}

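/* should never trigger: audit_tree_send_event() below refuses all events */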
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   struct fsnotify_event *event)
{
	BUG();
	return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
				  struct fsnotify_mark *inode_mark,
				  struct fsnotify_mark *vfsmount_mark,
				  __u32 mask, void *data, int data_type)
{
	return false;
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event		= audit_tree_handle_event,
	.should_send_event	= audit_tree_send_event,
	.free_group_priv	= NULL,
	.free_event_priv	= NULL,
	.freeing_mark		= audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);