#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */
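/*
 * Illustration (hypothetical, not part of this file): the refcounting rule
 * above - one reference for the rule list, one per chunk slot pointing at
 * the tree - boils down to the usual atomic get/put pattern.  A standalone
 * userspace sketch with C11 atomics; where put_tree() defers the kfree()
 * through call_rcu(), the sketch simply frees right away.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_tree {
	atomic_int count;		/* plays the role of audit_tree.count */
	char pathname[];
};

static struct demo_tree *demo_alloc_tree(const char *s)
{
	struct demo_tree *t = malloc(sizeof(*t) + strlen(s) + 1);

	if (t) {
		atomic_init(&t->count, 1);	/* reference held by the rule list */
		strcpy(t->pathname, s);
	}
	return t;
}

static void demo_get_tree(struct demo_tree *t)
{
	atomic_fetch_add(&t->count, 1);		/* a chunk slot now points at t */
}

static void demo_put_tree(struct demo_tree *t)
{
	/* last reference gone; the kernel would defer the free via call_rcu() */
	if (atomic_fetch_sub(&t->count, 1) == 1)
		free(t);
}

int main(void)
{
	struct demo_tree *t = demo_alloc_tree("/etc");

	if (!t)
		return 1;
	demo_get_tree(t);	/* a chunk gets tagged with the tree */
	demo_put_tree(t);	/* ... and later untagged */
	demo_put_tree(t);	/* rule removed: count hits zero, tree is freed */
	printf("done\n");
	return 0;
}
#endif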

static struct inotify_handle *rtree_ih;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}
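
/*
 * Illustration (hypothetical, not part of this file): both alloc_tree() and
 * alloc_chunk() use C99 flexible array members, sizing one allocation as
 * header plus tail with offsetof().  A standalone userspace sketch of the
 * same pattern, with invented names:
 */
#if 0
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct slot {
	int owner_id;
	unsigned index;
};

struct bundle {
	int count;
	struct slot slots[];		/* flexible array, like chunk->owners[] */
};

static struct bundle *alloc_bundle(int count)
{
	/* header plus 'count' tail elements in a single allocation */
	size_t size = offsetof(struct bundle, slots) + count * sizeof(struct slot);
	struct bundle *b = calloc(1, size);	/* calloc zeroes, like kzalloc */
	int i;

	if (!b)
		return NULL;
	b->count = count;
	for (i = 0; i < count; i++)
		b->slots[i].index = i;		/* each slot records its position */
	return b;
}

int main(void)
{
	struct bundle *b = alloc_bundle(3);

	if (!b)
		return 1;
	printf("slot 2 has index %u\n", b->slots[2].index);
	free(b);
	return 0;
}
#endif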

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
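
/*
 * Illustration (hypothetical, not part of this file): chunk_hash() keys the
 * table on the inode's address.  Dividing by L1_CACHE_BYTES discards low
 * bits that are identical for cache-line-aligned objects, and the modulo
 * keeps the "middle bits" mentioned in the comment near the top.  A
 * userspace sketch with assumed values for the two constants:
 */
#if 0
#include <stdio.h>

#define DEMO_CACHE_BYTES	64	/* assumed L1_CACHE_BYTES */
#define DEMO_HASH_SIZE		128	/* matches HASH_SIZE above */

static unsigned bucket_of(const void *obj)
{
	/* drop the always-zero low bits, then fold into the table size */
	unsigned long n = (unsigned long)obj / DEMO_CACHE_BYTES;

	return (unsigned)(n % DEMO_HASH_SIZE);
}

int main(void)
{
	static long objs[4][8];		/* stand-ins for inodes */
	int i;

	for (i = 0; i < 4; i++)
		printf("object %d -> bucket %u\n", i, bucket_of(&objs[i]));
	return 0;
}
#endif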

/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
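
/*
 * Illustration (hypothetical, not part of this file): find_chunk() works
 * because every node stores its own slot number in the low bits of .index.
 * Masking off the "will prune" MSB and stepping back that many elements
 * lands on owners[0], and container_of() recovers the enclosing chunk.
 * A standalone userspace demo of the trick:
 */
#if 0
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define PRUNE_BIT	(1U << 31)	/* MSB stolen to mark "will prune" */

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_node {
	unsigned index;			/* slot number, possibly with PRUNE_BIT */
};

struct demo_chunk {
	int count;
	struct demo_node owners[];
};

static struct demo_chunk *demo_find_chunk(struct demo_node *p)
{
	unsigned index = p->index & ~PRUNE_BIT;	/* mask off the marker */

	p -= index;				/* walk back to owners[0] */
	return demo_container_of(p, struct demo_chunk, owners[0]);
}

int main(void)
{
	struct demo_chunk *c;
	struct demo_node *p;
	unsigned i;

	c = malloc(offsetof(struct demo_chunk, owners) + 3 * sizeof(struct demo_node));
	if (!c)
		return 1;
	c->count = 3;
	for (i = 0; i < 3; i++)
		c->owners[i].index = i;
	c->owners[2].index |= PRUNE_BIT;	/* marked for pruning */

	/* given only a pointer to slot 2, recover the containing chunk */
	p = &c->owners[2];
	printf("recovered chunk, count = %d\n", demo_find_chunk(p)->count);
	free(c);
	return 0;
}
#endif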

static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	if (!pin_inotify_watch(&chunk->watch)) {
		/*
		 * Filesystem is shutting down; all watches are getting
		 * evicted, just take it off the node list for this
		 * tree and let the eviction logic take care of the
		 * rest.
		 */
		owner = p->owner;
		if (owner->root == chunk) {
			list_del_init(&owner->same_root);
			owner->root = NULL;
		}
		list_del_init(&p->list);
		p->owner = NULL;
		put_tree(owner);
		return;
	}

	spin_unlock(&hash_lock);

	/*
	 * pin_inotify_watch() succeeded, so the watch won't go away
	 * from under us.
	 */
	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
out:
	unpin_inotify_watch(&chunk->watch);
	spin_lock(&hash_lock);
}

static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(&old->watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		put_inotify_watch(&old->watch);
		return -ENOMEM;
	}

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
	put_inotify_watch(&old->watch); /* and kill it */
	return 0;
}

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=");
			audit_log_string(ab, "remove rule");
			audit_log_format(ab, " dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			audit_log_key(ab, rule->filterkey);
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}
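
/*
 * Illustration (hypothetical, not part of this file): the "reorder" pass in
 * trim_marked() partitions the chunk list by unlinking every node whose MSB
 * is set and re-adding it at the head, so all "will prune" entries end up in
 * front and the second loop can stop at the first unmarked node.  A
 * userspace sketch of that partition step, with a minimal circular list in
 * place of the kernel's list helpers:
 */
#if 0
#include <stdio.h>

#define MARK	(1U << 31)

struct dnode {
	unsigned index;
	struct dnode *next, *prev;
};

static void demo_list_del(struct dnode *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void demo_list_add(struct dnode *n, struct dnode *head)
{
	/* insert right after head, i.e. at the front of the list */
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct dnode head = { 0, &head, &head };	/* empty circular list */
	struct dnode nodes[5];
	struct dnode *p, *q;
	int i;

	/* build the list 0..4 and mark entries 1 and 3 as "will prune" */
	for (i = 4; i >= 0; i--) {
		nodes[i].index = (unsigned)i;
		demo_list_add(&nodes[i], &head);
	}
	nodes[1].index |= MARK;
	nodes[3].index |= MARK;

	/* the partition pass: marked nodes migrate to the front */
	for (p = head.next; p != &head; p = q) {
		q = p->next;
		if (p->index & MARK) {
			demo_list_del(p);
			demo_list_add(p, &head);
		}
	}

	for (p = head.next; p != &head; p = p->next)
		printf("%u%s ", p->index & ~MARK, (p->index & MARK) ? "*" : "");
	printf("\n");		/* marked entries (*) now lead the list */
	return 0;
}
#endif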

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct inode *inode = find_chunk(node)->watch.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}
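
/*
 * Illustration (hypothetical, not part of this file): audit_schedule_prune()
 * just fires a short-lived kernel thread that drains prune_list under the
 * filter mutex.  A rough userspace analogue of the "spawn a one-shot worker
 * to drain a list" pattern, using pthreads (all names invented; build the
 * extracted demo with cc -pthread):
 */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct victim {
	int id;
	struct victim *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct victim *demo_prune_list;	/* stack of trees awaiting teardown */

static void *prune_worker(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&list_lock);
	while (demo_prune_list) {
		struct victim *v = demo_prune_list;

		demo_prune_list = v->next;
		/* drop the lock for the (possibly slow) teardown itself */
		pthread_mutex_unlock(&list_lock);
		printf("pruning victim %d\n", v->id);
		free(v);
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

static void demo_schedule_prune(void)
{
	pthread_t t;

	/* fire-and-forget worker, roughly what kthread_run() does here */
	if (pthread_create(&t, NULL, prune_worker, NULL) == 0)
		pthread_detach(t);
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct victim *v = malloc(sizeof(*v));

		if (!v)
			return 1;
		v->id = i;
		pthread_mutex_lock(&list_lock);
		v->next = demo_prune_list;
		demo_prune_list = v;
		pthread_mutex_unlock(&list_lock);
	}
	demo_schedule_prune();
	/* let the detached worker finish before the process exits */
	pthread_exit(NULL);
}
#endif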

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}

static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	call_rcu(&chunk->head, __put_chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event	= handle_event,
	.destroy_watch	= destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);