Root/
1 | /* Basic authentication token and access key management |
2 | * |
3 | * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) |
5 | * |
6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. |
10 | */ |
11 | |
12 | #include <linux/module.h> |
13 | #include <linux/init.h> |
14 | #include <linux/poison.h> |
15 | #include <linux/sched.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/security.h> |
18 | #include <linux/workqueue.h> |
19 | #include <linux/random.h> |
20 | #include <linux/err.h> |
21 | #include <linux/user_namespace.h> |
22 | #include "internal.h" |
23 | |
static struct kmem_cache *key_jar;	/* slab cache from which struct key is allocated */
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

/* default per-user quota limits */
unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

/* list of registered key types, protected by key_types_sem */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* dead keys are reaped by key_cleanup() in process context, scheduled from
 * key_put() when a key's usage count hits zero */
static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};
49 | |
#ifdef KEY_DEBUGGING
/*
 * report a key whose magic number doesn't match KEY_DEBUG_MAGIC, then halt
 * - reached via the key_check() macro when debugging is enabled
 */
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
58 | |
/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 * - records are keyed on (uid, user_ns); namespaces are ordered by address
 * - the returned record has its usage count incremented; drop it with
 *   key_user_put()
 * - returns NULL if a needed allocation fails
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else if (user_ns < user->user_ns)
			p = &(*p)->rb_left;
		else if (user_ns > user->user_ns)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->user_ns = get_user_ns(user_ns);	/* pin the namespace */
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);	/* NULL on the first pass; a loser's spare on the second */
out:
	return user;

} /* end key_user_lookup() */
135 | |
136 | /*****************************************************************************/ |
137 | /* |
138 | * dispose of a user structure |
139 | */ |
140 | void key_user_put(struct key_user *user) |
141 | { |
142 | if (atomic_dec_and_lock(&user->usage, &key_user_lock)) { |
143 | rb_erase(&user->node, &key_user_tree); |
144 | spin_unlock(&key_user_lock); |
145 | put_user_ns(user->user_ns); |
146 | |
147 | kfree(user); |
148 | } |
149 | |
150 | } /* end key_user_put() */ |
151 | |
/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 * - serial numbers below 3 are never used, and negative values are excluded
 *   by shifting out the sign bit
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			/* the counter wrapped - restart from the lowest
			 * permitted serial */
			key->serial = 3;
			goto attempt_insertion;
		}

		/* step to the in-order successor; running off the end means
		 * the incremented serial is bigger than any in use */
		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;	/* found a gap */
	}

} /* end key_alloc_serial() */
216 | |
217 | /*****************************************************************************/ |
218 | /* |
219 | * allocate a key of the specified type |
220 | * - update the user's quota to reflect the existence of the key |
221 | * - called from a key-type operation with key_types_sem read-locked by |
222 | * key_create_or_update() |
223 | * - this prevents unregistration of the key type |
224 | * - upon return the key is as yet uninstantiated; the caller needs to either |
225 | * instantiate the key or discard it before returning |
226 | */ |
227 | struct key *key_alloc(struct key_type *type, const char *desc, |
228 | uid_t uid, gid_t gid, const struct cred *cred, |
229 | key_perm_t perm, unsigned long flags) |
230 | { |
231 | struct key_user *user = NULL; |
232 | struct key *key; |
233 | size_t desclen, quotalen; |
234 | int ret; |
235 | |
236 | key = ERR_PTR(-EINVAL); |
237 | if (!desc || !*desc) |
238 | goto error; |
239 | |
240 | desclen = strlen(desc) + 1; |
241 | quotalen = desclen + type->def_datalen; |
242 | |
243 | /* get hold of the key tracking for this user */ |
244 | user = key_user_lookup(uid, cred->user->user_ns); |
245 | if (!user) |
246 | goto no_memory_1; |
247 | |
248 | /* check that the user's quota permits allocation of another key and |
249 | * its description */ |
250 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { |
251 | unsigned maxkeys = (uid == 0) ? |
252 | key_quota_root_maxkeys : key_quota_maxkeys; |
253 | unsigned maxbytes = (uid == 0) ? |
254 | key_quota_root_maxbytes : key_quota_maxbytes; |
255 | |
256 | spin_lock(&user->lock); |
257 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { |
258 | if (user->qnkeys + 1 >= maxkeys || |
259 | user->qnbytes + quotalen >= maxbytes || |
260 | user->qnbytes + quotalen < user->qnbytes) |
261 | goto no_quota; |
262 | } |
263 | |
264 | user->qnkeys++; |
265 | user->qnbytes += quotalen; |
266 | spin_unlock(&user->lock); |
267 | } |
268 | |
269 | /* allocate and initialise the key and its description */ |
270 | key = kmem_cache_alloc(key_jar, GFP_KERNEL); |
271 | if (!key) |
272 | goto no_memory_2; |
273 | |
274 | if (desc) { |
275 | key->description = kmemdup(desc, desclen, GFP_KERNEL); |
276 | if (!key->description) |
277 | goto no_memory_3; |
278 | } |
279 | |
280 | atomic_set(&key->usage, 1); |
281 | init_rwsem(&key->sem); |
282 | key->type = type; |
283 | key->user = user; |
284 | key->quotalen = quotalen; |
285 | key->datalen = type->def_datalen; |
286 | key->uid = uid; |
287 | key->gid = gid; |
288 | key->perm = perm; |
289 | key->flags = 0; |
290 | key->expiry = 0; |
291 | key->payload.data = NULL; |
292 | key->security = NULL; |
293 | |
294 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) |
295 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; |
296 | |
297 | memset(&key->type_data, 0, sizeof(key->type_data)); |
298 | |
299 | #ifdef KEY_DEBUGGING |
300 | key->magic = KEY_DEBUG_MAGIC; |
301 | #endif |
302 | |
303 | /* let the security module know about the key */ |
304 | ret = security_key_alloc(key, cred, flags); |
305 | if (ret < 0) |
306 | goto security_error; |
307 | |
308 | /* publish the key by giving it a serial number */ |
309 | atomic_inc(&user->nkeys); |
310 | key_alloc_serial(key); |
311 | |
312 | error: |
313 | return key; |
314 | |
315 | security_error: |
316 | kfree(key->description); |
317 | kmem_cache_free(key_jar, key); |
318 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { |
319 | spin_lock(&user->lock); |
320 | user->qnkeys--; |
321 | user->qnbytes -= quotalen; |
322 | spin_unlock(&user->lock); |
323 | } |
324 | key_user_put(user); |
325 | key = ERR_PTR(ret); |
326 | goto error; |
327 | |
328 | no_memory_3: |
329 | kmem_cache_free(key_jar, key); |
330 | no_memory_2: |
331 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { |
332 | spin_lock(&user->lock); |
333 | user->qnkeys--; |
334 | user->qnbytes -= quotalen; |
335 | spin_unlock(&user->lock); |
336 | } |
337 | key_user_put(user); |
338 | no_memory_1: |
339 | key = ERR_PTR(-ENOMEM); |
340 | goto error; |
341 | |
342 | no_quota: |
343 | spin_unlock(&user->lock); |
344 | key_user_put(user); |
345 | key = ERR_PTR(-EDQUOT); |
346 | goto error; |
347 | |
348 | } /* end key_alloc() */ |
349 | |
350 | EXPORT_SYMBOL(key_alloc); |
351 | |
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 * - adjusts the owner's byte quota by the difference between the new and the
 *   currently recorded payload size
 * - returns 0, or -EDQUOT if growth would exceed the user's quota
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int)datalen - key->datalen;	/* may be negative (shrink) */
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = (key->user->uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		/* only growth is quota-checked; the second comparison catches
		 * arithmetic overflow of qnbytes */
		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
391 | |
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 * - *_prealloc carries link storage set up by __key_link_begin(); it is only
 *   consumed if keyring is non-NULL
 * - returns the instantiate op's result, or -EBUSY if the key was already
 *   instantiated
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey,
				      struct keyring_list **_prealloc)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	/* instantiation state is serialised under key_construction_mutex */
	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			/* someone was waiting for this key to be constructed */
			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				__key_link(keyring, key, _prealloc);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end __key_instantiate_and_link() */
446 | |
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - public wrapper that brackets __key_instantiate_and_link() with the
 *   keyring link setup/teardown
 * - prealloc is left uninitialised when keyring is NULL; the callee only
 *   touches it when a keyring is supplied
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	struct keyring_list *prealloc;
	int ret;

	if (keyring) {
		/* preallocate link storage and writelock the keyring */
		ret = __key_link_begin(keyring, key->type, key->description,
				       &prealloc);
		if (ret < 0)
			return ret;
	}

	ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey,
					 &prealloc);

	if (keyring)
		__key_link_end(keyring, key->type, prealloc);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);
478 | |
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 * - the key is marked NEGATIVE and INSTANTIATED, and given an expiry time of
 *   `timeout' seconds from now, after which garbage collection is scheduled
 * - returns 0 on success, -EBUSY if the key was already instantiated, or any
 *   error from __key_link_begin()
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *authkey)
{
	struct keyring_list *prealloc;
	struct timespec now;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	/* any failure to set up the link is remembered in link_ret; the
	 * negation itself still proceeds */
	if (keyring)
		link_ret = __key_link_begin(keyring, key->type,
					    key->description, &prealloc);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		/* someone was waiting for this key to be constructed */
		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(keyring, key, &prealloc);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		__key_link_end(keyring, key->type, prealloc);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	/* if the negation worked, report any link failure instead */
	return ret == 0 ? link_ret : ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
542 | |
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 * - scheduled from key_put() whenever a key's usage count drops to zero;
 *   reaps every zero-usage key found in the serial tree
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	/* poison the magic so stale pointers are caught by key_check() */
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */
606 | |
607 | /*****************************************************************************/ |
608 | /* |
609 | * dispose of a reference to a key |
610 | * - when all the references are gone, we schedule the cleanup task to come and |
611 | * pull it out of the tree in definite process context |
612 | */ |
613 | void key_put(struct key *key) |
614 | { |
615 | if (key) { |
616 | key_check(key); |
617 | |
618 | if (atomic_dec_and_test(&key->usage)) |
619 | schedule_work(&key_cleanup_task); |
620 | } |
621 | |
622 | } /* end key_put() */ |
623 | |
624 | EXPORT_SYMBOL(key_put); |
625 | |
626 | /*****************************************************************************/ |
627 | /* |
628 | * find a key by its serial number |
629 | */ |
630 | struct key *key_lookup(key_serial_t id) |
631 | { |
632 | struct rb_node *n; |
633 | struct key *key; |
634 | |
635 | spin_lock(&key_serial_lock); |
636 | |
637 | /* search the tree for the specified key */ |
638 | n = key_serial_tree.rb_node; |
639 | while (n) { |
640 | key = rb_entry(n, struct key, serial_node); |
641 | |
642 | if (id < key->serial) |
643 | n = n->rb_left; |
644 | else if (id > key->serial) |
645 | n = n->rb_right; |
646 | else |
647 | goto found; |
648 | } |
649 | |
650 | not_found: |
651 | key = ERR_PTR(-ENOKEY); |
652 | goto error; |
653 | |
654 | found: |
655 | /* pretend it doesn't exist if it is awaiting deletion */ |
656 | if (atomic_read(&key->usage) == 0) |
657 | goto not_found; |
658 | |
659 | /* this races with key_put(), but that doesn't matter since key_put() |
660 | * doesn't actually change the key |
661 | */ |
662 | atomic_inc(&key->usage); |
663 | |
664 | error: |
665 | spin_unlock(&key_serial_lock); |
666 | return key; |
667 | |
668 | } /* end key_lookup() */ |
669 | |
670 | /*****************************************************************************/ |
671 | /* |
672 | * find and lock the specified key type against removal |
673 | * - we return with the sem readlocked |
674 | */ |
675 | struct key_type *key_type_lookup(const char *type) |
676 | { |
677 | struct key_type *ktype; |
678 | |
679 | down_read(&key_types_sem); |
680 | |
681 | /* look up the key type to see if it's one of the registered kernel |
682 | * types */ |
683 | list_for_each_entry(ktype, &key_types_list, link) { |
684 | if (strcmp(ktype->name, type) == 0) |
685 | goto found_kernel_type; |
686 | } |
687 | |
688 | up_read(&key_types_sem); |
689 | ktype = ERR_PTR(-ENOKEY); |
690 | |
691 | found_kernel_type: |
692 | return ktype; |
693 | |
694 | } /* end key_type_lookup() */ |
695 | |
/*****************************************************************************/
/*
 * unlock a key type pinned by key_type_lookup()
 * - simply drops the read lock on key_types_sem, permitting unregistration
 *   again
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */
705 | |
/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error (the caller's reference is
 *   consumed on failure and an ERR_PTR returned instead)
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* the type must support in-place update */
	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */
747 | |
/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 * - the new key is owned by the current fsuid/fsgid
 * - returns a ref to the key on success, or an ERR_PTR (-ENODEV for an
 *   unknown type, -EINVAL/-ENOTDIR for bad arguments, or an error from
 *   permission checking, allocation or instantiation)
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	struct keyring_list *prealloc;
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	/* the type must be matchable and instantiable to be usable here */
	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	/* preallocate link storage and writelock the keyring */
	ret = __key_link_begin(keyring, ktype, description, &prealloc);
	if (ret < 0)
		goto error_2;

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL,
					 &prealloc);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

	/* unwind labels: error_3 releases the keyring lock, error_2 unpins
	 * the type, error just returns */
error_3:
	__key_link_end(keyring, ktype, prealloc);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	__key_link_end(keyring, ktype, prealloc);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
862 | |
863 | /*****************************************************************************/ |
864 | /* |
865 | * update a key |
866 | */ |
867 | int key_update(key_ref_t key_ref, const void *payload, size_t plen) |
868 | { |
869 | struct key *key = key_ref_to_ptr(key_ref); |
870 | int ret; |
871 | |
872 | key_check(key); |
873 | |
874 | /* the key must be writable */ |
875 | ret = key_permission(key_ref, KEY_WRITE); |
876 | if (ret < 0) |
877 | goto error; |
878 | |
879 | /* attempt to update it if supported */ |
880 | ret = -EOPNOTSUPP; |
881 | if (key->type->update) { |
882 | down_write(&key->sem); |
883 | |
884 | ret = key->type->update(key, payload, plen); |
885 | if (ret == 0) |
886 | /* updating a negative key instantiates it */ |
887 | clear_bit(KEY_FLAG_NEGATIVE, &key->flags); |
888 | |
889 | up_write(&key->sem); |
890 | } |
891 | |
892 | error: |
893 | return ret; |
894 | |
895 | } /* end key_update() */ |
896 | |
897 | EXPORT_SYMBOL(key_update); |
898 | |
/*****************************************************************************/
/*
 * revoke a key
 * - marks the key REVOKED, calls the type's revoke op once only, and sets a
 *   revocation time so garbage collection can be scheduled
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	/* test_and_set ensures ->revoke() runs at most once per key */
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);
933 | |
934 | /*****************************************************************************/ |
935 | /* |
936 | * register a type of key |
937 | */ |
938 | int register_key_type(struct key_type *ktype) |
939 | { |
940 | struct key_type *p; |
941 | int ret; |
942 | |
943 | ret = -EEXIST; |
944 | down_write(&key_types_sem); |
945 | |
946 | /* disallow key types with the same name */ |
947 | list_for_each_entry(p, &key_types_list, link) { |
948 | if (strcmp(p->name, ktype->name) == 0) |
949 | goto out; |
950 | } |
951 | |
952 | /* store the type */ |
953 | list_add(&ktype->link, &key_types_list); |
954 | ret = 0; |
955 | |
956 | out: |
957 | up_write(&key_types_sem); |
958 | return ret; |
959 | |
960 | } /* end register_key_type() */ |
961 | |
962 | EXPORT_SYMBOL(register_key_type); |
963 | |
/*****************************************************************************/
/*
 * unregister a type of key
 * - two-pass teardown: first mark all keys of this type dead (under the
 *   serial lock), wait for an RCU grace period, then destroy their payloads
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			key->type = &key_type_dead;
			set_bit(KEY_FLAG_DEAD, &key->flags);
		}
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			/* poison the payload to catch late references */
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

	key_schedule_gc(0);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
1017 | |
/*****************************************************************************/
/*
 * initialise the key management stuff
 * - called once during boot (__init); the type list and user tree are
 *   populated directly, without taking the usual locks
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking - inserted as the first (and only)
	 * node of the empty user tree */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

} /* end key_init() */
1042 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9