/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"

LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);

BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
	atomic_inc(&alg->cra_refcnt);
	return alg;
}

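/*
 * crypto_mod_get() takes a reference on both the algorithm and the module
 * that implements it; crypto_mod_put() drops them in the opposite order,
 * releasing the module only after the algorithm reference has been dropped,
 * since the algorithm's destructor may live in that module.
 */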
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
	return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);

void crypto_mod_put(struct crypto_alg *alg)
{
	struct module *module = alg->cra_module;

	crypto_alg_put(alg);
	module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);

static inline int crypto_is_test_larval(struct crypto_larval *larval)
{
	return larval->alg.cra_driver_name[0];
}

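/*
 * Algorithm selection (illustrative): a request for the generic name "aes"
 * can be satisfied by several registered implementations, e.g. a driver
 * named "aes-generic" and an accelerated "aes-asm", each matching with its
 * cra_name.  The candidate with the highest cra_priority wins, while a
 * request for a specific cra_driver_name such as "aes-generic" matches
 * exactly and ends the search immediately.  (The driver names above are
 * examples only; what is available depends on the loaded modules.)
 */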
static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
					      u32 mask)
{
	struct crypto_alg *q, *alg = NULL;
	int best = -2;

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int exact, fuzzy;

		if (crypto_is_moribund(q))
			continue;

		if ((q->cra_flags ^ type) & mask)
			continue;

		if (crypto_is_larval(q) &&
		    !crypto_is_test_larval((struct crypto_larval *)q) &&
		    ((struct crypto_larval *)q)->mask != mask)
			continue;

		exact = !strcmp(q->cra_driver_name, name);
		fuzzy = !strcmp(q->cra_name, name);
		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		best = q->cra_priority;
		if (alg)
			crypto_mod_put(alg);
		alg = q;

		if (exact)
			break;
	}

	return alg;
}

static void crypto_larval_destroy(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	BUG_ON(!crypto_is_larval(alg));
	if (larval->adult)
		crypto_mod_put(larval->adult);
	kfree(larval);
}

struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
{
	struct crypto_larval *larval;

	larval = kzalloc(sizeof(*larval), GFP_KERNEL);
	if (!larval)
		return ERR_PTR(-ENOMEM);

	larval->mask = mask;
	larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
	larval->alg.cra_priority = -1;
	larval->alg.cra_destroy = crypto_larval_destroy;

	strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
	init_completion(&larval->completion);

	return larval;
}
EXPORT_SYMBOL_GPL(crypto_larval_alloc);

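/*
 * The larval is added with a reference count of two: one reference is owned
 * by crypto_alg_list, the other by the caller.  If another thread registered
 * a matching algorithm (or larval) while we were allocating, the existing
 * entry is returned instead and our unused larval is simply freed.
 */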
static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
					    u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_larval *larval;

	larval = crypto_larval_alloc(name, type, mask);
	if (IS_ERR(larval))
		return ERR_CAST(larval);

	atomic_set(&larval->alg.cra_refcnt, 2);

	down_write(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	if (!alg) {
		alg = &larval->alg;
		list_add(&alg->cra_list, &crypto_alg_list);
	}
	up_write(&crypto_alg_sem);

	if (alg != &larval->alg)
		kfree(larval);

	return alg;
}

void crypto_larval_kill(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	down_write(&crypto_alg_sem);
	list_del(&alg->cra_list);
	up_write(&crypto_alg_sem);
	complete_all(&larval->completion);
	crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;
	long timeout;

	timeout = wait_for_completion_interruptible_timeout(
		&larval->completion, 60 * HZ);

	alg = larval->adult;
	if (timeout < 0)
		alg = ERR_PTR(-EINTR);
	else if (!timeout)
		alg = ERR_PTR(-ETIMEDOUT);
	else if (!alg)
		alg = ERR_PTR(-ENOENT);
	else if (crypto_is_test_larval(larval) &&
		 !(alg->cra_flags & CRYPTO_ALG_TESTED))
		alg = ERR_PTR(-EAGAIN);
	else if (!crypto_mod_get(alg))
		alg = ERR_PTR(-EAGAIN);
	crypto_mod_put(&larval->alg);

	return alg;
}

struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	down_read(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	up_read(&crypto_alg_sem);

	return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_lookup);

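/*
 * If the algorithm is not already registered, try to auto-load a module
 * aliased to its name via request_module().  Unless the caller explicitly
 * excluded fallback-requiring implementations, a "<name>-all" module alias
 * is requested as well.  If a matching entry still cannot be found, a
 * larval placeholder is registered so a crypto manager can later be asked
 * to construct the algorithm.
 */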
struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	if (!name)
		return ERR_PTR(-ENOENT);

	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
	type &= mask;

	alg = crypto_alg_lookup(name, type, mask);
	if (!alg) {
		request_module("%s", name);

		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
		      CRYPTO_ALG_NEED_FALLBACK))
			request_module("%s-all", name);

		alg = crypto_alg_lookup(name, type, mask);
	}

	if (alg)
		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;

	return crypto_larval_add(name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_larval_lookup);

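/*
 * Notify interested parties (normally the cryptomgr module) of a probe
 * request.  If nobody on the chain handled it (NOTIFY_DONE), cryptomgr is
 * loaded on demand and the notification is sent once more.
 */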
int crypto_probing_notify(unsigned long val, void *v)
{
	int ok;

	ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	if (ok == NOTIFY_DONE) {
		request_module("cryptomgr");
		ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	}

	return ok;
}
EXPORT_SYMBOL_GPL(crypto_probing_notify);

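/*
 * Top-level lookup used by the allocation paths.  Unless the caller asked
 * otherwise, only algorithms that have passed their self-tests are accepted
 * (CRYPTO_ALG_TESTED is forced into both type and mask).  If the name does
 * not resolve to a finished algorithm, CRYPTO_MSG_ALG_REQUEST is sent so
 * that a crypto manager can try to instantiate it, e.g. by wrapping a
 * template such as "hmac(sha256)" around registered primitives; that
 * example is illustrative, the actual construction happens elsewhere.
 */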
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_alg *larval;
	int ok;

	if (!((type | mask) & CRYPTO_ALG_TESTED)) {
		type |= CRYPTO_ALG_TESTED;
		mask |= CRYPTO_ALG_TESTED;
	}

	larval = crypto_larval_lookup(name, type, mask);
	if (IS_ERR(larval) || !crypto_is_larval(larval))
		return larval;

	ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);

	if (ok == NOTIFY_STOP)
		alg = crypto_larval_wait(larval);
	else {
		crypto_mod_put(larval);
		alg = ERR_PTR(-ENOENT);
	}
	crypto_larval_kill(larval);
	return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);

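/*
 * Algorithm types that provide a struct crypto_type (cra_type) initialise
 * their operations through it; only the legacy cipher and compression
 * types are still handled directly here.
 */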
static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;

	if (type_obj)
		return type_obj->init(tfm, type, mask);

	switch (crypto_tfm_alg_type(tfm)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		return crypto_init_cipher_ops(tfm);

	case CRYPTO_ALG_TYPE_COMPRESS:
		return crypto_init_compress_ops(tfm);

	default:
		break;
	}

	BUG();
	return -EINVAL;
}

static void crypto_exit_ops(struct crypto_tfm *tfm)
{
	const struct crypto_type *type = tfm->__crt_alg->cra_type;

	if (type) {
		if (tfm->exit)
			tfm->exit(tfm);
		return;
	}

	switch (crypto_tfm_alg_type(tfm)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		crypto_exit_cipher_ops(tfm);
		break;

	case CRYPTO_ALG_TYPE_COMPRESS:
		crypto_exit_compress_ops(tfm);
		break;

	default:
		BUG();
	}
}

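/*
 * The transform context is allocated together with struct crypto_tfm.  If
 * an algorithm declares a cra_alignmask larger than the alignment the
 * embedded context naturally gets, extra slack bytes are reserved so the
 * context pointer can later be aligned up to the requested boundary.
 */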
static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = alg->cra_type;
	unsigned int len;

	len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	if (type_obj)
		return len + type_obj->ctxsize(alg, type, mask);

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		BUG();

	case CRYPTO_ALG_TYPE_CIPHER:
		len += crypto_cipher_ctxsize(alg);
		break;

	case CRYPTO_ALG_TYPE_COMPRESS:
		len += crypto_compress_ctxsize(alg);
		break;
	}

	return len;
}

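/*
 * Mark an algorithm as dying after a failed instantiation (an init call
 * returning -EAGAIN).  crypto_is_moribund() then makes subsequent lookups
 * skip it, so the retry loops in the allocation functions can fall back to
 * a different implementation or have one constructed anew.
 */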
void crypto_shoot_alg(struct crypto_alg *alg)
{
	down_write(&crypto_alg_sem);
	alg->cra_flags |= CRYPTO_ALG_DYING;
	up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
				      u32 mask)
{
	struct crypto_tfm *tfm = NULL;
	unsigned int tfm_size;
	int err = -ENOMEM;

	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
	tfm = kzalloc(tfm_size, GFP_KERNEL);
	if (tfm == NULL)
		goto out_err;

	tfm->__crt_alg = alg;

	err = crypto_init_ops(tfm, type, mask);
	if (err)
		goto out_free_tfm;

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
out_free_tfm:
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(tfm);
out_err:
	tfm = ERR_PTR(err);
out:
	return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);

/*
 * crypto_alloc_base - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 *
 * This function should not be used by new algorithm types.
 * Please use crypto_alloc_tfm instead.
 *
 * crypto_alloc_base() will first attempt to locate an already loaded
 * algorithm. If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias. If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly. A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of a non-determinate type. Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_blkcipher.
 *
 * In case of error the return value is an error pointer.
 */
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_alg_mod_lookup(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
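
/*
 * Example usage (illustrative, not part of this file): a legacy caller
 * might do
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
 *				CRYPTO_ALG_TYPE_MASK);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 *
 * New code should prefer the type-specific wrappers built on
 * crypto_alloc_tfm() below.
 */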

void *crypto_create_tfm(struct crypto_alg *alg,
			const struct crypto_type *frontend)
{
	char *mem;
	struct crypto_tfm *tfm = NULL;
	unsigned int tfmsize;
	unsigned int total;
	int err = -ENOMEM;

	tfmsize = frontend->tfmsize;
	total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);

	mem = kzalloc(total, GFP_KERNEL);
	if (mem == NULL)
		goto out_err;

	tfm = (struct crypto_tfm *)(mem + tfmsize);
	tfm->__crt_alg = alg;

	err = frontend->init_tfm(tfm);
	if (err)
		goto out_free_tfm;

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
out_free_tfm:
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(mem);
out_err:
	mem = ERR_PTR(err);
out:
	return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm);

struct crypto_alg *crypto_find_alg(const char *alg_name,
				   const struct crypto_type *frontend,
				   u32 type, u32 mask)
{
	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
		crypto_alg_mod_lookup;

	if (frontend) {
		type &= frontend->maskclear;
		mask &= frontend->maskclear;
		type |= frontend->type;
		mask |= frontend->maskset;

		if (frontend->lookup)
			lookup = frontend->lookup;
	}

	return lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);

/*
 * crypto_alloc_tfm - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @frontend: Frontend algorithm type
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 *
 * crypto_alloc_tfm() will first attempt to locate an already loaded
 * algorithm. If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias. If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly. A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of a non-determinate type. Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_blkcipher.
 *
 * In case of error the return value is an error pointer.
 */
void *crypto_alloc_tfm(const char *alg_name,
		       const struct crypto_type *frontend, u32 type, u32 mask)
{
	void *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_find_alg(alg_name, frontend, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = crypto_create_tfm(alg, frontend);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
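
/*
 * Callers normally reach crypto_alloc_tfm() through a type-specific wrapper
 * that supplies the frontend, for example (illustrative, defined elsewhere):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_shash(tfm);
 */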

/*
 * crypto_destroy_tfm - Free crypto transform
 * @mem: Start of tfm slab
 * @tfm: Transform to free
 *
 * This function frees up the transform and any associated resources,
 * then drops the refcount on the associated algorithm.
 */
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
	struct crypto_alg *alg;

	if (unlikely(!mem))
		return;

	alg = tfm->__crt_alg;

	if (!tfm->exit && alg->cra_exit)
		alg->cra_exit(tfm);
	crypto_exit_ops(tfm);
	crypto_mod_put(alg);
	kzfree(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);

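/*
 * Check whether an algorithm with the given name and type is available.
 * Because this goes through crypto_alg_mod_lookup(), it may load modules
 * or ask a crypto manager to construct the algorithm, so it can sleep;
 * e.g. crypto_has_alg("sha256", 0, 0) reports whether SHA-256 is usable.
 */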
int crypto_has_alg(const char *name, u32 type, u32 mask)
{
	int ret = 0;
	struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);

	if (!IS_ERR(alg)) {
		crypto_mod_put(alg);
		ret = 1;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");