Source at commit be977234bfb4a6dca8a39e7c52165e4cd536ad71, created 12 years 9 months ago by Lars-Peter Clausen ("jz4740: Fix compile error").

/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};
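
/*
 * Each cryptd transform wraps a synchronous "child" transform, and each
 * *_request_ctx above saves the caller's completion callback so that the
 * worker below can restore it once the real operation has run.
 */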
static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}
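
/*
 * A request is queued on the CPU that submitted it, and the work item is
 * scheduled on that same CPU via queue_work_on(), so the worker only has
 * to serialise against submitters running on its own CPU.
 */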

/*
 * Called in workqueue context: perform one unit of real crypto work (via
 * req->complete) and reschedule the worker if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable()/preempt_enable() prevents this
	 * worker from being preempted by cryptd_enqueue_request().
	 */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}
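
/*
 * A backlogged request is one that was accepted over the queue limit
 * because CRYPTO_TFM_REQ_MAY_BACKLOG was set; signalling it here with
 * -EINPROGRESS tells its submitter that it has left the backlog and is
 * now being processed.
 */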

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
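
/*
 * The saved completion is invoked with bottom halves disabled, matching
 * the softirq-like context that users of the async crypto API expect,
 * and req->base.complete is restored first so the callback sees the
 * request in its original state.
 */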

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}
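
/*
 * The instance keeps the child's cra_name but advertises a "cryptd(...)"
 * driver name, and its priority is raised by 50 so that, once created,
 * the async wrapper wins algorithm lookups over the plain child.
 */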

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}
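
/*
 * The shash_desc embedded in cryptd_hash_request_ctx carries the child's
 * hash state between steps; cryptd_hash_init_tfm() sized the request
 * context as sizeof(struct cryptd_hash_request_ctx) +
 * crypto_shash_descsize(hash) for exactly this reason.
 */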

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;

	rctx = aead_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);
	req->base.complete = rctx->complete;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}
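
/*
 * Unlike the blkcipher path, which builds a temporary blkcipher_desc,
 * the AEAD path reuses the request itself: aead_request_set_tfm()
 * redirects it to the synchronous child before the child's crypt
 * routine runs.
 */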

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
	inst->alg.cra_init = cryptd_aead_init_tfm;
	inst->alg.cra_exit = cryptd_aead_exit_tfm;
	inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
	inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
	inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
	inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->aead_spawn.base);
out_free_inst:
		kfree(inst);
	}
out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
		kfree(inst);
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
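
/*
 * The template is instantiated by name: asking the crypto API for
 * "cryptd(<driver>)" wraps <driver> in an async queue.  The
 * cryptd_alloc_*() helpers below simply build that name string and
 * allocate the resulting transform.
 */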

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
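
/*
 * Typical use, as an illustrative sketch ("cbc(aes)" stands in for
 * whatever blkcipher a caller actually wraps):
 *
 *	struct cryptd_ablkcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_ablkcipher(ctfm);
 *
 * Requests submitted through &ctfm->base run asynchronously in the
 * cryptd workqueue; cryptd_ablkcipher_child() below exposes the
 * synchronous child for direct use.
 */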

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
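
/*
 * Typical use, as an illustrative sketch ("sha1" stands in for whatever
 * shash a caller actually wraps):
 *
 *	struct cryptd_ahash *ctfm;
 *
 *	ctfm = cryptd_alloc_ahash("sha1", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_ahash(ctfm);
 *
 * cryptd_ahash_child() and cryptd_shash_desc() below give synchronous
 * access to the wrapped shash and to a request's descriptor state.
 */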

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
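
/*
 * As in the other allocators, the cra_module check above guards the
 * cast that follows: the name lookup could have resolved to some other
 * implementation, and only a transform owned by this module is
 * guaranteed to have cryptd's context layout.
 */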

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");