/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

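/*
 * Each possible CPU owns a private request queue and work item, so a
 * request is queued and later processed on the CPU that submitted it,
 * without any cross-CPU locking on the queue itself.
 */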
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

static void cryptd_queue_worker(struct work_struct *work);

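/*
 * Allocate and initialise the per-CPU queues: each possible CPU gets a
 * crypto_queue capped at @max_cpu_qlen requests plus a work item running
 * cryptd_queue_worker().  Returns 0 or -ENOMEM.
 */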
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

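/*
 * Queue a request on the current CPU's queue and kick its work item on
 * kcrypto_wq.  get_cpu()/put_cpu() pin the submitting task so that the
 * queue written to and the queue_work_on() target are the same CPU.
 */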
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule the work item if there is more work to
 * do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/* Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable keeps this handler from being
	 * preempted while a cryptd_enqueue_request() on the same CPU could
	 * touch the queue. */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

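/*
 * Common completion handler for both directions.  The request was queued
 * with req->base.complete pointing at a cryptd callback; the caller's
 * original completion was stashed in the request context and is restored
 * here before being invoked.  Softirqs are disabled around the call since
 * completions normally run in softirq context.
 */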
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

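/*
 * Allocate one buffer holding @head bytes of caller data, the
 * crypto_instance itself and @tail bytes of instance context.  The
 * driver name becomes "cryptd(<cra_driver_name>)", the algorithm name is
 * copied unchanged, and the priority is boosted by 50 over the wrapped
 * algorithm.
 */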
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

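/*
 * All asynchronous hash entry points funnel through here: the caller's
 * completion is saved in the request context, @complete (the matching
 * worker below) is substituted, and the request is pushed onto the
 * per-CPU queue for deferred processing.
 */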
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct cryptd_queue queue;

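/*
 * Template entry point, invoked when an instance such as "cryptd(xxx)"
 * is first requested: dispatch on the type of the wrapped algorithm and
 * build an async blkcipher or hash instance around it.
 */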
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	}

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

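/*
 * cryptd_alloc_ablkcipher - wrap a synchronous blkcipher in cryptd
 *
 * Allocates a "cryptd(<alg_name>)" instance and verifies that it really
 * came from this module.  A minimal caller sketch (illustrative only;
 * "cbc(aes)" is just an assumed example algorithm):
 *
 *	struct cryptd_ablkcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_ablkcipher(ctfm);
 */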
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

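/*
 * cryptd_alloc_ahash - wrap a synchronous shash in cryptd
 *
 * Allocates a "cryptd(<alg_name>)" ahash instance.  A minimal usage
 * sketch (illustrative only; "sha1" is an assumed example algorithm):
 *
 *	struct cryptd_ahash *ctfm;
 *
 *	ctfm = cryptd_alloc_ahash("sha1", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_ahash(ctfm);
 */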
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");