/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

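/*
 * Private state hung off an ahash_request while its result pointer is
 * temporarily redirected to an aligned bounce buffer (ubuf); it preserves
 * the caller's original result pointer and completion callback so they can
 * be restored when the operation finishes.
 */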
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
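
/*
 * Usage sketch: callers typically drive the walk in a loop, mirroring the
 * pattern used by shash_ahash_update() in shash.c (process() here is a
 * hypothetical per-chunk handler returning 0 or -errno):
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = process(walk.data, nbytes);
 *
 * Passing 0 to crypto_hash_walk_done() advances to the next chunk; passing
 * a negative error terminates the walk and propagates the error.
 */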

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}

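/*
 * For an unaligned key, copy it into a freshly allocated buffer aligned to
 * the transform's alignmask before handing it to ->setkey(), then scrub
 * and free the bounce buffer.
 */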
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

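/*
 * ahash_save_req - redirect a request's result pointer and completion
 * callback to a private, aligned bounce buffer, so that ->final(),
 * ->finup() or ->digest() can run even when the caller's result buffer
 * violates the algorithm's alignmask.
 */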
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 * is for internal use of the Crypto API and the
	 * user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the priv saved in the ADJUSTED request. */
	kzfree(priv);
}

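/*
 * Completion glue for the unaligned-result path: on success, copy the
 * digest from the aligned bounce buffer back into the caller's original
 * result buffer, then restore the request. -EINPROGRESS means the
 * operation is still running, so the request must stay adjusted.
 */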
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct crypto_async_request *req" here is in fact the
	 * "req.base" of the ADJUSTED request from ahash_op_unaligned(),
	 * thus as it is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv->result. */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

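/*
 * Default finup implementation for drivers that only provide ->update()
 * and ->final(): run ->update() first, then chain ->final(). The
 * _done1/_finish1 pair handles asynchronous completion of the update
 * step, the _done2/_finish2 pair that of the final step. The request is
 * saved/restored around the whole sequence so the caller's callback
 * fires exactly once with the original context.
 */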
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

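/*
 * Transform initialisation: algorithms whose cra_type is not
 * crypto_ahash_type are synchronous shash implementations and get wrapped
 * with the async-compatible ops from shash.c; native ahash algorithms
 * have their callbacks copied in directly, with stubs standing in for any
 * optional ops the driver did not provide.
 */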
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

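/*
 * Minimal usage sketch for the ahash API (error handling elided; "sha1",
 * my_done_cb and my_ctx are illustrative):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);	// 0, -EINPROGRESS or -errno
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */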
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

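/*
 * Registration sketch for a driver-provided algorithm (the driver names,
 * context struct and callbacks below are hypothetical):
 *
 *	static struct ahash_alg my_sha1_alg = {
 *		.init   = my_sha1_init,
 *		.update = my_sha1_update,
 *		.final  = my_sha1_final,
 *		.digest = my_sha1_digest,
 *		.halg = {
 *			.digestsize = SHA1_DIGEST_SIZE,
 *			.base = {
 *				.cra_name = "sha1",
 *				.cra_driver_name = "sha1-mydev",
 *				.cra_priority = 300,
 *				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
 *					     CRYPTO_ALG_ASYNC,
 *				.cra_blocksize = SHA1_BLOCK_SIZE,
 *				.cra_ctxsize = sizeof(struct my_sha1_ctx),
 *				.cra_module = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha1_alg);
 */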
int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");