/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include "internal.h"

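/*
 * Private state hung off an ahash_request while the API layer fixes up
 * an unaligned result buffer.  The caller's completion callback, callback
 * data and result pointer are saved here, and ubuf provides aligned
 * scratch space for the algorithm to write the digest into.
 */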
struct ahash_request_priv {
    crypto_completion_t complete;
    void *data;
    u8 *result;
    void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
    return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                halg);
}

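/*
 * Map the current page of the walk and return the number of bytes
 * available for processing from it.  When the starting offset is
 * misaligned, only enough bytes to reach the next alignment boundary
 * are handed out so the next chunk starts aligned.
 */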
static int hash_walk_next(struct crypto_hash_walk *walk)
{
    unsigned int alignmask = walk->alignmask;
    unsigned int offset = walk->offset;
    unsigned int nbytes = min(walk->entrylen,
                  ((unsigned int)(PAGE_SIZE)) - offset);

    walk->data = crypto_kmap(walk->pg, 0);
    walk->data += offset;

    if (offset & alignmask)
        nbytes = alignmask + 1 - (offset & alignmask);

    walk->entrylen -= nbytes;
    return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
    struct scatterlist *sg;

    sg = walk->sg;
    walk->pg = sg_page(sg);
    walk->offset = sg->offset;
    walk->entrylen = sg->length;

    if (walk->entrylen > walk->total)
        walk->entrylen = walk->total;
    walk->total -= walk->entrylen;

    return hash_walk_next(walk);
}

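/*
 * Finish one step of a hash walk.  Returns a positive byte count when
 * more data remains (after realigning within the current page, or after
 * advancing to the next page or scatterlist entry), zero once the walk
 * is complete, or a negative errno passed through from the caller.
 */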
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
    unsigned int alignmask = walk->alignmask;
    unsigned int nbytes = walk->entrylen;

    walk->data -= walk->offset;

    if (nbytes && walk->offset & alignmask && !err) {
        walk->offset = ALIGN(walk->offset, alignmask + 1);
        walk->data += walk->offset;

        nbytes = min(nbytes,
                 ((unsigned int)(PAGE_SIZE)) - walk->offset);
        walk->entrylen -= nbytes;

        return nbytes;
    }

    crypto_kunmap(walk->data, 0);
    crypto_yield(walk->flags);

    if (err)
        return err;

    if (nbytes) {
        walk->offset = 0;
        walk->pg++;
        return hash_walk_next(walk);
    }

    if (!walk->total)
        return 0;

    walk->sg = scatterwalk_sg_next(walk->sg);

    return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
               struct crypto_hash_walk *walk)
{
    walk->total = req->nbytes;

    if (!walk->total)
        return 0;

    walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
    walk->sg = req->src;
    walk->flags = req->base.flags;

    return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
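
/*
 * A driver typically consumes the walk API with a loop of this shape
 * (illustrative sketch; my_update_block() stands in for the driver's
 * own block-processing routine):
 *
 *    struct crypto_hash_walk walk;
 *    int nbytes;
 *
 *    for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *         nbytes = crypto_hash_walk_done(&walk, 0))
 *        my_update_block(ctx, walk.data, nbytes);
 */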

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
                  struct crypto_hash_walk *walk,
                  struct scatterlist *sg, unsigned int len)
{
    walk->total = len;

    if (!walk->total)
        return 0;

    walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
    walk->sg = sg;
    walk->flags = hdesc->flags;

    return hash_walk_new_entry(walk);
}

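/*
 * Slow path for crypto_ahash_setkey(): the key is copied into a
 * temporary buffer aligned to the transform's alignment mask before
 * ->setkey() is invoked, and the buffer is zeroed on free so no key
 * material is left behind.
 */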
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                unsigned int keylen)
{
    unsigned long alignmask = crypto_ahash_alignmask(tfm);
    int ret;
    u8 *buffer, *alignbuffer;
    unsigned long absize;

    absize = keylen + alignmask;
    buffer = kmalloc(absize, GFP_KERNEL);
    if (!buffer)
        return -ENOMEM;

    alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
    memcpy(alignbuffer, key, keylen);
    ret = tfm->setkey(tfm, alignbuffer, keylen);
    kzfree(buffer);
    return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
            unsigned int keylen)
{
    unsigned long alignmask = crypto_ahash_alignmask(tfm);

    if ((unsigned long)key & alignmask)
        return ahash_setkey_unaligned(tfm, key, keylen);

    return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
              unsigned int keylen)
{
    return -ENOSYS;
}

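/*
 * ubuf in struct ahash_request_priv is already aligned to
 * crypto_tfm_ctx_alignment() via CRYPTO_MINALIGN_ATTR, so only the
 * slack beyond that minimum needs to be allocated here.  For example,
 * assuming an 8-byte ctx alignment and an alignmask of 63, this
 * evaluates to len + 56.
 */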
static inline unsigned int ahash_align_buffer_size(unsigned len,
                           unsigned long mask)
{
    return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
    struct ahash_request_priv *priv = req->priv;

    if (err == -EINPROGRESS)
        return;

    if (!err)
        memcpy(priv->result, req->result,
               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

    kzfree(priv);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
    struct ahash_request *areq = req->data;
    struct ahash_request_priv *priv = areq->priv;
    crypto_completion_t complete = priv->complete;
    void *data = priv->data;

    ahash_op_unaligned_finish(areq, err);

    complete(data, err);
}

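/*
 * Run an ahash operation with a bounce buffer for the digest: the
 * caller's result pointer and completion are stashed in the request's
 * priv structure, the request is redirected to write into the aligned
 * ubuf, and the digest is copied back to the real destination once the
 * operation completes (synchronously or via ahash_op_unaligned_done()).
 */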
static int ahash_op_unaligned(struct ahash_request *req,
                  int (*op)(struct ahash_request *))
{
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    unsigned long alignmask = crypto_ahash_alignmask(tfm);
    unsigned int ds = crypto_ahash_digestsize(tfm);
    struct ahash_request_priv *priv;
    int err;

    priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
               (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
               GFP_KERNEL : GFP_ATOMIC);
    if (!priv)
        return -ENOMEM;

    priv->result = req->result;
    priv->complete = req->base.complete;
    priv->data = req->base.data;

    req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
    req->base.complete = ahash_op_unaligned_done;
    req->base.data = req;
    req->priv = priv;

    err = op(req);
    ahash_op_unaligned_finish(req, err);

    return err;
}

static int crypto_ahash_op(struct ahash_request *req,
               int (*op)(struct ahash_request *))
{
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    unsigned long alignmask = crypto_ahash_alignmask(tfm);

    if ((unsigned long)req->result & alignmask)
        return ahash_op_unaligned(req, op);

    return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
    return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
    return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
    return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
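
/*
 * A caller drives the asynchronous hash API roughly as follows
 * (illustrative sketch: "sha256" is only an example algorithm name,
 * my_done() and my_ctx are placeholders, and error handling is elided):
 *
 *    struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *    struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                               my_done, &my_ctx);
 *    ahash_request_set_crypt(req, sg, digest, nbytes);
 *
 *    err = crypto_ahash_digest(req);
 *    if (err == -EINPROGRESS || err == -EBUSY)
 *        the final status is delivered to my_done();
 */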

static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
    struct ahash_request_priv *priv = req->priv;

    if (err == -EINPROGRESS)
        return;

    if (!err)
        memcpy(priv->result, req->result,
               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

    kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
    struct ahash_request *areq = req->data;
    struct ahash_request_priv *priv = areq->priv;
    crypto_completion_t complete = priv->complete;
    void *data = priv->data;

    ahash_def_finup_finish2(areq, err);

    complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
    if (err)
        goto out;

    req->base.complete = ahash_def_finup_done2;
    req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
    err = crypto_ahash_reqtfm(req)->final(req);

out:
    ahash_def_finup_finish2(req, err);
    return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
    struct ahash_request *areq = req->data;
    struct ahash_request_priv *priv = areq->priv;
    crypto_completion_t complete = priv->complete;
    void *data = priv->data;

    err = ahash_def_finup_finish1(areq, err);

    complete(data, err);
}

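/*
 * Default ->finup() for algorithms that only provide ->update() and
 * ->final().  The two calls are chained through the done1/done2
 * completion handlers so the combination remains fully asynchronous,
 * with the result bounced through an aligned buffer exactly as in
 * ahash_op_unaligned().
 */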
static int ahash_def_finup(struct ahash_request *req)
{
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    unsigned long alignmask = crypto_ahash_alignmask(tfm);
    unsigned int ds = crypto_ahash_digestsize(tfm);
    struct ahash_request_priv *priv;

    priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
               (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
               GFP_KERNEL : GFP_ATOMIC);
    if (!priv)
        return -ENOMEM;

    priv->result = req->result;
    priv->complete = req->base.complete;
    priv->data = req->base.data;

    req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
    req->base.complete = ahash_def_finup_done1;
    req->base.data = req;
    req->priv = priv;

    return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
    return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
    return -ENOSYS;
}

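/*
 * Transform construction: algorithms registered through the synchronous
 * shash interface are wrapped via crypto_init_shash_ops_async(), while
 * native ahash algorithms have their ops copied into the tfm, with the
 * -ENOSYS stubs above standing in for any optional callbacks they omit.
 */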
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
    struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
    struct ahash_alg *alg = crypto_ahash_alg(hash);

    hash->setkey = ahash_nosetkey;
    hash->export = ahash_no_export;
    hash->import = ahash_no_import;

    if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
        return crypto_init_shash_ops_async(tfm);

    hash->init = alg->init;
    hash->update = alg->update;
    hash->final = alg->final;
    hash->finup = alg->finup ?: ahash_def_finup;
    hash->digest = alg->digest;

    if (alg->setkey)
        hash->setkey = alg->setkey;
    if (alg->export)
        hash->export = alg->export;
    if (alg->import)
        hash->import = alg->import;

    return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
    if (alg->cra_type == &crypto_ahash_type)
        return alg->cra_ctxsize;

    return sizeof(struct crypto_shash *);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
    __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
    seq_printf(m, "type         : ahash\n");
    seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                         "yes" : "no");
    seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
    seq_printf(m, "digestsize   : %u\n",
           __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
    .extsize = crypto_ahash_extsize,
    .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
    .show = crypto_ahash_show,
#endif
    .maskclear = ~CRYPTO_ALG_TYPE_MASK,
    .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
    .type = CRYPTO_ALG_TYPE_AHASH,
    .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                    u32 mask)
{
    return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
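
/*
 * Allocation failures surface as an ERR_PTR (illustrative sketch;
 * "sha256" is just an example name):
 *
 *    struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *    if (IS_ERR(tfm))
 *        return PTR_ERR(tfm);
 *    ...
 *    crypto_free_ahash(tfm);
 */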

static int ahash_prepare_alg(struct ahash_alg *alg)
{
    struct crypto_alg *base = &alg->halg.base;

    if (alg->halg.digestsize > PAGE_SIZE / 8 ||
        alg->halg.statesize > PAGE_SIZE / 8)
        return -EINVAL;

    base->cra_type = &crypto_ahash_type;
    base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
    base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

    return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
    struct crypto_alg *base = &alg->halg.base;
    int err;

    err = ahash_prepare_alg(alg);
    if (err)
        return err;

    return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
    return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                struct ahash_instance *inst)
{
    int err;

    err = ahash_prepare_alg(&inst->alg);
    if (err)
        return err;

    return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
    crypto_drop_spawn(crypto_instance_ctx(inst));
    kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                struct hash_alg_common *alg,
                struct crypto_instance *inst)
{
    return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
    struct crypto_alg *alg;

    alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
    return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");