/* target/linux/generic/files/crypto/ocf/cryptosoft.c */

1/*
2 * An OCF module that uses the linux kernel cryptoapi, based on the
3 * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
4 * but is mostly unrecognisable,
5 *
6 * Written by David McCullough <david_mccullough@mcafee.com>
7 * Copyright (C) 2004-2010 David McCullough
8 * Copyright (C) 2004-2005 Intel Corporation.
9 *
10 * LICENSE TERMS
11 *
12 * The free distribution and use of this software in both source and binary
13 * form is allowed (with or without changes) provided that:
14 *
15 * 1. distributions of this source code include the above copyright
16 * notice, this list of conditions and the following disclaimer;
17 *
18 * 2. distributions in binary form include the above copyright
19 * notice, this list of conditions and the following disclaimer
20 * in the documentation and/or other associated materials;
21 *
22 * 3. the copyright holder's name is not used to endorse products
23 * built using this software without specific written permission.
24 *
25 * ALTERNATIVELY, provided that this notice is retained in full, this product
26 * may be distributed under the terms of the GNU General Public License (GPL),
27 * in which case the provisions of the GPL apply INSTEAD OF those given above.
28 *
29 * DISCLAIMER
30 *
31 * This software is provided 'as is' with no explicit or implied warranties
32 * in respect of its properties, including, but not limited to, correctness
33 * and/or fitness for purpose.
34 * ---------------------------------------------------------------------------
35 */
36
37#include <linux/version.h>
38#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
39#include <generated/autoconf.h>
40#else
41#include <linux/autoconf.h>
42#endif
43#include <linux/module.h>
44#include <linux/init.h>
45#include <linux/list.h>
46#include <linux/slab.h>
47#include <linux/sched.h>
48#include <linux/wait.h>
49#include <linux/crypto.h>
50#include <linux/mm.h>
51#include <linux/skbuff.h>
52#include <linux/random.h>
53#include <linux/version.h>
54#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
55#include <linux/scatterlist.h>
56#endif
57#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
58#include <crypto/hash.h>
59#endif
60
61#include <cryptodev.h>
62#include <uio.h>
63
/* Driver soft context; only holds the OCF device registration handle. */
struct {
    softc_device_decl sc_dev;
} swcr_softc;
67
/* NOTE(review): shadows the kernel's own offset_in_page() with an
 * identical expansion (byte offset of p within its page) — confirm the
 * redefinition is intentional for the targeted kernel range. */
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

/* Algorithm-class bits stored in swcr_data.sw_type */
#define SW_TYPE_CIPHER 0x01
#define SW_TYPE_HMAC 0x02
#define SW_TYPE_HASH 0x04
#define SW_TYPE_COMP 0x08
#define SW_TYPE_BLKCIPHER 0x10
#define SW_TYPE_ALG_MASK 0x1f

/* OR'd into sw_type when the session uses the async crypto interface */
#define SW_TYPE_ASYNC 0x8000

/* We change some of the above if we have an async interface */

#define SW_TYPE_ALG_AMASK (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)

#define SW_TYPE_ABLKCIPHER (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH (SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC (SW_TYPE_HMAC | SW_TYPE_ASYNC)

/* Maximum scatterlist entries built for a single request */
#define SCATTERLIST_MAX 16
88
/*
 * Per-algorithm state for one element of a session.  A session is a
 * linked list of these (one per cryptoini handed to swcr_newsession),
 * chained through sw_next.
 */
struct swcr_data {
    int sw_type;                /* SW_TYPE_* class bits, possibly | SW_TYPE_ASYNC */
    int sw_alg;                 /* OCF CRYPTO_* algorithm id */
    struct crypto_tfm *sw_tfm;  /* underlying linux crypto transform */
    union {
        struct {
            char *sw_key;       /* private copy of the hash/HMAC key */
            int sw_klen;        /* key length in bytes */
            int sw_mlen;        /* digest length copied back to caller */
        } hmac;
        void *sw_comp_buf;      /* scratch buffer for compression ops */
    } u;
    struct swcr_data *sw_next;  /* next algorithm in this session */
};
103
/*
 * State carried across one (possibly asynchronous) crypto operation,
 * allocated from swcr_req_cache and freed when the cryptop completes.
 */
struct swcr_req {
    struct swcr_data *sw_head;  /* first algorithm of the session */
    struct swcr_data *sw;       /* algorithm currently being processed */
    struct cryptop *crp;        /* the OCF request being serviced */
    struct cryptodesc *crd;     /* current descriptor within crp */
    struct scatterlist sg[SCATTERLIST_MAX]; /* SG view of crp's buffer */
    unsigned char iv[EALG_MAX_BLOCK_LEN];   /* IV storage for ciphers */
    char result[HASH_MAX_LEN];  /* digest output for async hashes */
    void *crypto_req;           /* ahash_request / ablkcipher_request */
};
114
/* Slab cache for struct swcr_req (type name changed in 2.6.20). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif
120
#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needed to be passed into the crypto core code.  Define both modes to 0
 * so the crypto_details[] table still initializes cleanly.
 */
#define CRYPTO_TFM_MODE_CBC 0
#define CRYPTO_TFM_MODE_ECB 0
#endif
129
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
    /*
     * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new
     * API into old API.  The code below is written against the new API; on
     * older kernels these shims map each new-style call onto its old-style
     * equivalent.
     */

    /* Symmetric/Block Cipher */
    struct blkcipher_desc
    {
        struct crypto_tfm *tfm;
        void *info;
    };
    /* ecb()/cbc() expand to a "name", mode pair used by crypto_details[] */
    #define ecb(X) #X , CRYPTO_TFM_MODE_ECB
    #define cbc(X) #X , CRYPTO_TFM_MODE_CBC
    #define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0)
    #define crypto_blkcipher_cast(X) X
    #define crypto_blkcipher_tfm(X) X
    /* NOTE(review): expansion references a local named 'mode' at the call
     * site — only valid where such a variable is in scope. */
    #define crypto_alloc_blkcipher(X, Y, Z) crypto_alloc_tfm(X, mode)
    #define crypto_blkcipher_ivsize(X) crypto_tfm_alg_ivsize(X)
    #define crypto_blkcipher_blocksize(X) crypto_tfm_alg_blocksize(X)
    #define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z)
    #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
                crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
    #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
                crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
    #define crypto_blkcipher_set_flags(x, y) /* nop */

    /* Hash/HMAC/Digest */
    struct hash_desc
    {
        struct crypto_tfm *tfm;
    };
    #define hmac(X) #X , 0
    #define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0)
    #define crypto_hash_cast(X) X
    #define crypto_hash_tfm(X) X
    #define crypto_alloc_hash(X, Y, Z) crypto_alloc_tfm(X, mode)
    #define crypto_hash_digestsize(X) crypto_tfm_alg_digestsize(X)
    /* NOTE(review): likewise relies on a caller-scope 'sg_num' variable. */
    #define crypto_hash_digest(W, X, Y, Z) \
                crypto_digest_digest((W)->tfm, X, sg_num, Z)

    /* Asymmetric Cipher */
    #define crypto_has_cipher(X, Y, Z) crypto_alg_available(X, 0)

    /* Compression */
    #define crypto_has_comp(X, Y, Z) crypto_alg_available(X, 0)
    #define crypto_comp_tfm(X) X
    #define crypto_comp_cast(X) X
    #define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
    #define plain(X) #X , 0
#else
    /* New API: template-wrapped algorithm names, mode is unused (0). */
    #define ecb(X) "ecb(" #X ")" , 0
    #define cbc(X) "cbc(" #X ")" , 0
    #define hmac(X) "hmac(" #X ")" , 0
    #define plain(X) #X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
186
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels; stub the API so the async path
 * compiles but is never selected (alloc always returns NULL) */
#define crypto_alloc_ablkcipher(a,b,c) (NULL)
#define crypto_ablkcipher_tfm(x) ((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b) /* nop */
#define crypto_ablkcipher_setkey(x, y, z) (-EINVAL)
#define crypto_has_ablkcipher(a,b,c) (0)
#else
#define HAVE_ABLKCIPHER
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels; same stubbing strategy as above */
#define crypto_ahash_tfm(x) ((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c) (NULL)
#define crypto_ahash_digestsize(x) 0
#else
#define HAVE_AHASH
#endif
206
/* Maps an OCF CRYPTO_* algorithm id onto a linux crypto API algorithm. */
struct crypto_details {
    char *alg_name;   /* linux algorithm name, e.g. "cbc(aes)" */
    int mode;         /* legacy CRYPTO_TFM_MODE_* (0 on modern kernels) */
    int sw_type;      /* SW_TYPE_* class of the algorithm */
};
212
/*
 * Algorithm table, indexed by OCF algorithm number.  The cbc()/ecb()/
 * hmac()/plain() macros each expand to a "name", mode pair, so every
 * entry initializes alg_name, mode and sw_type.  Entries not listed
 * here are zero-filled and rejected by swcr_newsession().
 */
static struct crypto_details crypto_details[] = {
    [CRYPTO_DES_CBC] = { cbc(des), SW_TYPE_BLKCIPHER, },
    [CRYPTO_3DES_CBC] = { cbc(des3_ede), SW_TYPE_BLKCIPHER, },
    [CRYPTO_BLF_CBC] = { cbc(blowfish), SW_TYPE_BLKCIPHER, },
    [CRYPTO_CAST_CBC] = { cbc(cast5), SW_TYPE_BLKCIPHER, },
    [CRYPTO_SKIPJACK_CBC] = { cbc(skipjack), SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5_HMAC] = { hmac(md5), SW_TYPE_HMAC, },
    [CRYPTO_SHA1_HMAC] = { hmac(sha1), SW_TYPE_HMAC, },
    [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160), SW_TYPE_HMAC, },
    [CRYPTO_MD5_KPDK] = { plain(md5-kpdk), SW_TYPE_HASH, },
    [CRYPTO_SHA1_KPDK] = { plain(sha1-kpdk), SW_TYPE_HASH, },
    [CRYPTO_AES_CBC] = { cbc(aes), SW_TYPE_BLKCIPHER, },
    [CRYPTO_ARC4] = { ecb(arc4), SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5] = { plain(md5), SW_TYPE_HASH, },
    [CRYPTO_SHA1] = { plain(sha1), SW_TYPE_HASH, },
    [CRYPTO_NULL_HMAC] = { hmac(digest_null), SW_TYPE_HMAC, },
    [CRYPTO_NULL_CBC] = { cbc(cipher_null), SW_TYPE_BLKCIPHER, },
    [CRYPTO_DEFLATE_COMP] = { plain(deflate), SW_TYPE_COMP, },
    [CRYPTO_SHA2_256_HMAC] = { hmac(sha256), SW_TYPE_HMAC, },
    [CRYPTO_SHA2_384_HMAC] = { hmac(sha384), SW_TYPE_HMAC, },
    [CRYPTO_SHA2_512_HMAC] = { hmac(sha512), SW_TYPE_HMAC, },
    [CRYPTO_CAMELLIA_CBC] = { cbc(camellia), SW_TYPE_BLKCIPHER, },
    [CRYPTO_SHA2_256] = { plain(sha256), SW_TYPE_HASH, },
    [CRYPTO_SHA2_384] = { plain(sha384), SW_TYPE_HASH, },
    [CRYPTO_SHA2_512] = { plain(sha512), SW_TYPE_HASH, },
    [CRYPTO_RIPEMD160] = { plain(ripemd160), SW_TYPE_HASH, },
};
240
/* OCF driver id; read-only module parameter exported for userspace. */
int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");

int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
                "Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
                "Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
                "Do not use async blk ciphers even if available");

/* Session table: slot 0 is reserved, grown (doubled) on demand. */
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;

static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);

/* OCF device method dispatch table */
static device_method_t swcr_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession, swcr_newsession),
    DEVMETHOD(cryptodev_freesession,swcr_freesession),
    DEVMETHOD(cryptodev_process, swcr_process),
};

#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");

static void swcr_process_req(struct swcr_req *req);
280
281/*
282 * Generate a new software session.
283 */
284static int
285swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
286{
287    struct swcr_data **swd;
288    u_int32_t i;
289    int error;
290    char *algo;
291    int mode;
292
293    dprintk("%s()\n", __FUNCTION__);
294    if (sid == NULL || cri == NULL) {
295        dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
296        return EINVAL;
297    }
298
299    if (swcr_sessions) {
300        for (i = 1; i < swcr_sesnum; i++)
301            if (swcr_sessions[i] == NULL)
302                break;
303    } else
304        i = 1; /* NB: to silence compiler warning */
305
306    if (swcr_sessions == NULL || i == swcr_sesnum) {
307        if (swcr_sessions == NULL) {
308            i = 1; /* We leave swcr_sessions[0] empty */
309            swcr_sesnum = CRYPTO_SW_SESSIONS;
310        } else
311            swcr_sesnum *= 2;
312
313        swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
314        if (swd == NULL) {
315            /* Reset session number */
316            if (swcr_sesnum == CRYPTO_SW_SESSIONS)
317                swcr_sesnum = 0;
318            else
319                swcr_sesnum /= 2;
320            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
321            return ENOBUFS;
322        }
323        memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
324
325        /* Copy existing sessions */
326        if (swcr_sessions) {
327            memcpy(swd, swcr_sessions,
328                (swcr_sesnum / 2) * sizeof(struct swcr_data *));
329            kfree(swcr_sessions);
330        }
331
332        swcr_sessions = swd;
333    }
334
335    swd = &swcr_sessions[i];
336    *sid = i;
337
338    while (cri) {
339        *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
340                SLAB_ATOMIC);
341        if (*swd == NULL) {
342            swcr_freesession(NULL, i);
343            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
344            return ENOBUFS;
345        }
346        memset(*swd, 0, sizeof(struct swcr_data));
347
348        if (cri->cri_alg < 0 ||
349                cri->cri_alg>=sizeof(crypto_details)/sizeof(crypto_details[0])){
350            printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
351            swcr_freesession(NULL, i);
352            return EINVAL;
353        }
354
355        algo = crypto_details[cri->cri_alg].alg_name;
356        if (!algo || !*algo) {
357            printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
358            swcr_freesession(NULL, i);
359            return EINVAL;
360        }
361
362        mode = crypto_details[cri->cri_alg].mode;
363        (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
364        (*swd)->sw_alg = cri->cri_alg;
365
366        /* Algorithm specific configuration */
367        switch (cri->cri_alg) {
368        case CRYPTO_NULL_CBC:
369            cri->cri_klen = 0; /* make it work with crypto API */
370            break;
371        default:
372            break;
373        }
374
375        if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
376            dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
377                    algo, mode);
378
379            /* try async first */
380            (*swd)->sw_tfm = swcr_no_ablk ? NULL :
381                    crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
382            if ((*swd)->sw_tfm) {
383                dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
384                (*swd)->sw_type |= SW_TYPE_ASYNC;
385            } else {
386                dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
387                (*swd)->sw_tfm = crypto_blkcipher_tfm(
388                        crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
389            }
390            if (!(*swd)->sw_tfm) {
391                dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
392                        algo,mode);
393                swcr_freesession(NULL, i);
394                return EINVAL;
395            }
396
397            if (debug) {
398                dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
399                        __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
400                for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
401                    dprintk("%s0x%x", (i % 8) ? " " : "\n ",
402                            cri->cri_key[i] & 0xff);
403                dprintk("\n");
404            }
405            if ((*swd)->sw_type & SW_TYPE_ASYNC) {
406                /* OCF doesn't enforce keys */
407                crypto_ablkcipher_set_flags(
408                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
409                            CRYPTO_TFM_REQ_WEAK_KEY);
410                error = crypto_ablkcipher_setkey(
411                            __crypto_ablkcipher_cast((*swd)->sw_tfm),
412                                cri->cri_key, (cri->cri_klen + 7) / 8);
413            } else {
414                /* OCF doesn't enforce keys */
415                crypto_blkcipher_set_flags(
416                        crypto_blkcipher_cast((*swd)->sw_tfm),
417                            CRYPTO_TFM_REQ_WEAK_KEY);
418                error = crypto_blkcipher_setkey(
419                            crypto_blkcipher_cast((*swd)->sw_tfm),
420                                cri->cri_key, (cri->cri_klen + 7) / 8);
421            }
422            if (error) {
423                printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
424                        (*swd)->sw_tfm->crt_flags);
425                swcr_freesession(NULL, i);
426                return error;
427            }
428        } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
429            dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
430                    algo, mode);
431
432            /* try async first */
433            (*swd)->sw_tfm = swcr_no_ahash ? NULL :
434                    crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
435            if ((*swd)->sw_tfm) {
436                dprintk("%s %s hash is async\n", __FUNCTION__, algo);
437                (*swd)->sw_type |= SW_TYPE_ASYNC;
438            } else {
439                dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
440                (*swd)->sw_tfm = crypto_hash_tfm(
441                        crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
442            }
443
444            if (!(*swd)->sw_tfm) {
445                dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
446                        algo, mode);
447                swcr_freesession(NULL, i);
448                return EINVAL;
449            }
450
451            (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
452            (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
453                    SLAB_ATOMIC);
454            if ((*swd)->u.hmac.sw_key == NULL) {
455                swcr_freesession(NULL, i);
456                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
457                return ENOBUFS;
458            }
459            memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
460            if (cri->cri_mlen) {
461                (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
462            } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
463                (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
464                        __crypto_ahash_cast((*swd)->sw_tfm));
465            } else {
466                (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
467                        crypto_hash_cast((*swd)->sw_tfm));
468            }
469        } else if ((*swd)->sw_type & SW_TYPE_COMP) {
470            (*swd)->sw_tfm = crypto_comp_tfm(
471                    crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
472            if (!(*swd)->sw_tfm) {
473                dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
474                        algo, mode);
475                swcr_freesession(NULL, i);
476                return EINVAL;
477            }
478            (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
479            if ((*swd)->u.sw_comp_buf == NULL) {
480                swcr_freesession(NULL, i);
481                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
482                return ENOBUFS;
483            }
484        } else {
485            printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
486            swcr_freesession(NULL, i);
487            return EINVAL;
488        }
489
490        cri = cri->cri_next;
491        swd = &((*swd)->sw_next);
492    }
493    return 0;
494}
495
496/*
497 * Free a session.
498 */
499static int
500swcr_freesession(device_t dev, u_int64_t tid)
501{
502    struct swcr_data *swd;
503    u_int32_t sid = CRYPTO_SESID2LID(tid);
504
505    dprintk("%s()\n", __FUNCTION__);
506    if (sid > swcr_sesnum || swcr_sessions == NULL ||
507            swcr_sessions[sid] == NULL) {
508        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
509        return(EINVAL);
510    }
511
512    /* Silently accept and return */
513    if (sid == 0)
514        return(0);
515
516    while ((swd = swcr_sessions[sid]) != NULL) {
517        swcr_sessions[sid] = swd->sw_next;
518        if (swd->sw_tfm) {
519            switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
520#ifdef HAVE_AHASH
521            case SW_TYPE_AHMAC:
522            case SW_TYPE_AHASH:
523                crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
524                break;
525#endif
526#ifdef HAVE_ABLKCIPHER
527            case SW_TYPE_ABLKCIPHER:
528                crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
529                break;
530#endif
531            case SW_TYPE_BLKCIPHER:
532                crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
533                break;
534            case SW_TYPE_HMAC:
535            case SW_TYPE_HASH:
536                crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
537                break;
538            case SW_TYPE_COMP:
539                crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
540            default:
541                crypto_free_tfm(swd->sw_tfm);
542                break;
543            }
544            swd->sw_tfm = NULL;
545        }
546        if (swd->sw_type & SW_TYPE_COMP) {
547            if (swd->u.sw_comp_buf)
548                kfree(swd->u.sw_comp_buf);
549        } else {
550            if (swd->u.hmac.sw_key)
551                kfree(swd->u.hmac.sw_key);
552        }
553        kfree(swd);
554    }
555    return 0;
556}
557
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
/* older kernels had no async interface */

/*
 * Completion callback for async hash/cipher requests.
 *
 * Copies back the digest (for hashes), frees the kernel crypto request,
 * then either continues with the next cryptodesc in the chain or marks
 * the whole cryptop done and releases the swcr_req.
 */
static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
    struct swcr_req *req = creq->data;

    dprintk("%s()\n", __FUNCTION__);
    if (err) {
        /* -EINPROGRESS: request was backlogged, a real completion follows */
        if (err == -EINPROGRESS)
            return;
        dprintk("%s() fail %d\n", __FUNCTION__, -err);
        /* crp_etype carries a positive errno back to OCF */
        req->crp->crp_etype = -err;
        goto done;
    }

    switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        /* inject the computed digest into the caller's buffer */
        crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
                req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
        ahash_request_free(req->crypto_req);
        break;
    case SW_TYPE_ABLKCIPHER:
        ablkcipher_request_free(req->crypto_req);
        break;
    default:
        req->crp->crp_etype = EINVAL;
        goto done;
    }

    /* more descriptors in this request? keep the chain going */
    req->crd = req->crd->crd_next;
    if (req->crd) {
        swcr_process_req(req);
        return;
    }

done:
    dprintk("%s crypto_done %p\n", __FUNCTION__, req);
    crypto_done(req->crp);
    kmem_cache_free(swcr_req_cache, req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
601
602
603static void swcr_process_req(struct swcr_req *req)
604{
605    struct swcr_data *sw;
606    struct cryptop *crp = req->crp;
607    struct cryptodesc *crd = req->crd;
608    struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
609    struct uio *uiop = (struct uio *) crp->crp_buf;
610    int sg_num, sg_len, skip;
611
612    dprintk("%s()\n", __FUNCTION__);
613
614    /*
615     * Find the crypto context.
616     *
617     * XXX Note that the logic here prevents us from having
618     * XXX the same algorithm multiple times in a session
619     * XXX (or rather, we can but it won't give us the right
620     * XXX results). To do that, we'd need some way of differentiating
621     * XXX between the various instances of an algorithm (so we can
622     * XXX locate the correct crypto context).
623     */
624    for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
625        ;
626
627    /* No such context ? */
628    if (sw == NULL) {
629        crp->crp_etype = EINVAL;
630        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
631        goto done;
632    }
633
634    req->sw = sw;
635    skip = crd->crd_skip;
636
637    /*
638     * setup the SG list skip from the start of the buffer
639     */
640    memset(req->sg, 0, sizeof(req->sg));
641    sg_init_table(req->sg, SCATTERLIST_MAX);
642    if (crp->crp_flags & CRYPTO_F_SKBUF) {
643        int i, len;
644
645        sg_num = 0;
646        sg_len = 0;
647
648        if (skip < skb_headlen(skb)) {
649            len = skb_headlen(skb) - skip;
650            if (len + sg_len > crd->crd_len)
651                len = crd->crd_len - sg_len;
652            sg_set_page(&req->sg[sg_num],
653                virt_to_page(skb->data + skip), len,
654                offset_in_page(skb->data + skip));
655            sg_len += len;
656            sg_num++;
657            skip = 0;
658        } else
659            skip -= skb_headlen(skb);
660
661        for (i = 0; sg_len < crd->crd_len &&
662                    i < skb_shinfo(skb)->nr_frags &&
663                    sg_num < SCATTERLIST_MAX; i++) {
664            if (skip < skb_shinfo(skb)->frags[i].size) {
665                len = skb_shinfo(skb)->frags[i].size - skip;
666                if (len + sg_len > crd->crd_len)
667                    len = crd->crd_len - sg_len;
668                sg_set_page(&req->sg[sg_num],
669                    skb_shinfo(skb)->frags[i].page,
670                    len,
671                    skb_shinfo(skb)->frags[i].page_offset + skip);
672                sg_len += len;
673                sg_num++;
674                skip = 0;
675            } else
676                skip -= skb_shinfo(skb)->frags[i].size;
677        }
678    } else if (crp->crp_flags & CRYPTO_F_IOV) {
679        int len;
680
681        sg_len = 0;
682        for (sg_num = 0; sg_len < crd->crd_len &&
683                sg_num < uiop->uio_iovcnt &&
684                sg_num < SCATTERLIST_MAX; sg_num++) {
685            if (skip <= uiop->uio_iov[sg_num].iov_len) {
686                len = uiop->uio_iov[sg_num].iov_len - skip;
687                if (len + sg_len > crd->crd_len)
688                    len = crd->crd_len - sg_len;
689                sg_set_page(&req->sg[sg_num],
690                    virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
691                    len,
692                    offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
693                sg_len += len;
694                skip = 0;
695            } else
696                skip -= uiop->uio_iov[sg_num].iov_len;
697        }
698    } else {
699        sg_len = (crp->crp_ilen - skip);
700        if (sg_len > crd->crd_len)
701            sg_len = crd->crd_len;
702        sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
703            sg_len, offset_in_page(crp->crp_buf + skip));
704        sg_num = 1;
705    }
706
707    switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
708
709#ifdef HAVE_AHASH
710    case SW_TYPE_AHMAC:
711    case SW_TYPE_AHASH:
712        {
713        int ret;
714
715        /* check we have room for the result */
716        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
717            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
718                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
719                    crd->crd_inject, sw->u.hmac.sw_mlen);
720            crp->crp_etype = EINVAL;
721            goto done;
722        }
723
724        req->crypto_req =
725                ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm),GFP_KERNEL);
726        if (!req->crypto_req) {
727            crp->crp_etype = ENOMEM;
728            dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
729            goto done;
730        }
731
732        ahash_request_set_callback(req->crypto_req,
733                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
734
735        memset(req->result, 0, sizeof(req->result));
736
737        if (sw->sw_type & SW_TYPE_AHMAC)
738            crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
739                    sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
740        ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
741        ret = crypto_ahash_digest(req->crypto_req);
742        switch (ret) {
743        case -EINPROGRESS:
744        case -EBUSY:
745            return;
746        default:
747        case 0:
748            dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
749            crp->crp_etype = ret;
750            ahash_request_free(req->crypto_req);
751            goto done;
752        }
753        } break;
754#endif /* HAVE_AHASH */
755
756#ifdef HAVE_ABLKCIPHER
757    case SW_TYPE_ABLKCIPHER: {
758        int ret;
759        unsigned char *ivp = req->iv;
760        int ivsize =
761            crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));
762
763        if (sg_len < crypto_ablkcipher_blocksize(
764                __crypto_ablkcipher_cast(sw->sw_tfm))) {
765            crp->crp_etype = EINVAL;
766            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
767                    sg_len, crypto_ablkcipher_blocksize(
768                        __crypto_ablkcipher_cast(sw->sw_tfm)));
769            goto done;
770        }
771
772        if (ivsize > sizeof(req->iv)) {
773            crp->crp_etype = EINVAL;
774            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
775            goto done;
776        }
777
778        req->crypto_req = ablkcipher_request_alloc(
779                __crypto_ablkcipher_cast(sw->sw_tfm), GFP_KERNEL);
780        if (!req->crypto_req) {
781            crp->crp_etype = ENOMEM;
782            dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
783                    __FILE__, __LINE__);
784            goto done;
785        }
786
787        ablkcipher_request_set_callback(req->crypto_req,
788                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
789
790        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
791            int i, error;
792
793            if (debug) {
794                dprintk("%s key:", __FUNCTION__);
795                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
796                    dprintk("%s0x%x", (i % 8) ? " " : "\n ",
797                            crd->crd_key[i] & 0xff);
798                dprintk("\n");
799            }
800            /* OCF doesn't enforce keys */
801            crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
802                    CRYPTO_TFM_REQ_WEAK_KEY);
803            error = crypto_ablkcipher_setkey(
804                        __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
805                        (crd->crd_klen + 7) / 8);
806            if (error) {
807                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
808                        error, sw->sw_tfm->crt_flags);
809                crp->crp_etype = -error;
810            }
811        }
812
813        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
814
815            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
816                ivp = crd->crd_iv;
817            else
818                get_random_bytes(ivp, ivsize);
819            /*
820             * do we have to copy the IV back to the buffer ?
821             */
822            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
823                crypto_copyback(crp->crp_flags, crp->crp_buf,
824                        crd->crd_inject, ivsize, (caddr_t)ivp);
825            }
826            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
827                    sg_len, ivp);
828            ret = crypto_ablkcipher_encrypt(req->crypto_req);
829
830        } else { /*decrypt */
831
832            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
833                ivp = crd->crd_iv;
834            else
835                crypto_copydata(crp->crp_flags, crp->crp_buf,
836                        crd->crd_inject, ivsize, (caddr_t)ivp);
837            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
838                    sg_len, ivp);
839            ret = crypto_ablkcipher_decrypt(req->crypto_req);
840        }
841
842        switch (ret) {
843        case -EINPROGRESS:
844        case -EBUSY:
845            return;
846        default:
847        case 0:
848            dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
849            crp->crp_etype = ret;
850            goto done;
851        }
852        } break;
853#endif /* HAVE_ABLKCIPHER */
854
855    case SW_TYPE_BLKCIPHER: {
856        unsigned char iv[EALG_MAX_BLOCK_LEN];
857        unsigned char *ivp = iv;
858        struct blkcipher_desc desc;
859        int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
860
861        if (sg_len < crypto_blkcipher_blocksize(
862                crypto_blkcipher_cast(sw->sw_tfm))) {
863            crp->crp_etype = EINVAL;
864            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
865                    sg_len, crypto_blkcipher_blocksize(
866                        crypto_blkcipher_cast(sw->sw_tfm)));
867            goto done;
868        }
869
870        if (ivsize > sizeof(iv)) {
871            crp->crp_etype = EINVAL;
872            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
873            goto done;
874        }
875
876        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
877            int i, error;
878
879            if (debug) {
880                dprintk("%s key:", __FUNCTION__);
881                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
882                    dprintk("%s0x%x", (i % 8) ? " " : "\n ",
883                            crd->crd_key[i] & 0xff);
884                dprintk("\n");
885            }
886            /* OCF doesn't enforce keys */
887            crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
888                    CRYPTO_TFM_REQ_WEAK_KEY);
889            error = crypto_blkcipher_setkey(
890                        crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
891                        (crd->crd_klen + 7) / 8);
892            if (error) {
893                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
894                        error, sw->sw_tfm->crt_flags);
895                crp->crp_etype = -error;
896            }
897        }
898
899        memset(&desc, 0, sizeof(desc));
900        desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
901
902        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
903
904            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
905                ivp = crd->crd_iv;
906            } else {
907                get_random_bytes(ivp, ivsize);
908            }
909            /*
910             * do we have to copy the IV back to the buffer ?
911             */
912            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
913                crypto_copyback(crp->crp_flags, crp->crp_buf,
914                        crd->crd_inject, ivsize, (caddr_t)ivp);
915            }
916            desc.info = ivp;
917            crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);
918
919        } else { /*decrypt */
920
921            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
922                ivp = crd->crd_iv;
923            } else {
924                crypto_copydata(crp->crp_flags, crp->crp_buf,
925                        crd->crd_inject, ivsize, (caddr_t)ivp);
926            }
927            desc.info = ivp;
928            crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
929        }
930        } break;
931
932    case SW_TYPE_HMAC:
933    case SW_TYPE_HASH:
934        {
935        char result[HASH_MAX_LEN];
936        struct hash_desc desc;
937
938        /* check we have room for the result */
939        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
940            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
941                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
942                    crd->crd_inject, sw->u.hmac.sw_mlen);
943            crp->crp_etype = EINVAL;
944            goto done;
945        }
946
947        memset(&desc, 0, sizeof(desc));
948        desc.tfm = crypto_hash_cast(sw->sw_tfm);
949
950        memset(result, 0, sizeof(result));
951
952        if (sw->sw_type & SW_TYPE_HMAC) {
953#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
954            crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
955                    req->sg, sg_num, result);
956#else
957            crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
958                    sw->u.hmac.sw_klen);
959            crypto_hash_digest(&desc, req->sg, sg_len, result);
960#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
961            
962        } else { /* SW_TYPE_HASH */
963            crypto_hash_digest(&desc, req->sg, sg_len, result);
964        }
965
966        crypto_copyback(crp->crp_flags, crp->crp_buf,
967                crd->crd_inject, sw->u.hmac.sw_mlen, result);
968        }
969        break;
970
971    case SW_TYPE_COMP: {
972        void *ibuf = NULL;
973        void *obuf = sw->u.sw_comp_buf;
974        int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
975        int ret = 0;
976
977        /*
978         * we need to use an additional copy if there is more than one
979         * input chunk since the kernel comp routines do not handle
980         * SG yet. Otherwise we just use the input buffer as is.
981         * Rather than allocate another buffer we just split the tmp
982         * buffer we already have.
983         * Perhaps we should just use zlib directly ?
984         */
985        if (sg_num > 1) {
986            int blk;
987
988            ibuf = obuf;
989            for (blk = 0; blk < sg_num; blk++) {
990                memcpy(obuf, sg_virt(&req->sg[blk]),
991                        req->sg[blk].length);
992                obuf += req->sg[blk].length;
993            }
994            olen -= sg_len;
995        } else
996            ibuf = sg_virt(&req->sg[0]);
997
998        if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
999            ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
1000                    ibuf, ilen, obuf, &olen);
1001            if (!ret && olen > crd->crd_len) {
1002                dprintk("cryptosoft: ERANGE compress %d into %d\n",
1003                        crd->crd_len, olen);
1004                if (swcr_fail_if_compression_grows)
1005                    ret = ERANGE;
1006            }
1007        } else { /* decompress */
1008            ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
1009                    ibuf, ilen, obuf, &olen);
1010            if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
1011                dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
1012                        "space for %d,at offset %d\n",
1013                        crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
1014                ret = ETOOSMALL;
1015            }
1016        }
1017        if (ret)
1018            dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
1019
1020        /*
1021         * on success copy result back,
1022         * linux crpyto API returns -errno, we need to fix that
1023         */
1024        crp->crp_etype = ret < 0 ? -ret : ret;
1025        if (ret == 0) {
1026            /* copy back the result and return it's size */
1027            crypto_copyback(crp->crp_flags, crp->crp_buf,
1028                    crd->crd_inject, olen, obuf);
1029            crp->crp_olen = olen;
1030        }
1031
1032
1033        } break;
1034
1035    default:
1036        /* Unknown/unsupported algorithm */
1037        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
1038        crp->crp_etype = EINVAL;
1039        goto done;
1040    }
1041
1042done:
1043    crypto_done(crp);
1044    kmem_cache_free(swcr_req_cache, req);
1045}
1046
1047
1048/*
1049 * Process a crypto request.
1050 */
1051static int
1052swcr_process(device_t dev, struct cryptop *crp, int hint)
1053{
1054    struct swcr_req *req = NULL;
1055    u_int32_t lid;
1056
1057    dprintk("%s()\n", __FUNCTION__);
1058    /* Sanity check */
1059    if (crp == NULL) {
1060        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
1061        return EINVAL;
1062    }
1063
1064    crp->crp_etype = 0;
1065
1066    if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1067        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
1068        crp->crp_etype = EINVAL;
1069        goto done;
1070    }
1071
1072    lid = crp->crp_sid & 0xffffffff;
1073    if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
1074            swcr_sessions[lid] == NULL) {
1075        crp->crp_etype = ENOENT;
1076        dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
1077        goto done;
1078    }
1079
1080    /*
1081     * do some error checking outside of the loop for SKB and IOV processing
1082     * this leaves us with valid skb or uiop pointers for later
1083     */
1084    if (crp->crp_flags & CRYPTO_F_SKBUF) {
1085        struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
1086        if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
1087            printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
1088                    skb_shinfo(skb)->nr_frags);
1089            goto done;
1090        }
1091    } else if (crp->crp_flags & CRYPTO_F_IOV) {
1092        struct uio *uiop = (struct uio *) crp->crp_buf;
1093        if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
1094            printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
1095                    uiop->uio_iovcnt);
1096            goto done;
1097        }
1098    }
1099
1100    /*
1101     * setup a new request ready for queuing
1102     */
1103    req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
1104    if (req == NULL) {
1105        dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
1106        crp->crp_etype = ENOMEM;
1107        goto done;
1108    }
1109    memset(req, 0, sizeof(*req));
1110
1111    req->sw_head = swcr_sessions[lid];
1112    req->crp = crp;
1113    req->crd = crp->crp_desc;
1114
1115    swcr_process_req(req);
1116    return 0;
1117
1118done:
1119    crypto_done(crp);
1120    if (req)
1121        kmem_cache_free(swcr_req_cache, req);
1122    return 0;
1123}
1124
1125
1126static int
1127cryptosoft_init(void)
1128{
1129    int i, sw_type, mode;
1130    char *algo;
1131
1132    dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
1133
1134    swcr_req_cache = kmem_cache_create("cryptosoft_req",
1135                sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
1136#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
1137                , NULL
1138#endif
1139                );
1140    if (!swcr_req_cache) {
1141        printk("cryptosoft: failed to create request cache\n");
1142        return -ENOENT;
1143    }
1144
1145    softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
1146
1147    swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
1148            CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1149    if (swcr_id < 0) {
1150        printk("cryptosoft: Software crypto device cannot initialize!");
1151        return -ENODEV;
1152    }
1153
1154#define REGISTER(alg) \
1155        crypto_register(swcr_id, alg, 0,0)
1156
1157    for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
1158        int found;
1159        
1160        algo = crypto_details[i].alg_name;
1161        if (!algo || !*algo) {
1162            dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
1163            continue;
1164        }
1165
1166        mode = crypto_details[i].mode;
1167        sw_type = crypto_details[i].sw_type;
1168
1169        found = 0;
1170        switch (sw_type & SW_TYPE_ALG_MASK) {
1171        case SW_TYPE_CIPHER:
1172            found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
1173            break;
1174        case SW_TYPE_HMAC:
1175            found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
1176            break;
1177        case SW_TYPE_HASH:
1178            found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
1179            break;
1180        case SW_TYPE_COMP:
1181            found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
1182            break;
1183        case SW_TYPE_BLKCIPHER:
1184            found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
1185            if (!found && !swcr_no_ablk)
1186                found = crypto_has_ablkcipher(algo, 0, 0);
1187            break;
1188        }
1189        if (found) {
1190            REGISTER(i);
1191        } else {
1192            dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
1193                    __FUNCTION__, sw_type, i, algo);
1194        }
1195    }
1196    return 0;
1197}
1198
1199static void
1200cryptosoft_exit(void)
1201{
1202    dprintk("%s()\n", __FUNCTION__);
1203    crypto_unregister_all(swcr_id);
1204    swcr_id = -1;
1205    kmem_cache_destroy(swcr_req_cache);
1206}
1207
/* late_initcall (not module_init) — presumably so built-in kernel crypto
 * algorithms are registered before we probe for them; TODO confirm */
late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
1214
