drivers/crypto/mv_cesa.c

/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"

#define MV_CESA "MV-CESA:"
#define MAX_HW_HASH_SIZE 0xFFFF
#define MV_CESA_EXPIRE 500 /* msec */

/*
 * STM:
 *  /---------------------------------------\
 *  |                                       | request complete
 * \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                        /°\               |
 *                         |                | more scatter entries
 *                         \________________/
 */
enum engine_status {
    ENGINE_IDLE,
    ENGINE_BUSY,
    ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it: sg iterator for src
 * @dst_sg_it: sg iterator for dst
 * @complete: completion callback for the current request
 * @process: callback that feeds the next chunk to the engine
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start: offset to add to src start position (scatter list)
 * @crypt_len: length of current hw crypt/hash process
 * @hw_nbytes: total bytes to process in hw for this request
 * @copy_back: whether to copy data back (crypt) or not (hash)
 * @sg_dst_left: bytes left in dst to process in this scatter list
 * @dst_start: offset to add to dst start position (scatter list)
 * @hw_processed_bytes: number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of
 * the SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
    struct sg_mapping_iter src_sg_it;
    struct sg_mapping_iter dst_sg_it;
    void (*complete)(void);
    void (*process)(int is_first);

    /* src mostly */
    int sg_src_left;
    int src_start;
    int crypt_len;
    int hw_nbytes;
    /* dst mostly */
    int copy_back;
    int sg_dst_left;
    int dst_start;
    int hw_processed_bytes;
};

struct crypto_priv {
    void __iomem *reg;
    void __iomem *sram;
    int irq;
    struct clk *clk;
    struct task_struct *queue_th;

    /* the lock protects queue and eng_st */
    spinlock_t lock;
    struct crypto_queue queue;
    enum engine_status eng_st;
    struct timer_list completion_timer;
    struct crypto_async_request *cur_req;
    struct req_progress p;
    int max_req_size;
    int sram_size;
    int has_sha1;
    int has_hmac_sha1;
};

static struct crypto_priv *cpg;

struct mv_ctx {
    u8 aes_enc_key[AES_KEY_LEN];
    u32 aes_dec_key[8];
    int key_len;
    u32 need_calc_aes_dkey;
};

enum crypto_op {
    COP_AES_ECB,
    COP_AES_CBC,
};

struct mv_req_ctx {
    enum crypto_op op;
    int decrypt;
};

enum hash_op {
    COP_SHA1,
    COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
    struct crypto_shash *fallback;
    struct crypto_shash *base_hash;
    u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
    int count_add;
    enum hash_op op;
};

struct mv_req_hash_ctx {
    u64 count;
    u32 state[SHA1_DIGEST_SIZE / 4];
    u8 buffer[SHA1_BLOCK_SIZE];
    int first_hash; /* marks that we don't have previous state */
    int last_chunk; /* marks that this is the 'final' request */
    int extra_bytes; /* unprocessed bytes in buffer */
    enum hash_op op;
    int count_add;
};

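/*
 * Watchdog for requests that never raise a completion interrupt: force
 * the engine to stop and hand the request over to the queue thread for
 * cleanup.
 */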
static void mv_completion_timer_callback(unsigned long unused)
{
    int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;

    printk(KERN_ERR MV_CESA
           "completion timer expired (CESA %sactive), cleaning up.\n",
           active ? "" : "in");

    del_timer(&cpg->completion_timer);
    writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
    while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
        printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n",
               __func__);
    cpg->eng_st = ENGINE_W_DEQUEUE;
    wake_up_process(cpg->queue_th);
}

static void mv_setup_timer(void)
{
    setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
    mod_timer(&cpg->completion_timer,
            jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
}

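/*
 * The engine needs the final round keys for decryption. Derive them once
 * from the tail of the expanded encryption key and cache the result until
 * the key changes.
 */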
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
    struct crypto_aes_ctx gen_aes_key;
    int key_pos;

    if (!ctx->need_calc_aes_dkey)
        return;

    crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

    key_pos = ctx->key_len + 24;
    memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
    switch (ctx->key_len) {
    case AES_KEYSIZE_256:
        key_pos -= 2;
        /* fall through */
    case AES_KEYSIZE_192:
        key_pos -= 2;
        memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
                4 * 4);
        break;
    }
    ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
        unsigned int len)
{
    struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
    struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

    switch (len) {
    case AES_KEYSIZE_128:
    case AES_KEYSIZE_192:
    case AES_KEYSIZE_256:
        break;
    default:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }
    ctx->key_len = len;
    ctx->need_calc_aes_dkey = 1;

    /* Copy only the supplied key; copying AES_KEY_LEN bytes could read
       past the caller's buffer for the shorter key sizes. */
    memcpy(ctx->aes_enc_key, key, len);
    return 0;
}

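/*
 * Copy @len bytes from the source scatterlist into @dbuf, advancing the
 * sg mapping iterator across entry boundaries as needed.
 */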
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
    int ret;
    void *sbuf;
    int copy_len;

    while (len) {
        if (!p->sg_src_left) {
            ret = sg_miter_next(&p->src_sg_it);
            BUG_ON(!ret);
            p->sg_src_left = p->src_sg_it.length;
            p->src_start = 0;
        }

        sbuf = p->src_sg_it.addr + p->src_start;

        copy_len = min(p->sg_src_left, len);
        memcpy(dbuf, sbuf, copy_len);

        p->src_start += copy_len;
        p->sg_src_left -= copy_len;

        len -= copy_len;
        dbuf += copy_len;
    }
}

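/*
 * Top up the SRAM input buffer with as much of the remaining request as
 * fits (bounded by max_req_size); crypt_len then reflects what is staged.
 */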
static void setup_data_in(void)
{
    struct req_progress *p = &cpg->p;
    int data_in_sram =
        min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
    copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
            data_in_sram - p->crypt_len);
    p->crypt_len = data_in_sram;
}

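/*
 * Program one AES step: build the sec_accel_config for the current
 * request, stage key/IV/data in SRAM, then kick the accelerator.
 */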
static void mv_process_current_q(int first_block)
{
    struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
    struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
    struct sec_accel_config op;

    switch (req_ctx->op) {
    case COP_AES_ECB:
        op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
        break;
    case COP_AES_CBC:
    default:
        op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
        op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
            ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
        if (first_block)
            memcpy(cpg->sram + SRAM_DATA_IV, req->info, AES_BLOCK_SIZE);
        break;
    }
    if (req_ctx->decrypt) {
        op.config |= CFG_DIR_DEC;
        memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
                AES_KEY_LEN);
    } else {
        op.config |= CFG_DIR_ENC;
        memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
                AES_KEY_LEN);
    }

    switch (ctx->key_len) {
    case AES_KEYSIZE_128:
        op.config |= CFG_AES_LEN_128;
        break;
    case AES_KEYSIZE_192:
        op.config |= CFG_AES_LEN_192;
        break;
    case AES_KEYSIZE_256:
        op.config |= CFG_AES_LEN_256;
        break;
    }
    op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
        ENC_P_DST(SRAM_DATA_OUT_START);
    op.enc_key_p = SRAM_DATA_KEY_P;

    setup_data_in();
    op.enc_len = cpg->p.crypt_len;
    memcpy(cpg->sram + SRAM_CONFIG, &op,
            sizeof(struct sec_accel_config));

    /* GO */
    mv_setup_timer();
    writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static void mv_crypto_algo_completion(void)
{
    struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
    struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

    sg_miter_stop(&cpg->p.src_sg_it);
    sg_miter_stop(&cpg->p.dst_sg_it);

    if (req_ctx->op != COP_AES_CBC)
        return;

    memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, AES_BLOCK_SIZE);
}

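/*
 * Program one hash step. Depending on whether this is the first and/or
 * last chunk of the request, the fragment mode is chosen and the partial
 * digest state is restored into the engine.
 */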
static void mv_process_hash_current(int first_block)
{
    struct ahash_request *req = ahash_request_cast(cpg->cur_req);
    const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
    struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
    struct req_progress *p = &cpg->p;
    struct sec_accel_config op = { 0 };
    int is_last;

    switch (req_ctx->op) {
    case COP_SHA1:
    default:
        op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
        break;
    case COP_HMAC_SHA1:
        op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
        memcpy(cpg->sram + SRAM_HMAC_IV_IN,
                tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
        break;
    }

    op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
        MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

    setup_data_in();

    op.mac_digest =
        MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
    op.mac_iv =
        MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
        MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

    is_last = req_ctx->last_chunk
        && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
        && (req_ctx->count <= MAX_HW_HASH_SIZE);
    if (req_ctx->first_hash) {
        if (is_last)
            op.config |= CFG_NOT_FRAG;
        else
            op.config |= CFG_FIRST_FRAG;

        req_ctx->first_hash = 0;
    } else {
        if (is_last)
            op.config |= CFG_LAST_FRAG;
        else
            op.config |= CFG_MID_FRAG;

        if (first_block) {
            writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
            writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
            writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
            writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
            writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
        }
    }

    memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

    /* GO */
    mv_setup_timer();
    writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
                      struct shash_desc *desc)
{
    int i;
    struct sha1_state shash_state;

    shash_state.count = ctx->count + ctx->count_add;
    for (i = 0; i < 5; i++)
        shash_state.state[i] = ctx->state[i];
    memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
    return crypto_shash_import(desc, &shash_state);
}

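/*
 * Finish a hash in software. Used when the request is too large for the
 * hardware (count > MAX_HW_HASH_SIZE) or too small to have touched it.
 */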
static int mv_hash_final_fallback(struct ahash_request *req)
{
    const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
    struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
    struct {
        struct shash_desc shash;
        char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
    } desc;
    int rc;

    desc.shash.tfm = tfm_ctx->fallback;
    desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
    if (unlikely(req_ctx->first_hash)) {
        crypto_shash_init(&desc.shash);
        crypto_shash_update(&desc.shash, req_ctx->buffer,
                    req_ctx->extra_bytes);
    } else {
        /* only SHA1 for now.... */
        rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
        if (rc)
            goto out;
    }
    rc = crypto_shash_final(&desc.shash, req->result);
out:
    return rc;
}

static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
{
    ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
    ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
    ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
    ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
    ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
}

static void mv_hash_algo_completion(void)
{
    struct ahash_request *req = ahash_request_cast(cpg->cur_req);
    struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

    if (ctx->extra_bytes)
        copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
    sg_miter_stop(&cpg->p.src_sg_it);

    if (likely(ctx->last_chunk)) {
        if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
            memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
                   crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
        } else {
            mv_save_digest_state(ctx);
            mv_hash_final_fallback(req);
        }
    } else {
        mv_save_digest_state(ctx);
    }
}

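/*
 * Called from the queue thread after the engine signalled completion:
 * copy results back to the destination scatterlist (crypt only), then
 * either feed the next chunk to the engine or complete the request.
 */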
static void dequeue_complete_req(void)
{
    struct crypto_async_request *req = cpg->cur_req;
    void *buf;
    int ret;

    cpg->p.hw_processed_bytes += cpg->p.crypt_len;
    if (cpg->p.copy_back) {
        int need_copy_len = cpg->p.crypt_len;
        int sram_offset = 0;

        do {
            int dst_copy;

            if (!cpg->p.sg_dst_left) {
                ret = sg_miter_next(&cpg->p.dst_sg_it);
                BUG_ON(!ret);
                cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
                cpg->p.dst_start = 0;
            }

            buf = cpg->p.dst_sg_it.addr;
            buf += cpg->p.dst_start;

            dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

            memcpy(buf,
                   cpg->sram + SRAM_DATA_OUT_START + sram_offset,
                   dst_copy);
            sram_offset += dst_copy;
            cpg->p.sg_dst_left -= dst_copy;
            need_copy_len -= dst_copy;
            cpg->p.dst_start += dst_copy;
        } while (need_copy_len > 0);
    }

    cpg->p.crypt_len = 0;

    BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
    if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
        /* process next scatter list entry */
        cpg->eng_st = ENGINE_BUSY;
        cpg->p.process(0);
    } else {
        cpg->p.complete();
        cpg->eng_st = ENGINE_IDLE;
        local_bh_disable();
        req->complete(req, 0);
        local_bh_enable();
    }
}

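/*
 * Count how many scatterlist entries are needed to cover @total_bytes.
 * Note: the entries are indexed as a flat array, so a non-chained
 * scatterlist is assumed.
 */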
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
    int i = 0;
    size_t cur_len;

    while (sl) {
        cur_len = sl[i].length;
        ++i;
        if (total_bytes > cur_len)
            total_bytes -= cur_len;
        else
            break;
    }

    return i;
}

static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
    struct req_progress *p = &cpg->p;
    int num_sgs;

    cpg->cur_req = &req->base;
    memset(p, 0, sizeof(struct req_progress));
    p->hw_nbytes = req->nbytes;
    p->complete = mv_crypto_algo_completion;
    p->process = mv_process_current_q;
    p->copy_back = 1;

    num_sgs = count_sgs(req->src, req->nbytes);
    sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

    num_sgs = count_sgs(req->dst, req->nbytes);
    sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

    mv_process_current_q(1);
}

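/*
 * Start a hash request. A trailing partial SHA1 block is carried over in
 * ctx->buffer; when nothing is left for the hardware, the request either
 * completes immediately or, for the final chunk, falls back to software.
 */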
static void mv_start_new_hash_req(struct ahash_request *req)
{
    struct req_progress *p = &cpg->p;
    struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
    int num_sgs, hw_bytes, old_extra_bytes, rc;

    cpg->cur_req = &req->base;
    memset(p, 0, sizeof(struct req_progress));
    hw_bytes = req->nbytes + ctx->extra_bytes;
    old_extra_bytes = ctx->extra_bytes;

    ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
    if (ctx->extra_bytes != 0
        && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
        hw_bytes -= ctx->extra_bytes;
    else
        ctx->extra_bytes = 0;

    num_sgs = count_sgs(req->src, req->nbytes);
    sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

    if (hw_bytes) {
        p->hw_nbytes = hw_bytes;
        p->complete = mv_hash_algo_completion;
        p->process = mv_process_hash_current;

        if (unlikely(old_extra_bytes)) {
            memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
                   old_extra_bytes);
            p->crypt_len = old_extra_bytes;
        }

        mv_process_hash_current(1);
    } else {
        copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
                ctx->extra_bytes - old_extra_bytes);
        sg_miter_stop(&p->src_sg_it);
        if (ctx->last_chunk)
            rc = mv_hash_final_fallback(req);
        else
            rc = 0;
        cpg->eng_st = ENGINE_IDLE;
        local_bh_disable();
        req->base.complete(&req->base, rc);
        local_bh_enable();
    }
}

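/*
 * Dispatcher kthread: dequeues completed work and, whenever the engine is
 * idle, pulls the next request off the crypto queue and starts it.
 */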
static int queue_manag(void *data)
{
    cpg->eng_st = ENGINE_IDLE;
    do {
        struct crypto_async_request *async_req = NULL;
        struct crypto_async_request *backlog = NULL;

        __set_current_state(TASK_INTERRUPTIBLE);

        if (cpg->eng_st == ENGINE_W_DEQUEUE)
            dequeue_complete_req();

        spin_lock_irq(&cpg->lock);
        if (cpg->eng_st == ENGINE_IDLE) {
            backlog = crypto_get_backlog(&cpg->queue);
            async_req = crypto_dequeue_request(&cpg->queue);
            if (async_req) {
                BUG_ON(cpg->eng_st != ENGINE_IDLE);
                cpg->eng_st = ENGINE_BUSY;
            }
        }
        spin_unlock_irq(&cpg->lock);

        if (backlog) {
            backlog->complete(backlog, -EINPROGRESS);
            backlog = NULL;
        }

        if (async_req) {
            if (async_req->tfm->__crt_alg->cra_type !=
                &crypto_ahash_type) {
                struct ablkcipher_request *req =
                    ablkcipher_request_cast(async_req);
                mv_start_new_crypt_req(req);
            } else {
                struct ahash_request *req =
                    ahash_request_cast(async_req);
                mv_start_new_hash_req(req);
            }
            async_req = NULL;
        }

        schedule();

    } while (!kthread_should_stop());
    return 0;
}

static int mv_handle_req(struct crypto_async_request *req)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&cpg->lock, flags);
    ret = crypto_enqueue_request(&cpg->queue, req);
    spin_unlock_irqrestore(&cpg->lock, flags);
    wake_up_process(cpg->queue_th);
    return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
    struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

    req_ctx->op = COP_AES_ECB;
    req_ctx->decrypt = 0;

    return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
    struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

    req_ctx->op = COP_AES_ECB;
    req_ctx->decrypt = 1;

    compute_aes_dec_key(ctx);
    return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
    struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

    req_ctx->op = COP_AES_CBC;
    req_ctx->decrypt = 0;

    return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
    struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

    req_ctx->op = COP_AES_CBC;
    req_ctx->decrypt = 1;

    compute_aes_dec_key(ctx);
    return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
    tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
    return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
                 int is_last, unsigned int req_len,
                 int count_add)
{
    memset(ctx, 0, sizeof(*ctx));
    ctx->op = op;
    ctx->count = req_len;
    ctx->first_hash = 1;
    ctx->last_chunk = is_last;
    ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
                   unsigned req_len)
{
    ctx->last_chunk = is_last;
    ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
    const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

    mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
                 tfm_ctx->count_add);
    return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
    if (!req->nbytes)
        return 0;

    mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
    return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
    struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

    ahash_request_set_crypt(req, NULL, req->result, 0);
    mv_update_hash_req_ctx(ctx, 1, 0);
    return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
    mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
    return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
    const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

    mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
                 req->nbytes, tfm_ctx->count_add);
    return mv_handle_req(&req->base);
}

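/*
 * Store the precomputed HMAC inner/outer pad states as big-endian words,
 * the byte order in which they are later copied to the engine's SRAM.
 */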
static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
                 const void *ostate)
{
    const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
    int i;

    for (i = 0; i < 5; i++) {
        ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
        ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
    }
}

static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
              unsigned int keylen)
{
    int rc;
    struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
    int bs, ds, ss;

    if (!ctx->base_hash)
        return 0;

    rc = crypto_shash_setkey(ctx->fallback, key, keylen);
    if (rc)
        return rc;

    /*
     * Can't see a way to extract the ipad/opad from the fallback tfm so
     * I'm basically copying code from the hmac module.
     */
    bs = crypto_shash_blocksize(ctx->base_hash);
    ds = crypto_shash_digestsize(ctx->base_hash);
    ss = crypto_shash_statesize(ctx->base_hash);

    {
        struct {
            struct shash_desc shash;
            char ctx[crypto_shash_descsize(ctx->base_hash)];
        } desc;
        unsigned int i;
        char ipad[ss];
        char opad[ss];

        desc.shash.tfm = ctx->base_hash;
        desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
            CRYPTO_TFM_REQ_MAY_SLEEP;

        if (keylen > bs) {
            int err;

            err = crypto_shash_digest(&desc.shash, key, keylen,
                          ipad);
            if (err)
                return err;

            keylen = ds;
        } else
            memcpy(ipad, key, keylen);

        memset(ipad + keylen, 0, bs - keylen);
        memcpy(opad, ipad, bs);

        for (i = 0; i < bs; i++) {
            ipad[i] ^= 0x36;
            opad[i] ^= 0x5c;
        }

        rc = crypto_shash_init(&desc.shash) ? :
            crypto_shash_update(&desc.shash, ipad, bs) ? :
            crypto_shash_export(&desc.shash, ipad) ? :
            crypto_shash_init(&desc.shash) ? :
            crypto_shash_update(&desc.shash, opad, bs) ? :
            crypto_shash_export(&desc.shash, opad);

        if (rc == 0)
            mv_hash_init_ivs(ctx, ipad, opad);

        return rc;
    }
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
                enum hash_op op, int count_add)
{
    const char *fallback_driver_name = tfm->__crt_alg->cra_name;
    struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
    struct crypto_shash *fallback_tfm = NULL;
    struct crypto_shash *base_hash = NULL;
    int err = -ENOMEM;

    ctx->op = op;
    ctx->count_add = count_add;

    /* Allocate a fallback and abort if it failed. */
    fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
                      CRYPTO_ALG_NEED_FALLBACK);
    if (IS_ERR(fallback_tfm)) {
        printk(KERN_WARNING MV_CESA
               "Fallback driver '%s' could not be loaded!\n",
               fallback_driver_name);
        err = PTR_ERR(fallback_tfm);
        goto out;
    }
    ctx->fallback = fallback_tfm;

    if (base_hash_name) {
        /* Allocate a hash to compute the ipad/opad of hmac. */
        base_hash = crypto_alloc_shash(base_hash_name, 0,
                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(base_hash)) {
            printk(KERN_WARNING MV_CESA
                   "Base driver '%s' could not be loaded!\n",
                   base_hash_name);
            err = PTR_ERR(base_hash);
            goto err_bad_base;
        }
    }
    ctx->base_hash = base_hash;

    crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                 sizeof(struct mv_req_hash_ctx) +
                 crypto_shash_descsize(ctx->fallback));
    return 0;
err_bad_base:
    crypto_free_shash(fallback_tfm);
out:
    return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
    struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

    crypto_free_shash(ctx->fallback);
    if (ctx->base_hash)
        crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
    return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
    return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

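/*
 * Interrupt handler: acknowledge ACCEL0_DONE and hand the engine over to
 * the queue thread for dequeueing.
 */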
static irqreturn_t crypto_int(int irq, void *priv)
{
    u32 val;

    val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
    if (!(val & SEC_INT_ACCEL0_DONE))
        return IRQ_NONE;

    if (!del_timer(&cpg->completion_timer)) {
        printk(KERN_WARNING MV_CESA
               "got an interrupt but no pending timer?\n");
    }
    val &= ~SEC_INT_ACCEL0_DONE;
    writel(val, cpg->reg + FPGA_INT_STATUS);
    writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
    BUG_ON(cpg->eng_st != ENGINE_BUSY);
    cpg->eng_st = ENGINE_W_DEQUEUE;
    wake_up_process(cpg->queue_th);
    return IRQ_HANDLED;
}

static struct crypto_alg mv_aes_alg_ecb = {
    .cra_name = "ecb(aes)",
    .cra_driver_name = "mv-ecb-aes",
    .cra_priority = 300,
    .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
              CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
    .cra_blocksize = AES_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct mv_ctx),
    .cra_alignmask = 0,
    .cra_type = &crypto_ablkcipher_type,
    .cra_module = THIS_MODULE,
    .cra_init = mv_cra_init,
    .cra_u = {
        .ablkcipher = {
            .min_keysize = AES_MIN_KEY_SIZE,
            .max_keysize = AES_MAX_KEY_SIZE,
            .setkey = mv_setkey_aes,
            .encrypt = mv_enc_aes_ecb,
            .decrypt = mv_dec_aes_ecb,
        },
    },
};

static struct crypto_alg mv_aes_alg_cbc = {
    .cra_name = "cbc(aes)",
    .cra_driver_name = "mv-cbc-aes",
    .cra_priority = 300,
    .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
              CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
    .cra_blocksize = AES_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct mv_ctx),
    .cra_alignmask = 0,
    .cra_type = &crypto_ablkcipher_type,
    .cra_module = THIS_MODULE,
    .cra_init = mv_cra_init,
    .cra_u = {
        .ablkcipher = {
            .ivsize = AES_BLOCK_SIZE,
            .min_keysize = AES_MIN_KEY_SIZE,
            .max_keysize = AES_MAX_KEY_SIZE,
            .setkey = mv_setkey_aes,
            .encrypt = mv_enc_aes_cbc,
            .decrypt = mv_dec_aes_cbc,
        },
    },
};

static struct ahash_alg mv_sha1_alg = {
    .init = mv_hash_init,
    .update = mv_hash_update,
    .final = mv_hash_final,
    .finup = mv_hash_finup,
    .digest = mv_hash_digest,
    .halg = {
         .digestsize = SHA1_DIGEST_SIZE,
         .base = {
              .cra_name = "sha1",
              .cra_driver_name = "mv-sha1",
              .cra_priority = 300,
              .cra_flags =
              CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
              CRYPTO_ALG_NEED_FALLBACK,
              .cra_blocksize = SHA1_BLOCK_SIZE,
              .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
              .cra_init = mv_cra_hash_sha1_init,
              .cra_exit = mv_cra_hash_exit,
              .cra_module = THIS_MODULE,
              }
         }
};

static struct ahash_alg mv_hmac_sha1_alg = {
    .init = mv_hash_init,
    .update = mv_hash_update,
    .final = mv_hash_final,
    .finup = mv_hash_finup,
    .digest = mv_hash_digest,
    .setkey = mv_hash_setkey,
    .halg = {
         .digestsize = SHA1_DIGEST_SIZE,
         .base = {
              .cra_name = "hmac(sha1)",
              .cra_driver_name = "mv-hmac-sha1",
              .cra_priority = 300,
              .cra_flags =
              CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
              CRYPTO_ALG_NEED_FALLBACK,
              .cra_blocksize = SHA1_BLOCK_SIZE,
              .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
              .cra_init = mv_cra_hash_hmac_sha1_init,
              .cra_exit = mv_cra_hash_exit,
              .cra_module = THIS_MODULE,
              }
         }
};

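/*
 * Probe: map registers and SRAM, start the queue thread, hook up the
 * interrupt, enable the (optional) clock, then register the algorithms.
 */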
static int mv_probe(struct platform_device *pdev)
{
    struct crypto_priv *cp;
    struct resource *res;
    int irq;
    int ret;

    if (cpg) {
        printk(KERN_ERR MV_CESA "Second crypto dev?\n");
        return -EEXIST;
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
    if (!res)
        return -ENXIO;

    cp = kzalloc(sizeof(*cp), GFP_KERNEL);
    if (!cp)
        return -ENOMEM;

    spin_lock_init(&cp->lock);
    crypto_init_queue(&cp->queue, 50);
    cp->reg = ioremap(res->start, resource_size(res));
    if (!cp->reg) {
        ret = -ENOMEM;
        goto err;
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
    if (!res) {
        ret = -ENXIO;
        goto err_unmap_reg;
    }
    cp->sram_size = resource_size(res);
    cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
    cp->sram = ioremap(res->start, cp->sram_size);
    if (!cp->sram) {
        ret = -ENOMEM;
        goto err_unmap_reg;
    }

    irq = platform_get_irq(pdev, 0);
    if (irq < 0 || irq == NO_IRQ) {
        /* don't return 0 (success) when NO_IRQ is 0 */
        ret = irq < 0 ? irq : -ENXIO;
        goto err_unmap_sram;
    }
    cp->irq = irq;

    platform_set_drvdata(pdev, cp);
    cpg = cp;

    cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
    if (IS_ERR(cp->queue_th)) {
        ret = PTR_ERR(cp->queue_th);
        goto err_unmap_sram;
    }

    ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
            cp);
    if (ret)
        goto err_thread;

    /* Not all platforms can gate the clock, so it is not
       an error if the clock does not exist. */
    cp->clk = clk_get(&pdev->dev, NULL);
    if (!IS_ERR(cp->clk))
        clk_prepare_enable(cp->clk);

    writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
    writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
    writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
    writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

    ret = crypto_register_alg(&mv_aes_alg_ecb);
    if (ret) {
        printk(KERN_WARNING MV_CESA
               "Could not register aes-ecb driver\n");
        goto err_irq;
    }

    ret = crypto_register_alg(&mv_aes_alg_cbc);
    if (ret) {
        printk(KERN_WARNING MV_CESA
               "Could not register aes-cbc driver\n");
        goto err_unreg_ecb;
    }

    ret = crypto_register_ahash(&mv_sha1_alg);
    if (ret == 0)
        cpg->has_sha1 = 1;
    else
        printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

    ret = crypto_register_ahash(&mv_hmac_sha1_alg);
    if (ret == 0) {
        cpg->has_hmac_sha1 = 1;
    } else {
        printk(KERN_WARNING MV_CESA
               "Could not register hmac-sha1 driver\n");
    }

    return 0;
err_unreg_ecb:
    crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
    free_irq(irq, cp);
    if (!IS_ERR(cp->clk)) {
        clk_disable_unprepare(cp->clk);
        clk_put(cp->clk);
    }
err_thread:
    kthread_stop(cp->queue_th);
err_unmap_sram:
    iounmap(cp->sram);
err_unmap_reg:
    iounmap(cp->reg);
err:
    kfree(cp);
    cpg = NULL;
    platform_set_drvdata(pdev, NULL);
    return ret;
}

static int mv_remove(struct platform_device *pdev)
{
    struct crypto_priv *cp = platform_get_drvdata(pdev);

    crypto_unregister_alg(&mv_aes_alg_ecb);
    crypto_unregister_alg(&mv_aes_alg_cbc);
    if (cp->has_sha1)
        crypto_unregister_ahash(&mv_sha1_alg);
    if (cp->has_hmac_sha1)
        crypto_unregister_ahash(&mv_hmac_sha1_alg);
    kthread_stop(cp->queue_th);
    free_irq(cp->irq, cp);
    memset(cp->sram, 0, cp->sram_size);
    iounmap(cp->sram);
    iounmap(cp->reg);

    if (!IS_ERR(cp->clk)) {
        clk_disable_unprepare(cp->clk);
        clk_put(cp->clk);
    }

    kfree(cp);
    cpg = NULL;
    return 0;
}

static struct platform_driver marvell_crypto = {
    .probe = mv_probe,
    .remove = mv_remove,
    .driver = {
        .owner = THIS_MODULE,
        .name = "mv_crypto",
    },
};
MODULE_ALIAS("platform:mv_crypto");

module_platform_driver(marvell_crypto);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");