drivers/crypto/picoxcell_crypto.c

/*
 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "picoxcell_crypto_regs.h"

/*
 * The threshold for the number of entries in the CMD FIFO available before
 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
 * number of interrupts raised to the CPU.
 */
#define CMD0_IRQ_THRESHOLD 1

/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
 * flight is greater than the STAT_IRQ_THRESHOLD or is 0, the timer is
 * disabled. When there are packets in flight but fewer than the threshold,
 * we enable the timer and, at expiry, attempt to remove any processed
 * packets from the queue. If there are still packets left, we schedule the
 * timer again.
 */
#define PACKET_TIMEOUT 1

/* The priority to register each algorithm with. */
#define SPACC_CRYPTO_ALG_PRIORITY 10000

#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16
#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64
#define SPACC_CRYPTO_IPSEC_MAX_CTXS 32
#define SPACC_CRYPTO_IPSEC_FIFO_SZ 32
#define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64
#define SPACC_CRYPTO_L2_HASH_PG_SZ 64
#define SPACC_CRYPTO_L2_MAX_CTXS 128
#define SPACC_CRYPTO_L2_FIFO_SZ 128

#define MAX_DDT_LEN 16

/* DDT format. This must match the hardware DDT format exactly. */
struct spacc_ddt {
    dma_addr_t p;
    u32 len;
};
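
/*
 * For example, a payload that DMA-maps to two segments yields a chain of
 * three DDT entries: { addr0, len0 }, { addr1, len1 } and a terminating
 * { 0, 0 } entry that marks the end of the list for the hardware.
 * MAX_DDT_LEN bounds the chain length including the terminator.
 */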

/*
 * Asynchronous crypto request structure.
 *
 * This structure defines a request that is either queued for processing or
 * being processed.
 */
struct spacc_req {
    struct list_head list;
    struct spacc_engine *engine;
    struct crypto_async_request *req;
    int result;
    bool is_encrypt;
    unsigned ctx_id;
    dma_addr_t src_addr, dst_addr;
    struct spacc_ddt *src_ddt, *dst_ddt;
    void (*complete)(struct spacc_req *req);

    /* AEAD specific bits. */
    u8 *giv;
    size_t giv_len;
    dma_addr_t giv_pa;
};
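
/*
 * A request moves across three engine lists: it is queued on
 * engine->pending, moved to engine->in_progress by spacc_push() when the
 * command FIFO has space, and finally moved to engine->completed by
 * spacc_process_done(), after which the completion tasklet invokes its
 * ->complete() callback.
 */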

struct spacc_engine {
    void __iomem *regs;
    struct list_head pending;
    int next_ctx;
    spinlock_t hw_lock;
    int in_flight;
    struct list_head completed;
    struct list_head in_progress;
    struct tasklet_struct complete;
    unsigned long fifo_sz;
    void __iomem *cipher_ctx_base;
    void __iomem *hash_key_base;
    struct spacc_alg *algs;
    unsigned num_algs;
    struct list_head registered_algs;
    size_t cipher_pg_sz;
    size_t hash_pg_sz;
    const char *name;
    struct clk *clk;
    struct device *dev;
    unsigned max_ctxs;
    struct timer_list packet_timeout;
    unsigned stat_irq_thresh;
    struct dma_pool *req_pool;
};

/* Algorithm type mask. */
#define SPACC_CRYPTO_ALG_MASK 0x7

/* SPACC definition of a crypto algorithm. */
struct spacc_alg {
    unsigned long ctrl_default;
    unsigned long type;
    struct crypto_alg alg;
    struct spacc_engine *engine;
    struct list_head entry;
    int key_offs;
    int iv_offs;
};

/* Generic context structure for any algorithm type. */
struct spacc_generic_ctx {
    struct spacc_engine *engine;
    int flags;
    int key_offs;
    int iv_offs;
};

/* Block cipher context. */
struct spacc_ablk_ctx {
    struct spacc_generic_ctx generic;
    u8 key[AES_MAX_KEY_SIZE];
    u8 key_len;
    /*
     * The fallback cipher. If the operation can't be done in hardware,
     * fallback to a software version.
     */
    struct crypto_ablkcipher *sw_cipher;
};

/* AEAD cipher context. */
struct spacc_aead_ctx {
    struct spacc_generic_ctx generic;
    u8 cipher_key[AES_MAX_KEY_SIZE];
    u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
    u8 cipher_key_len;
    u8 hash_key_len;
    struct crypto_aead *sw_cipher;
    size_t auth_size;
    u8 salt[AES_BLOCK_SIZE];
};

static int spacc_ablk_submit(struct spacc_req *req);

static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
{
    return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}

static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
    u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);

    return fifo_stat & SPA_FIFO_CMD_FULL;
}

/*
 * Given a cipher context and a context number, get the base address of the
 * context page.
 *
 * Returns the address of the context page where the key/context may
 * be written.
 */
static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
                        unsigned indx,
                        bool is_cipher_ctx)
{
    return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
            (indx * ctx->engine->cipher_pg_sz) :
        ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
}

/* The context pages can only be written with 32-bit accesses. */
static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
                 unsigned count)
{
    const u32 *src32 = (const u32 *) src;

    while (count--)
        writel(*src32++, dst++);
}

static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
                   void __iomem *page_addr, const u8 *key,
                   size_t key_len, const u8 *iv, size_t iv_len)
{
    void __iomem *key_ptr = page_addr + ctx->key_offs;
    void __iomem *iv_ptr = page_addr + ctx->iv_offs;

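    /*
     * The lengths are byte counts and are assumed to be multiples of 4:
     * memcpy_toio32() takes a count of 32-bit words, so a 16-byte AES-128
     * key, for example, is written as four 32-bit accesses.
     */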
    memcpy_toio32(key_ptr, key, key_len / 4);
    memcpy_toio32(iv_ptr, iv, iv_len / 4);
}

/*
 * Load a context into the engine's context memory.
 *
 * Returns the index of the context page where the context was loaded.
 */
static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
                   const u8 *ciph_key, size_t ciph_len,
                   const u8 *iv, size_t ivlen, const u8 *hash_key,
                   size_t hash_len)
{
    unsigned indx = ctx->engine->next_ctx++;
    void __iomem *ciph_page_addr, *hash_page_addr;

    ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
    hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);

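    /*
     * Context pages are handed out round-robin. The masking below assumes
     * fifo_sz is a power of two (both FIFO sizes defined above are), so
     * the index wraps back to zero without a division.
     */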
    ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
    spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
                   ivlen);
    writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
           (1 << SPA_KEY_SZ_CIPHER_OFFSET),
           ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);

    if (hash_key) {
        memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
        writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
               ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
    }

    return indx;
}

/* Count the number of scatterlist entries in a scatterlist. */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
    struct scatterlist *sg = sg_list;
    int sg_nents = 0;

    while (nbytes > 0) {
        ++sg_nents;
        nbytes -= sg->length;
        sg = sg_next(sg);
    }

    return sg_nents;
}

static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
{
    ddt->p = phys;
    ddt->len = len;
}

/*
 * Take a crypto request and scatterlists for the data and turn them into DDTs
 * for passing to the crypto engines. This also DMA maps the data so that the
 * crypto engines can DMA to/from them.
 */
static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
                     struct scatterlist *payload,
                     unsigned nbytes,
                     enum dma_data_direction dir,
                     dma_addr_t *ddt_phys)
{
    unsigned nents, mapped_ents;
    struct scatterlist *cur;
    struct spacc_ddt *ddt;
    int i;

    nents = sg_count(payload, nbytes);
    mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);

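    /* The +1 accounts for the terminating (0, 0) DDT entry set below. */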
    if (mapped_ents + 1 > MAX_DDT_LEN)
        goto out;

    ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
    if (!ddt)
        goto out;

    for_each_sg(payload, cur, mapped_ents, i)
        ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
    ddt_set(&ddt[mapped_ents], 0, 0);

    return ddt;

out:
    dma_unmap_sg(engine->dev, payload, nents, dir);
    return NULL;
}

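/*
 * Build the source and destination DDT chains for an AEAD request. The
 * chains constructed below have the form:
 *
 *   src: [assoc entries][IV][payload entries][0,0 terminator]
 *   dst: [assoc entries (encrypt only)][IV (giv or encrypt)][payload][0,0]
 */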
static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
{
    struct aead_request *areq = container_of(req->req, struct aead_request,
                         base);
    struct spacc_engine *engine = req->engine;
    struct spacc_ddt *src_ddt, *dst_ddt;
    unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
    unsigned nents = sg_count(areq->src, areq->cryptlen);
    dma_addr_t iv_addr;
    struct scatterlist *cur;
    int i, dst_ents, src_ents, assoc_ents;
    u8 *iv = giv ? giv : areq->iv;

    src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
    if (!src_ddt)
        return -ENOMEM;

    dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
    if (!dst_ddt) {
        dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
        return -ENOMEM;
    }

    req->src_ddt = src_ddt;
    req->dst_ddt = dst_ddt;

    assoc_ents = dma_map_sg(engine->dev, areq->assoc,
        sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
    if (areq->src != areq->dst) {
        src_ents = dma_map_sg(engine->dev, areq->src, nents,
                      DMA_TO_DEVICE);
        dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
                      DMA_FROM_DEVICE);
    } else {
        src_ents = dma_map_sg(engine->dev, areq->src, nents,
                      DMA_BIDIRECTIONAL);
        dst_ents = 0;
    }

    /*
     * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
     * formed by the crypto block and sent as the ESP IV for IPSEC.
     */
    iv_addr = dma_map_single(engine->dev, iv, ivsize,
                 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
    req->giv_pa = iv_addr;

    /*
     * Build the DDT entries for the associated data (mapped above). For
     * decryption we don't copy the associated data to the destination.
     */
    for_each_sg(areq->assoc, cur, assoc_ents, i) {
        ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
        if (req->is_encrypt)
            ddt_set(dst_ddt++, sg_dma_address(cur),
                sg_dma_len(cur));
    }
    ddt_set(src_ddt++, iv_addr, ivsize);

    if (giv || req->is_encrypt)
        ddt_set(dst_ddt++, iv_addr, ivsize);

    /*
     * Now map in the payload for the source and destination and terminate
     * with the NULL pointers.
     */
    for_each_sg(areq->src, cur, src_ents, i) {
        ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
        if (areq->src == areq->dst)
            ddt_set(dst_ddt++, sg_dma_address(cur),
                sg_dma_len(cur));
    }

    for_each_sg(areq->dst, cur, dst_ents, i)
        ddt_set(dst_ddt++, sg_dma_address(cur),
            sg_dma_len(cur));

    ddt_set(src_ddt, 0, 0);
    ddt_set(dst_ddt, 0, 0);

    return 0;
}

static void spacc_aead_free_ddts(struct spacc_req *req)
{
    struct aead_request *areq = container_of(req->req, struct aead_request,
                         base);
    struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
    struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
    struct spacc_engine *engine = aead_ctx->generic.engine;
    unsigned ivsize = alg->alg.cra_aead.ivsize;
    unsigned nents = sg_count(areq->src, areq->cryptlen);

    if (areq->src != areq->dst) {
        dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
        dma_unmap_sg(engine->dev, areq->dst,
                 sg_count(areq->dst, areq->cryptlen),
                 DMA_FROM_DEVICE);
    } else
        dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

    dma_unmap_sg(engine->dev, areq->assoc,
             sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);

    dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);

    dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
    dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}

static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
               dma_addr_t ddt_addr, struct scatterlist *payload,
               unsigned nbytes, enum dma_data_direction dir)
{
    unsigned nents = sg_count(payload, nbytes);

    dma_unmap_sg(req->engine->dev, payload, nents, dir);
    dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}

/*
 * Set key for a DES operation in an AEAD cipher. This also performs weak key
 * checking if required.
 */
static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
                 unsigned int len)
{
    struct crypto_tfm *tfm = crypto_aead_tfm(aead);
    struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
    u32 tmp[DES_EXPKEY_WORDS];

    if (unlikely(!des_ekey(tmp, key)) &&
        (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
        tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
        return -EINVAL;
    }

    memcpy(ctx->cipher_key, key, len);
    ctx->cipher_key_len = len;

    return 0;
}

/* Set the key for the AES block cipher component of the AEAD transform. */
static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
                 unsigned int len)
{
    struct crypto_tfm *tfm = crypto_aead_tfm(aead);
    struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

    /*
     * The IPSec engine only supports 128- and 256-bit AES keys. If we get
     * a request for any other size (192 bits) then we need to do a
     * software fallback.
     */
    if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
        /*
         * Set the fallback transform to use the same request flags as
         * the hardware transform.
         */
        ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        ctx->sw_cipher->base.crt_flags |=
            tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
        return crypto_aead_setkey(ctx->sw_cipher, key, len);
    }

    memcpy(ctx->cipher_key, key, len);
    ctx->cipher_key_len = len;

    return 0;
}

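/*
 * authenc() keys are passed as an rtattr-wrapped crypto_authenc_key_param
 * (carrying the cipher key length), followed by the authentication key and
 * then the cipher key:
 *
 *   [rtattr][param: enckeylen][auth key: keylen - enckeylen][enc key]
 *
 * The parsing below follows that layout.
 */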
static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                 unsigned int keylen)
{
    struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
    struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
    struct rtattr *rta = (void *)key;
    struct crypto_authenc_key_param *param;
    unsigned int authkeylen, enckeylen;
    int err = -EINVAL;

    if (!RTA_OK(rta, keylen))
        goto badkey;

    if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
        goto badkey;

    if (RTA_PAYLOAD(rta) < sizeof(*param))
        goto badkey;

    param = RTA_DATA(rta);
    enckeylen = be32_to_cpu(param->enckeylen);

    key += RTA_ALIGN(rta->rta_len);
    keylen -= RTA_ALIGN(rta->rta_len);

    if (keylen < enckeylen)
        goto badkey;

    authkeylen = keylen - enckeylen;

    if (enckeylen > AES_MAX_KEY_SIZE)
        goto badkey;

    if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
        SPA_CTRL_CIPH_ALG_AES)
        err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
    else
        err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);

    if (err)
        goto badkey;

    memcpy(ctx->hash_ctx, key, authkeylen);
    ctx->hash_key_len = authkeylen;

    return 0;

badkey:
    crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
    return -EINVAL;
}

static int spacc_aead_setauthsize(struct crypto_aead *tfm,
                  unsigned int authsize)
{
    struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

    ctx->auth_size = authsize;

    return 0;
}

/*
 * Check if an AEAD request requires a fallback operation. Some requests can't
 * be completed in hardware because the hardware may not support certain key
 * sizes. In these cases we need to complete the request in software.
 */
static int spacc_aead_need_fallback(struct spacc_req *req)
{
    struct aead_request *aead_req;
    struct crypto_tfm *tfm = req->req->tfm;
    struct crypto_alg *alg = req->req->tfm->__crt_alg;
    struct spacc_alg *spacc_alg = to_spacc_alg(alg);
    struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

    aead_req = container_of(req->req, struct aead_request, base);
    /*
     * If we have a non-supported key-length, then we need to do a
     * software fallback.
     */
    if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
        SPA_CTRL_CIPH_ALG_AES &&
        ctx->cipher_key_len != AES_KEYSIZE_128 &&
        ctx->cipher_key_len != AES_KEYSIZE_256)
        return 1;

    return 0;
}

static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
                  bool is_encrypt)
{
    struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
    struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
    int err;

    if (ctx->sw_cipher) {
        /*
         * Change the request to use the software fallback transform,
         * and once the ciphering has completed, put the old transform
         * back into the request.
         */
        aead_request_set_tfm(req, ctx->sw_cipher);
        err = is_encrypt ? crypto_aead_encrypt(req) :
            crypto_aead_decrypt(req);
        aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
    } else
        err = -EINVAL;

    return err;
}

static void spacc_aead_complete(struct spacc_req *req)
{
    spacc_aead_free_ddts(req);
    req->req->complete(req->req, req->result);
}

static int spacc_aead_submit(struct spacc_req *req)
{
    struct crypto_tfm *tfm = req->req->tfm;
    struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
    struct crypto_alg *alg = req->req->tfm->__crt_alg;
    struct spacc_alg *spacc_alg = to_spacc_alg(alg);
    struct spacc_engine *engine = ctx->generic.engine;
    u32 ctrl, proc_len, assoc_len;
    struct aead_request *aead_req =
        container_of(req->req, struct aead_request, base);

    req->result = -EINPROGRESS;
    req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
        ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
        ctx->hash_ctx, ctx->hash_key_len);

    /* Set the source and destination DDT pointers. */
    writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
    writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
    writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

    assoc_len = aead_req->assoclen;
    proc_len = aead_req->cryptlen + assoc_len;

    /*
     * If we aren't generating an IV, then we need to include the IV in the
     * associated data so that it is included in the hash.
     */
    if (!req->giv) {
        assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
        proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
    } else
        proc_len += req->giv_len;

    /*
     * If we are decrypting, we need to take the length of the ICV out of
     * the processing length.
     */
    if (!req->is_encrypt)
        proc_len -= ctx->auth_size;

    writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
    writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
    writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
    writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
    writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

    ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
        (1 << SPA_CTRL_ICV_APPEND);
    if (req->is_encrypt)
        ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
    else
        ctrl |= (1 << SPA_CTRL_KEY_EXP);

    mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

    writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

    return -EINPROGRESS;
}

static int spacc_req_submit(struct spacc_req *req);

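/*
 * Drain the pending list into the hardware command FIFO. Must be called
 * with engine->hw_lock held.
 */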
static void spacc_push(struct spacc_engine *engine)
{
    struct spacc_req *req;

    while (!list_empty(&engine->pending) &&
           engine->in_flight + 1 <= engine->fifo_sz) {

        ++engine->in_flight;
        req = list_first_entry(&engine->pending, struct spacc_req,
                       list);
        list_move_tail(&req->list, &engine->in_progress);

        req->result = spacc_req_submit(req);
    }
}

/*
 * Set up an AEAD request for processing. This will configure the engine,
 * load the context and then start the packet processing.
 *
 * @giv Pointer to the destination address for a generated IV. If the
 * request does not need to generate an IV then this should be set to NULL.
 */
static int spacc_aead_setup(struct aead_request *req, u8 *giv,
                unsigned alg_type, bool is_encrypt)
{
    struct crypto_alg *alg = req->base.tfm->__crt_alg;
    struct spacc_engine *engine = to_spacc_alg(alg)->engine;
    struct spacc_req *dev_req = aead_request_ctx(req);
    int err = -EINPROGRESS;
    unsigned long flags;
    unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

    dev_req->giv = giv;
    dev_req->giv_len = ivsize;
    dev_req->req = &req->base;
    dev_req->is_encrypt = is_encrypt;
    dev_req->result = -EBUSY;
    dev_req->engine = engine;
    dev_req->complete = spacc_aead_complete;

    if (unlikely(spacc_aead_need_fallback(dev_req)))
        return spacc_aead_do_fallback(req, alg_type, is_encrypt);

    err = spacc_aead_make_ddts(dev_req, dev_req->giv);
    if (err)
        goto out;

    err = -EINPROGRESS;
    spin_lock_irqsave(&engine->hw_lock, flags);
    if (unlikely(spacc_fifo_cmd_full(engine)) ||
        engine->in_flight + 1 > engine->fifo_sz) {
        if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
            err = -EBUSY;
            spin_unlock_irqrestore(&engine->hw_lock, flags);
            goto out_free_ddts;
        }
        list_add_tail(&dev_req->list, &engine->pending);
    } else {
        list_add_tail(&dev_req->list, &engine->pending);
        spacc_push(engine);
    }
    spin_unlock_irqrestore(&engine->hw_lock, flags);

    goto out;

out_free_ddts:
    spacc_aead_free_ddts(dev_req);
out:
    return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct crypto_tfm *tfm = crypto_aead_tfm(aead);
    struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

    return spacc_aead_setup(req, NULL, alg->type, 1);
}

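/*
 * Generate an IV for a givencrypt request: the request IV is seeded from
 * the per-transform random salt, and the generated IV buffer receives the
 * big-endian sequence number, zero-padded when the IV is longer than
 * 8 bytes (the ESP-style construction used for IPsec).
 */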
static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
{
    struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
    struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
    size_t ivsize = crypto_aead_ivsize(tfm);
    struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
    unsigned len;
    __be64 seq;

    memcpy(req->areq.iv, ctx->salt, ivsize);
    len = ivsize;
    if (ivsize > sizeof(u64)) {
        memset(req->giv, 0, ivsize - sizeof(u64));
        len = sizeof(u64);
    }
    seq = cpu_to_be64(req->seq);
    memcpy(req->giv + ivsize - len, &seq, len);

    return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
}

static int spacc_aead_decrypt(struct aead_request *req)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct crypto_tfm *tfm = crypto_aead_tfm(aead);
    struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

    return spacc_aead_setup(req, NULL, alg->type, 0);
}

/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_tfm *tfm)
{
    struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
    struct crypto_alg *alg = tfm->__crt_alg;
    struct spacc_alg *spacc_alg = to_spacc_alg(alg);
    struct spacc_engine *engine = spacc_alg->engine;

    ctx->generic.flags = spacc_alg->type;
    ctx->generic.engine = engine;
    ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
                       CRYPTO_ALG_ASYNC |
                       CRYPTO_ALG_NEED_FALLBACK);
    if (IS_ERR(ctx->sw_cipher)) {
        dev_warn(engine->dev, "failed to allocate fallback for %s\n",
             alg->cra_name);
        ctx->sw_cipher = NULL;
    }
    ctx->generic.key_offs = spacc_alg->key_offs;
    ctx->generic.iv_offs = spacc_alg->iv_offs;

    get_random_bytes(ctx->salt, sizeof(ctx->salt));

    tfm->crt_aead.reqsize = sizeof(struct spacc_req);

    return 0;
}

/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
{
    struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

    if (ctx->sw_cipher)
        crypto_free_aead(ctx->sw_cipher);
    ctx->sw_cipher = NULL;
}

/*
 * Set the DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                unsigned int len)
{
    struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
    struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
    u32 tmp[DES_EXPKEY_WORDS];

    if (len > DES3_EDE_KEY_SIZE) {
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }

    if (unlikely(!des_ekey(tmp, key)) &&
        (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
        tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
        return -EINVAL;
    }

    memcpy(ctx->key, key, len);
    ctx->key_len = len;

    return 0;
}

/*
 * Set the key for an AES block cipher. Some key lengths are not supported in
 * hardware so this must also check whether a fallback is needed.
 */
static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                unsigned int len)
{
    struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
    struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
    int err = 0;

    if (len > AES_MAX_KEY_SIZE) {
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }

    /*
     * The IPSec engine only supports 128- and 256-bit AES keys. If we get
     * a request for any other size (192 bits) then we need to do a
     * software fallback.
     */
    if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
        ctx->sw_cipher) {
        /*
         * Set the fallback transform to use the same request flags as
         * the hardware transform.
         */
        ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        ctx->sw_cipher->base.crt_flags |=
            cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;

        err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
        if (err)
            goto sw_setkey_failed;
    } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
           !ctx->sw_cipher)
        err = -EINVAL;

    memcpy(ctx->key, key, len);
    ctx->key_len = len;

sw_setkey_failed:
    if (err && ctx->sw_cipher) {
        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |=
            ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
    }

    return err;
}

static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
                  const u8 *key, unsigned int len)
{
    struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
    struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
    int err = 0;

    if (len > AES_MAX_KEY_SIZE) {
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        err = -EINVAL;
        goto out;
    }

    memcpy(ctx->key, key, len);
    ctx->key_len = len;

out:
    return err;
}

static int spacc_ablk_need_fallback(struct spacc_req *req)
{
    struct spacc_ablk_ctx *ctx;
    struct crypto_tfm *tfm = req->req->tfm;
    struct crypto_alg *alg = req->req->tfm->__crt_alg;
    struct spacc_alg *spacc_alg = to_spacc_alg(alg);

    ctx = crypto_tfm_ctx(tfm);

    return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
            SPA_CTRL_CIPH_ALG_AES &&
            ctx->key_len != AES_KEYSIZE_128 &&
            ctx->key_len != AES_KEYSIZE_256;
}

static void spacc_ablk_complete(struct spacc_req *req)
{
    struct ablkcipher_request *ablk_req =
        container_of(req->req, struct ablkcipher_request, base);

    if (ablk_req->src != ablk_req->dst) {
        spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
                   ablk_req->nbytes, DMA_TO_DEVICE);
        spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
                   ablk_req->nbytes, DMA_FROM_DEVICE);
    } else
        spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
                   ablk_req->nbytes, DMA_BIDIRECTIONAL);

    req->req->complete(req->req, req->result);
}

static int spacc_ablk_submit(struct spacc_req *req)
{
    struct crypto_tfm *tfm = req->req->tfm;
    struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
    struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
    struct crypto_alg *alg = req->req->tfm->__crt_alg;
    struct spacc_alg *spacc_alg = to_spacc_alg(alg);
    struct spacc_engine *engine = ctx->generic.engine;
    u32 ctrl;

    req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
        ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
        NULL, 0);

    writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
    writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
    writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

    writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
    writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
    writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
    writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

    ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
        (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
         (1 << SPA_CTRL_KEY_EXP));

    mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

    writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

    return -EINPROGRESS;
}

static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
                  unsigned alg_type, bool is_encrypt)
{
    struct crypto_tfm *old_tfm =
        crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
    struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
    int err;

    if (!ctx->sw_cipher)
        return -EINVAL;

    /*
     * Change the request to use the software fallback transform, and once
     * the ciphering has completed, put the old transform back into the
     * request.
     */
    ablkcipher_request_set_tfm(req, ctx->sw_cipher);
    err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
        crypto_ablkcipher_decrypt(req);
    ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));

    return err;
}

static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
                bool is_encrypt)
{
    struct crypto_alg *alg = req->base.tfm->__crt_alg;
    struct spacc_engine *engine = to_spacc_alg(alg)->engine;
    struct spacc_req *dev_req = ablkcipher_request_ctx(req);
    unsigned long flags;
    int err = -ENOMEM;

    dev_req->req = &req->base;
    dev_req->is_encrypt = is_encrypt;
    dev_req->engine = engine;
    dev_req->complete = spacc_ablk_complete;
    dev_req->result = -EINPROGRESS;

    if (unlikely(spacc_ablk_need_fallback(dev_req)))
        return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

    /*
     * Create the DDTs for the engine. If we share the same source and
     * destination then we can optimize by reusing the DDTs.
     */
    if (req->src != req->dst) {
        dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
            req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
        if (!dev_req->src_ddt)
            goto out;

        dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
            req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
        if (!dev_req->dst_ddt)
            goto out_free_src;
    } else {
        dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
            req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
        if (!dev_req->dst_ddt)
            goto out;

        dev_req->src_ddt = NULL;
        dev_req->src_addr = dev_req->dst_addr;
    }

    err = -EINPROGRESS;
    spin_lock_irqsave(&engine->hw_lock, flags);
    /*
     * Check if the engine will accept the operation now. If it won't then
     * we either stick it on the end of the pending list if we can backlog,
     * or bail out with an error if not.
     */
    if (unlikely(spacc_fifo_cmd_full(engine)) ||
        engine->in_flight + 1 > engine->fifo_sz) {
        if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
            err = -EBUSY;
            spin_unlock_irqrestore(&engine->hw_lock, flags);
            goto out_free_ddts;
        }
        list_add_tail(&dev_req->list, &engine->pending);
    } else {
        list_add_tail(&dev_req->list, &engine->pending);
        spacc_push(engine);
    }
    spin_unlock_irqrestore(&engine->hw_lock, flags);

    goto out;

out_free_ddts:
    spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
               req->nbytes, req->src == req->dst ?
               DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
    if (req->src != req->dst)
        spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
                   req->src, req->nbytes, DMA_TO_DEVICE);
out:
    return err;
}

static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
{
    struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
    struct crypto_alg *alg = tfm->__crt_alg;
    struct spacc_alg *spacc_alg = to_spacc_alg(alg);
    struct spacc_engine *engine = spacc_alg->engine;

    ctx->generic.flags = spacc_alg->type;
    ctx->generic.engine = engine;
    if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
        ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->sw_cipher)) {
            dev_warn(engine->dev, "failed to allocate fallback for %s\n",
                 alg->cra_name);
            ctx->sw_cipher = NULL;
        }
    }
    ctx->generic.key_offs = spacc_alg->key_offs;
    ctx->generic.iv_offs = spacc_alg->iv_offs;

    tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);

    return 0;
}

static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
    struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

    if (ctx->sw_cipher)
        crypto_free_ablkcipher(ctx->sw_cipher);
    ctx->sw_cipher = NULL;
}

static int spacc_ablk_encrypt(struct ablkcipher_request *req)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
    struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
    struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

    return spacc_ablk_setup(req, alg->type, 1);
}

static int spacc_ablk_decrypt(struct ablkcipher_request *req)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
    struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
    struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

    return spacc_ablk_setup(req, alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
    return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
        SPA_FIFO_STAT_EMPTY;
}

static void spacc_process_done(struct spacc_engine *engine)
{
    struct spacc_req *req;
    unsigned long flags;

    spin_lock_irqsave(&engine->hw_lock, flags);

    while (!spacc_fifo_stat_empty(engine)) {
        req = list_first_entry(&engine->in_progress, struct spacc_req,
                       list);
        list_move_tail(&req->list, &engine->completed);
        --engine->in_flight;

        /* POP the status register. */
        writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
        req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
             SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

        /*
         * Convert the SPAcc error status into the standard POSIX error
         * codes.
         */
        if (unlikely(req->result)) {
            switch (req->result) {
            case SPA_STATUS_ICV_FAIL:
                req->result = -EBADMSG;
                break;

            case SPA_STATUS_MEMORY_ERROR:
                dev_warn(engine->dev,
                     "memory error triggered\n");
                req->result = -EFAULT;
                break;

            case SPA_STATUS_BLOCK_ERROR:
                dev_warn(engine->dev,
                     "block error triggered\n");
                req->result = -EIO;
                break;
            }
        }
    }

    tasklet_schedule(&engine->complete);

    spin_unlock_irqrestore(&engine->hw_lock, flags);
}

static irqreturn_t spacc_spacc_irq(int irq, void *dev)
{
    struct spacc_engine *engine = (struct spacc_engine *)dev;
    u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);

    writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
    spacc_process_done(engine);

    return IRQ_HANDLED;
}

static void spacc_packet_timeout(unsigned long data)
{
    struct spacc_engine *engine = (struct spacc_engine *)data;

    spacc_process_done(engine);
}

static int spacc_req_submit(struct spacc_req *req)
{
    struct crypto_alg *alg = req->req->tfm->__crt_alg;

    if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
        return spacc_aead_submit(req);
    else
        return spacc_ablk_submit(req);
}

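/*
 * Completion tasklet. The completed list is spliced out and hw_lock is
 * dropped before the ->complete() callbacks run, so a callback that
 * submits a new request cannot deadlock on the engine lock.
 */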
static void spacc_spacc_complete(unsigned long data)
{
    struct spacc_engine *engine = (struct spacc_engine *)data;
    struct spacc_req *req, *tmp;
    unsigned long flags;
    LIST_HEAD(completed);

    spin_lock_irqsave(&engine->hw_lock, flags);

    list_splice_init(&engine->completed, &completed);
    spacc_push(engine);
    if (engine->in_flight)
        mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

    spin_unlock_irqrestore(&engine->hw_lock, flags);

    list_for_each_entry_safe(req, tmp, &completed, list) {
        list_del(&req->list);
        req->complete(req);
    }
}

#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct spacc_engine *engine = platform_get_drvdata(pdev);

    /*
     * We only support standby mode. All we have to do is gate the clock to
     * the spacc. The hardware will preserve state until we turn it back
     * on again.
     */
    clk_disable(engine->clk);

    return 0;
}

static int spacc_resume(struct device *dev)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct spacc_engine *engine = platform_get_drvdata(pdev);

    return clk_enable(engine->clk);
}

static const struct dev_pm_ops spacc_pm_ops = {
    .suspend = spacc_suspend,
    .resume = spacc_resume,
};
#endif /* CONFIG_PM */

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
    return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
}

static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
                      struct device_attribute *attr,
                      char *buf)
{
    struct spacc_engine *engine = spacc_dev_to_engine(dev);

    return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
}

static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
                       struct device_attribute *attr,
                       const char *buf, size_t len)
{
    struct spacc_engine *engine = spacc_dev_to_engine(dev);
    unsigned long thresh;

    if (kstrtoul(buf, 0, &thresh))
        return -EINVAL;

    thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);

    engine->stat_irq_thresh = thresh;
    writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
           engine->regs + SPA_IRQ_CTRL_REG_OFFSET);

    return len;
}
static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
           spacc_stat_irq_thresh_store);
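
/*
 * The threshold is tunable at runtime through sysfs, for example (the
 * exact device path is platform dependent):
 *
 *   echo 8 > /sys/devices/platform/.../stat_irq_thresh
 */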
1314
1315static struct spacc_alg ipsec_engine_algs[] = {
1316    {
1317        .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
1318        .key_offs = 0,
1319        .iv_offs = AES_MAX_KEY_SIZE,
1320        .alg = {
1321            .cra_name = "cbc(aes)",
1322            .cra_driver_name = "cbc-aes-picoxcell",
1323            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1324            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1325                     CRYPTO_ALG_KERN_DRIVER_ONLY |
1326                     CRYPTO_ALG_ASYNC |
1327                     CRYPTO_ALG_NEED_FALLBACK,
1328            .cra_blocksize = AES_BLOCK_SIZE,
1329            .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1330            .cra_type = &crypto_ablkcipher_type,
1331            .cra_module = THIS_MODULE,
1332            .cra_ablkcipher = {
1333                .setkey = spacc_aes_setkey,
1334                .encrypt = spacc_ablk_encrypt,
1335                .decrypt = spacc_ablk_decrypt,
1336                .min_keysize = AES_MIN_KEY_SIZE,
1337                .max_keysize = AES_MAX_KEY_SIZE,
1338                .ivsize = AES_BLOCK_SIZE,
1339            },
1340            .cra_init = spacc_ablk_cra_init,
1341            .cra_exit = spacc_ablk_cra_exit,
1342        },
1343    },
1344    {
1345        .key_offs = 0,
1346        .iv_offs = AES_MAX_KEY_SIZE,
1347        .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
1348        .alg = {
1349            .cra_name = "ecb(aes)",
1350            .cra_driver_name = "ecb-aes-picoxcell",
1351            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1352            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1353                CRYPTO_ALG_KERN_DRIVER_ONLY |
1354                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1355            .cra_blocksize = AES_BLOCK_SIZE,
1356            .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1357            .cra_type = &crypto_ablkcipher_type,
1358            .cra_module = THIS_MODULE,
1359            .cra_ablkcipher = {
1360                .setkey = spacc_aes_setkey,
1361                .encrypt = spacc_ablk_encrypt,
1362                .decrypt = spacc_ablk_decrypt,
1363                .min_keysize = AES_MIN_KEY_SIZE,
1364                .max_keysize = AES_MAX_KEY_SIZE,
1365            },
1366            .cra_init = spacc_ablk_cra_init,
1367            .cra_exit = spacc_ablk_cra_exit,
1368        },
1369    },
1370    {
1371        .key_offs = DES_BLOCK_SIZE,
1372        .iv_offs = 0,
1373        .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1374        .alg = {
1375            .cra_name = "cbc(des)",
1376            .cra_driver_name = "cbc-des-picoxcell",
1377            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1378            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1379                    CRYPTO_ALG_ASYNC |
1380                    CRYPTO_ALG_KERN_DRIVER_ONLY,
1381            .cra_blocksize = DES_BLOCK_SIZE,
1382            .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1383            .cra_type = &crypto_ablkcipher_type,
1384            .cra_module = THIS_MODULE,
1385            .cra_ablkcipher = {
1386                .setkey = spacc_des_setkey,
1387                .encrypt = spacc_ablk_encrypt,
1388                .decrypt = spacc_ablk_decrypt,
1389                .min_keysize = DES_KEY_SIZE,
1390                .max_keysize = DES_KEY_SIZE,
1391                .ivsize = DES_BLOCK_SIZE,
1392            },
1393            .cra_init = spacc_ablk_cra_init,
1394            .cra_exit = spacc_ablk_cra_exit,
1395        },
1396    },
1397    {
1398        .key_offs = DES_BLOCK_SIZE,
1399        .iv_offs = 0,
1400        .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
1401        .alg = {
1402            .cra_name = "ecb(des)",
1403            .cra_driver_name = "ecb-des-picoxcell",
1404            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1405            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1406                    CRYPTO_ALG_ASYNC |
1407                    CRYPTO_ALG_KERN_DRIVER_ONLY,
1408            .cra_blocksize = DES_BLOCK_SIZE,
1409            .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1410            .cra_type = &crypto_ablkcipher_type,
1411            .cra_module = THIS_MODULE,
1412            .cra_ablkcipher = {
1413                .setkey = spacc_des_setkey,
1414                .encrypt = spacc_ablk_encrypt,
1415                .decrypt = spacc_ablk_decrypt,
1416                .min_keysize = DES_KEY_SIZE,
1417                .max_keysize = DES_KEY_SIZE,
1418            },
1419            .cra_init = spacc_ablk_cra_init,
1420            .cra_exit = spacc_ablk_cra_exit,
1421        },
1422    },
1423    {
1424        .key_offs = DES_BLOCK_SIZE,
1425        .iv_offs = 0,
1426        .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1427        .alg = {
1428            .cra_name = "cbc(des3_ede)",
1429            .cra_driver_name = "cbc-des3-ede-picoxcell",
1430            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1431            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1432                    CRYPTO_ALG_ASYNC |
1433                    CRYPTO_ALG_KERN_DRIVER_ONLY,
1434            .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1435            .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1436            .cra_type = &crypto_ablkcipher_type,
1437            .cra_module = THIS_MODULE,
1438            .cra_ablkcipher = {
1439                .setkey = spacc_des_setkey,
1440                .encrypt = spacc_ablk_encrypt,
1441                .decrypt = spacc_ablk_decrypt,
1442                .min_keysize = DES3_EDE_KEY_SIZE,
1443                .max_keysize = DES3_EDE_KEY_SIZE,
1444                .ivsize = DES3_EDE_BLOCK_SIZE,
1445            },
1446            .cra_init = spacc_ablk_cra_init,
1447            .cra_exit = spacc_ablk_cra_exit,
1448        },
1449    },
1450    {
1451        .key_offs = DES_BLOCK_SIZE,
1452        .iv_offs = 0,
1453        .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
1454        .alg = {
1455            .cra_name = "ecb(des3_ede)",
1456            .cra_driver_name = "ecb-des3-ede-picoxcell",
1457            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1458            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1459                    CRYPTO_ALG_ASYNC |
1460                    CRYPTO_ALG_KERN_DRIVER_ONLY,
1461            .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1462            .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1463            .cra_type = &crypto_ablkcipher_type,
1464            .cra_module = THIS_MODULE,
1465            .cra_ablkcipher = {
1466                .setkey = spacc_des_setkey,
1467                .encrypt = spacc_ablk_encrypt,
1468                .decrypt = spacc_ablk_decrypt,
1469                .min_keysize = DES3_EDE_KEY_SIZE,
1470                .max_keysize = DES3_EDE_KEY_SIZE,
1471            },
1472            .cra_init = spacc_ablk_cra_init,
1473            .cra_exit = spacc_ablk_cra_exit,
1474        },
1475    },
1476    {
1477        .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1478                SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
1479        .key_offs = 0,
1480        .iv_offs = AES_MAX_KEY_SIZE,
1481        .alg = {
1482            .cra_name = "authenc(hmac(sha1),cbc(aes))",
1483            .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
1484            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1485            .cra_flags = CRYPTO_ALG_TYPE_AEAD |
1486                    CRYPTO_ALG_ASYNC |
1487                    CRYPTO_ALG_KERN_DRIVER_ONLY,
1488            .cra_blocksize = AES_BLOCK_SIZE,
1489            .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1490            .cra_type = &crypto_aead_type,
1491            .cra_module = THIS_MODULE,
1492            .cra_aead = {
1493                .setkey = spacc_aead_setkey,
1494                .setauthsize = spacc_aead_setauthsize,
1495                .encrypt = spacc_aead_encrypt,
1496                .decrypt = spacc_aead_decrypt,
1497                .givencrypt = spacc_aead_givencrypt,
1498                .ivsize = AES_BLOCK_SIZE,
1499                .maxauthsize = SHA1_DIGEST_SIZE,
1500            },
1501            .cra_init = spacc_aead_cra_init,
1502            .cra_exit = spacc_aead_cra_exit,
1503        },
1504    },
1505    {
1506        .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1507                SPA_CTRL_HASH_ALG_SHA256 |
1508                SPA_CTRL_HASH_MODE_HMAC,
1509        .key_offs = 0,
1510        .iv_offs = AES_MAX_KEY_SIZE,
1511        .alg = {
1512            .cra_name = "authenc(hmac(sha256),cbc(aes))",
1513            .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
1514            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1515            .cra_flags = CRYPTO_ALG_TYPE_AEAD |
1516                    CRYPTO_ALG_ASYNC |
1517                    CRYPTO_ALG_KERN_DRIVER_ONLY,
1518            .cra_blocksize = AES_BLOCK_SIZE,
1519            .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1520            .cra_type = &crypto_aead_type,
1521            .cra_module = THIS_MODULE,
1522            .cra_aead = {
1523                .setkey = spacc_aead_setkey,
1524                .setauthsize = spacc_aead_setauthsize,
1525                .encrypt = spacc_aead_encrypt,
1526                .decrypt = spacc_aead_decrypt,
1527                .givencrypt = spacc_aead_givencrypt,
1528                .ivsize = AES_BLOCK_SIZE,
1529                .maxauthsize = SHA256_DIGEST_SIZE,
1530            },
1531            .cra_init = spacc_aead_cra_init,
1532            .cra_exit = spacc_aead_cra_exit,
1533        },
1534    },
1535    {
1536        .key_offs = 0,
1537        .iv_offs = AES_MAX_KEY_SIZE,
1538        .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1539                SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
1540        .alg = {
1541            .cra_name = "authenc(hmac(md5),cbc(aes))",
1542            .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
1543            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1544            .cra_flags = CRYPTO_ALG_TYPE_AEAD |
1545                    CRYPTO_ALG_ASYNC |
1546                    CRYPTO_ALG_KERN_DRIVER_ONLY,
1547            .cra_blocksize = AES_BLOCK_SIZE,
1548            .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1549            .cra_type = &crypto_aead_type,
1550            .cra_module = THIS_MODULE,
1551            .cra_aead = {
1552                .setkey = spacc_aead_setkey,
1553                .setauthsize = spacc_aead_setauthsize,
1554                .encrypt = spacc_aead_encrypt,
1555                .decrypt = spacc_aead_decrypt,
1556                .givencrypt = spacc_aead_givencrypt,
1557                .ivsize = AES_BLOCK_SIZE,
1558                .maxauthsize = MD5_DIGEST_SIZE,
1559            },
1560            .cra_init = spacc_aead_cra_init,
1561            .cra_exit = spacc_aead_cra_exit,
1562        },
1563    },
    {
        .key_offs = DES_BLOCK_SIZE,
        .iv_offs = 0,
        .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
                SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
        .alg = {
            .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
            .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
            .cra_flags = CRYPTO_ALG_TYPE_AEAD |
                    CRYPTO_ALG_ASYNC |
                    CRYPTO_ALG_KERN_DRIVER_ONLY,
            .cra_blocksize = DES3_EDE_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct spacc_aead_ctx),
            .cra_type = &crypto_aead_type,
            .cra_module = THIS_MODULE,
            .cra_aead = {
                .setkey = spacc_aead_setkey,
                .setauthsize = spacc_aead_setauthsize,
                .encrypt = spacc_aead_encrypt,
                .decrypt = spacc_aead_decrypt,
                .givencrypt = spacc_aead_givencrypt,
                .ivsize = DES3_EDE_BLOCK_SIZE,
                .maxauthsize = SHA1_DIGEST_SIZE,
            },
            .cra_init = spacc_aead_cra_init,
            .cra_exit = spacc_aead_cra_exit,
        },
    },
    {
        .key_offs = DES_BLOCK_SIZE,
        .iv_offs = 0,
        .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
                SPA_CTRL_HASH_ALG_SHA256 |
                SPA_CTRL_HASH_MODE_HMAC,
        .alg = {
            .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
            .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
            .cra_flags = CRYPTO_ALG_TYPE_AEAD |
                    CRYPTO_ALG_ASYNC |
                    CRYPTO_ALG_KERN_DRIVER_ONLY,
            .cra_blocksize = DES3_EDE_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct spacc_aead_ctx),
            .cra_type = &crypto_aead_type,
            .cra_module = THIS_MODULE,
            .cra_aead = {
                .setkey = spacc_aead_setkey,
                .setauthsize = spacc_aead_setauthsize,
                .encrypt = spacc_aead_encrypt,
                .decrypt = spacc_aead_decrypt,
                .givencrypt = spacc_aead_givencrypt,
                .ivsize = DES3_EDE_BLOCK_SIZE,
                .maxauthsize = SHA256_DIGEST_SIZE,
            },
            .cra_init = spacc_aead_cra_init,
            .cra_exit = spacc_aead_cra_exit,
        },
    },
    {
        .key_offs = DES_BLOCK_SIZE,
        .iv_offs = 0,
        .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
                SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
        .alg = {
            .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
            .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
            .cra_flags = CRYPTO_ALG_TYPE_AEAD |
                    CRYPTO_ALG_ASYNC |
                    CRYPTO_ALG_KERN_DRIVER_ONLY,
            .cra_blocksize = DES3_EDE_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct spacc_aead_ctx),
            .cra_type = &crypto_aead_type,
            .cra_module = THIS_MODULE,
            .cra_aead = {
                .setkey = spacc_aead_setkey,
                .setauthsize = spacc_aead_setauthsize,
                .encrypt = spacc_aead_encrypt,
                .decrypt = spacc_aead_decrypt,
                .givencrypt = spacc_aead_givencrypt,
                .ivsize = DES3_EDE_BLOCK_SIZE,
                .maxauthsize = MD5_DIGEST_SIZE,
            },
            .cra_init = spacc_aead_cra_init,
            .cra_exit = spacc_aead_cra_exit,
        },
    },
};

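/*
 * Algorithms available on the L2 variant of the engine. This engine only
 * implements KASUMI in f8 mode.
 */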
static struct spacc_alg l2_engine_algs[] = {
    {
        .key_offs = 0,
        .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
        .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
                SPA_CTRL_CIPH_MODE_F8,
        .alg = {
            .cra_name = "f8(kasumi)",
            .cra_driver_name = "f8-kasumi-picoxcell",
            .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
            .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
                    CRYPTO_ALG_ASYNC |
                    CRYPTO_ALG_KERN_DRIVER_ONLY,
            .cra_blocksize = 8,
            .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = spacc_kasumi_f8_setkey,
                .encrypt = spacc_ablk_encrypt,
                .decrypt = spacc_ablk_decrypt,
                .min_keysize = 16,
                .max_keysize = 16,
                .ivsize = 8,
            },
            .cra_init = spacc_ablk_cra_init,
            .cra_exit = spacc_ablk_cra_exit,
        },
    },
};

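/*
 * Device tree match table. When the kernel is built without OF support the
 * table collapses to NULL so the driver can still bind by platform bus name.
 */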
#ifdef CONFIG_OF
static const struct of_device_id spacc_of_id_table[] = {
    { .compatible = "picochip,spacc-ipsec" },
    { .compatible = "picochip,spacc-l2" },
    {}
};
#else /* CONFIG_OF */
#define spacc_of_id_table NULL
#endif /* CONFIG_OF */

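/*
 * Test whether this device is the given SPAcc variant, matching either the
 * platform bus id table or, where OF is enabled, the device tree compatible
 * string.
 */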
static bool spacc_is_compatible(struct platform_device *pdev,
                const char *spacc_type)
{
    const struct platform_device_id *platid = platform_get_device_id(pdev);

    if (platid && !strcmp(platid->name, spacc_type))
        return true;

#ifdef CONFIG_OF
    if (of_device_is_compatible(pdev->dev.of_node, spacc_type))
        return true;
#endif /* CONFIG_OF */

    return false;
}

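/*
 * Probe an engine: size it for the IPSEC or L2 variant, map its registers,
 * claim its IRQ and clock, then register the algorithms in its table,
 * keeping only those that register successfully.
 */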
static int __devinit spacc_probe(struct platform_device *pdev)
{
    int i, err, ret = -EINVAL;
    struct resource *mem, *irq;
    struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
                           GFP_KERNEL);
    if (!engine)
        return -ENOMEM;

    if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) {
        engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
        engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
        engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
        engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
        engine->algs = ipsec_engine_algs;
        engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
    } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
        engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
        engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
        engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
        engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
        engine->algs = l2_engine_algs;
        engine->num_algs = ARRAY_SIZE(l2_engine_algs);
    } else {
        return -EINVAL;
    }

    engine->name = dev_name(&pdev->dev);

    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (!mem || !irq) {
        dev_err(&pdev->dev, "no memory/irq resource for engine\n");
        return -ENXIO;
    }

    if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
                     engine->name))
        return -ENOMEM;

    engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
    if (!engine->regs) {
        dev_err(&pdev->dev, "memory map failed\n");
        return -ENOMEM;
    }

    if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
                 engine->name, engine)) {
        dev_err(&pdev->dev, "failed to request IRQ\n");
        return -EBUSY;
    }

    engine->dev = &pdev->dev;
    engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
    engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;

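    /*
     * Pool for the hardware DDT lists: each allocation holds up to
     * MAX_DDT_LEN entries, 8-byte aligned, and the SZ_64K boundary
     * argument guarantees that no list ever straddles a 64KB border.
     */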
    engine->req_pool = dmam_pool_create(engine->name, engine->dev,
        MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
    if (!engine->req_pool)
        return -ENOMEM;

    spin_lock_init(&engine->hw_lock);

    engine->clk = clk_get(&pdev->dev, "ref");
    if (IS_ERR(engine->clk)) {
        dev_info(&pdev->dev, "clk unavailable\n");
        return PTR_ERR(engine->clk);
    }

    if (clk_enable(engine->clk)) {
        dev_info(&pdev->dev, "unable to enable clk\n");
        clk_put(engine->clk);
        return -EIO;
    }

    err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
    if (err) {
        clk_disable(engine->clk);
        clk_put(engine->clk);
        return err;
    }

    /*
     * Use an IRQ threshold of 50% as a default. This seems to be a
     * reasonable trade-off between latency and throughput and can be
     * changed at runtime.
     */
    engine->stat_irq_thresh = (engine->fifo_sz / 2);
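    /*
     * The threshold is exposed through the stat_irq_thresh device
     * attribute created above, so it can also be tuned from userspace at
     * runtime, e.g. (the exact sysfs path depends on the platform):
     *
     *   echo 24 > /sys/devices/.../stat_irq_thresh
     */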

    /*
     * Configure the interrupts. We only use the STAT_CNT interrupt as we
     * only submit a new packet for processing when we complete another in
     * the queue. This minimizes time spent in the interrupt handler.
     */
    writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
           engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
    writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
           engine->regs + SPA_IRQ_EN_REG_OFFSET);

    setup_timer(&engine->packet_timeout, spacc_packet_timeout,
            (unsigned long)engine);

    INIT_LIST_HEAD(&engine->pending);
    INIT_LIST_HEAD(&engine->completed);
    INIT_LIST_HEAD(&engine->in_progress);
    engine->in_flight = 0;
    tasklet_init(&engine->complete, spacc_spacc_complete,
             (unsigned long)engine);

    platform_set_drvdata(pdev, engine);

    INIT_LIST_HEAD(&engine->registered_algs);
    for (i = 0; i < engine->num_algs; ++i) {
        engine->algs[i].engine = engine;
        err = crypto_register_alg(&engine->algs[i].alg);
        if (err) {
            dev_err(engine->dev, "failed to register alg \"%s\"\n",
                engine->algs[i].alg.cra_name);
        } else {
            list_add_tail(&engine->algs[i].entry,
                      &engine->registered_algs);
            ret = 0;
            dev_dbg(engine->dev, "registered alg \"%s\"\n",
                engine->algs[i].alg.cra_name);
        }
    }

    return ret;
}

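/*
 * Tear an engine down in the reverse order of probe: stop the packet timer,
 * remove the sysfs attribute, unregister any algorithms that registered
 * successfully, then release the clock.
 */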
static int __devexit spacc_remove(struct platform_device *pdev)
{
    struct spacc_alg *alg, *next;
    struct spacc_engine *engine = platform_get_drvdata(pdev);

    del_timer_sync(&engine->packet_timeout);
    device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);

    list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
        list_del(&alg->entry);
        crypto_unregister_alg(&alg->alg);
    }

    clk_disable(engine->clk);
    clk_put(engine->clk);

    return 0;
}

static const struct platform_device_id spacc_id_table[] = {
    { "picochip,spacc-ipsec", },
    { "picochip,spacc-l2", },
    { },    /* terminator, required by the platform bus matching code */
};

static struct platform_driver spacc_driver = {
    .probe = spacc_probe,
    .remove = __devexit_p(spacc_remove),
    .driver = {
        .name = "picochip,spacc",
#ifdef CONFIG_PM
        .pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
        .of_match_table = spacc_of_id_table,
    },
    .id_table = spacc_id_table,
};

module_platform_driver(spacc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
