| 1 | From c925421b8c35357427499f3d298777535c2c6cfd Mon Sep 17 00:00:00 2001 |
| 2 | From: Alison Wang <b18965@freescale.com> |
| 3 | Date: Thu, 4 Aug 2011 09:59:45 +0800 |
| 4 | Subject: [PATCH 24/52] Add SEC 1.1 support for MCF547x and MCF548x |
| 5 | |
Add SEC 1.1 support for the ColdFire MCF547x and MCF548x processors. The
SEC driver is added as drivers/crypto/cf_talitos.c and is based on the
existing talitos driver.
| 8 | |
| 9 | Signed-off-by: Alison Wang <b18965@freescale.com> |
| 10 | --- |
| 11 | arch/m68k/coldfire/m547x/mcf548x-devices.c | 2 +- |
| 12 | arch/m68k/include/asm/cf_io.h | 4 + |
| 13 | crypto/testmgr.c | 18 +- |
| 14 | drivers/crypto/Kconfig | 13 + |
| 15 | drivers/crypto/Makefile | 1 + |
| 16 | drivers/crypto/cf_talitos.c | 1727 ++++++++++++++++++++++++++++ |
| 17 | drivers/crypto/cf_talitos.h | 229 ++++ |
| 18 | 7 files changed, 1989 insertions(+), 5 deletions(-) |
| 19 | create mode 100644 drivers/crypto/cf_talitos.c |
| 20 | create mode 100644 drivers/crypto/cf_talitos.h |
| 21 | |
| 22 | --- a/arch/m68k/coldfire/m547x/mcf548x-devices.c |
| 23 | +++ b/arch/m68k/coldfire/m547x/mcf548x-devices.c |
| 24 | @@ -54,7 +54,7 @@ static struct resource coldfire_sec_reso |
| 25 | }; |
| 26 | |
| 27 | static struct platform_device coldfire_sec_device = { |
| 28 | - .name = "fsl-sec1", |
| 29 | + .name = "talitos", |
| 30 | .id = -1, |
| 31 | .num_resources = ARRAY_SIZE(coldfire_sec_resources), |
| 32 | .resource = coldfire_sec_resources, |
| 33 | --- a/arch/m68k/include/asm/cf_io.h |
| 34 | +++ b/arch/m68k/include/asm/cf_io.h |
| 35 | @@ -192,4 +192,8 @@ static inline void memcpy_toio(volatile |
| 36 | #define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b)) |
| 37 | #endif /* readb */ |
| 38 | |
| 39 | +/* access ports */ |
| 40 | +#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) | (_v)) |
| 41 | +#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v)) |
| 42 | + |
| 43 | #endif /* _IO_H */ |
| 44 | --- a/crypto/testmgr.c |
| 45 | +++ b/crypto/testmgr.c |
| 46 | @@ -212,7 +212,11 @@ static int test_hash(struct crypto_ahash |
| 47 | tcrypt_complete, &tresult); |
| 48 | |
| 49 | j = 0; |
| 50 | +#if defined(CONFIG_CRYPTO_DEV_CF_TALITOS) |
| 51 | + for (i = 1; i < tcount; i++) { |
| 52 | +#else |
| 53 | for (i = 0; i < tcount; i++) { |
| 54 | +#endif |
| 55 | if (template[i].np) |
| 56 | continue; |
| 57 | |
| 58 | @@ -276,7 +280,9 @@ static int test_hash(struct crypto_ahash |
| 59 | hexdump(result, crypto_ahash_digestsize(tfm)); |
| 60 | ret = -EINVAL; |
| 61 | goto out; |
| 62 | - } |
| 63 | + } else |
| 64 | + printk(KERN_INFO "alg: hash: Test %d succeed for %s\n", |
| 65 | + j, algo); |
| 66 | } |
| 67 | |
| 68 | j = 0; |
| 69 | @@ -344,7 +350,9 @@ static int test_hash(struct crypto_ahash |
| 70 | hexdump(result, crypto_ahash_digestsize(tfm)); |
| 71 | ret = -EINVAL; |
| 72 | goto out; |
| 73 | - } |
| 74 | + } else |
| 75 | + printk(KERN_INFO "alg: hash: Chunking test %d " |
| 76 | + "succeed for %s\n", j, algo); |
| 77 | } |
| 78 | } |
| 79 | |
| 80 | @@ -788,7 +796,6 @@ static int test_skcipher(struct crypto_a |
| 81 | else |
| 82 | e = "decryption"; |
| 83 | |
| 84 | - printk(KERN_INFO "%s testing %s %s\n", __func__, algo, e); |
| 85 | init_completion(&result.completion); |
| 86 | |
| 87 | req = ablkcipher_request_alloc(tfm, GFP_KERNEL); |
| 88 | @@ -963,7 +970,10 @@ static int test_skcipher(struct crypto_a |
| 89 | "%u for %s\n", j, e, k, algo); |
| 90 | hexdump(q, template[i].tap[k]); |
| 91 | goto out; |
| 92 | - } |
| 93 | + } else |
| 94 | + printk(KERN_INFO "alg: skcipher: Chunk " |
| 95 | + "test %d pass on %s for %s\n", |
| 96 | + j, e, algo); |
| 97 | |
| 98 | q += template[i].tap[k]; |
| 99 | for (n = 0; offset_in_page(q + n) && q[n]; n++) |
| 100 | --- a/drivers/crypto/Kconfig |
| 101 | +++ b/drivers/crypto/Kconfig |
| 102 | @@ -282,6 +282,19 @@ config CRYPTO_DEV_TALITOS |
| 103 | To compile this driver as a module, choose M here: the module |
| 104 | will be called talitos. |
| 105 | |
| 106 | +config CRYPTO_DEV_CF_TALITOS |
| 107 | + tristate "Talitos Freescale Coldfire Security Engine (SEC)" |
| 108 | + select CRYPTO_ALGAPI |
| 109 | + select CRYPTO_AUTHENC |
| 110 | + select HW_RANDOM |
| 111 | + depends on (M547X || M548X) |
| 112 | + help |
+ Say 'Y' here to use the Freescale ColdFire Security Engine (SEC)
| 114 | + to offload cryptographic algorithm computation. |
| 115 | + |
+ The Freescale SEC is present on ColdFire MCF547x and MCF548x
| 117 | + processors. |
| 118 | + |
| 119 | config CRYPTO_DEV_IXP4XX |
| 120 | tristate "Driver for IXP4xx crypto hardware acceleration" |
| 121 | depends on ARCH_IXP4XX |
| 122 | --- a/drivers/crypto/Makefile |
| 123 | +++ b/drivers/crypto/Makefile |
| 124 | @@ -6,6 +6,7 @@ n2_crypto-y := n2_core.o n2_asm.o |
| 125 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o |
| 126 | obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o |
| 127 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o |
| 128 | +obj-$(CONFIG_CRYPTO_DEV_CF_TALITOS) += cf_talitos.o |
| 129 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o |
| 130 | obj-$(CONFIG_CRYPTO_DEV_MCFCAU) += mcfcau.o |
| 131 | obj-$(CONFIG_CRYPTO_DEV_MCFCAU_DES) += mcfcau-des.o |
| 132 | --- /dev/null |
| 133 | +++ b/drivers/crypto/cf_talitos.c |
| 134 | @@ -0,0 +1,1727 @@ |
| 135 | +/* |
+ * cf_talitos - Freescale ColdFire Integrated Security Engine
| 137 | + * (SEC) device driver |
| 138 | + * |
| 139 | + * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. |
| 140 | + * Author: Alison Wang <b18965@freescale.com> |
| 141 | + * based on talitos.c |
| 142 | + * |
| 143 | + * This program is free software; you can redistribute it and/or modify |
| 144 | + * it under the terms of the GNU General Public License as published by |
| 145 | + * the Free Software Foundation; either version 2 of the License, or |
| 146 | + * (at your option) any later version. |
| 147 | + */ |
| 148 | + |
| 149 | +#include <linux/kernel.h> |
| 150 | +#include <linux/module.h> |
| 151 | +#include <linux/mod_devicetable.h> |
| 152 | +#include <linux/device.h> |
| 153 | +#include <linux/interrupt.h> |
| 154 | +#include <linux/crypto.h> |
| 155 | +#include <linux/hw_random.h> |
| 156 | +#include <linux/platform_device.h> |
| 157 | +#include <linux/dma-mapping.h> |
| 158 | +#include <linux/io.h> |
| 159 | +#include <linux/spinlock.h> |
| 160 | +#include <linux/rtnetlink.h> |
| 161 | +#include <linux/slab.h> |
| 162 | + |
| 163 | +#include <crypto/algapi.h> |
| 164 | +#include <crypto/aes.h> |
| 165 | +#include <crypto/des.h> |
| 166 | +#include <crypto/sha.h> |
| 167 | +#include <crypto/md5.h> |
| 168 | +#include <crypto/aead.h> |
| 169 | +#include <crypto/authenc.h> |
| 170 | +#include <crypto/skcipher.h> |
| 171 | +#include <crypto/hash.h> |
| 172 | +#include <crypto/internal/hash.h> |
| 173 | +#include <crypto/scatterwalk.h> |
| 174 | + |
| 175 | +#include <asm/m5485sim.h> |
| 176 | +#include "cf_talitos.h" |
| 177 | + |
| 178 | +#define TALITOS_TIMEOUT 100000 |
| 179 | +#define TALITOS_MAX_DATA_LEN 65535 |
| 180 | + |
| 181 | +#define DESC_TYPE(desc_hdr) (((desc_hdr) >> 4) & 0xf) |
| 182 | +#define PRIMARY_EU(desc_hdr) (((desc_hdr) >> 28) & 0xf) |
| 183 | +#define SECONDARY_EU(desc_hdr) (((desc_hdr) >> 16) & 0xf) |
| 184 | + |
| 185 | +#define CF_TALITOS_DEBUG 0 |
| 186 | +#if CF_TALITOS_DEBUG |
| 187 | +#define dprintk(args...) printk(args) |
| 188 | +#else |
+#define dprintk(args...) do { } while (0)
| 190 | +#endif |
| 191 | + |
| 192 | +/* descriptor pointer entry */ |
| 193 | +struct talitos_ptr { |
| 194 | + u32 len; /* length */ |
| 195 | + u32 ptr; /* address */ |
| 196 | +}; |
| 197 | + |
| 198 | +static const struct talitos_ptr zero_entry = { |
| 199 | + .len = 0, |
| 200 | + .ptr = 0 |
| 201 | +}; |
| 202 | + |
| 203 | +/* descriptor */ |
| 204 | +struct talitos_desc { |
| 205 | + u32 hdr; /* header */ |
| 206 | + struct talitos_ptr ptr[7]; /* ptr/len pair array */ |
| 207 | + u32 next_hdr; |
| 208 | +}; |
| 209 | + |
| 210 | +/** |
| 211 | + * talitos_request - descriptor submission request |
| 212 | + * @desc: descriptor pointer (kernel virtual) |
| 213 | + * @dma_desc: descriptor's physical bus address |
| 214 | + * @callback: whom to call when descriptor processing is done |
| 215 | + * @context: caller context (optional) |
| 216 | + */ |
| 217 | +struct talitos_request { |
| 218 | + struct talitos_desc *desc; |
| 219 | + dma_addr_t dma_desc; |
| 220 | + void (*callback) (struct device *dev, struct talitos_desc *desc, |
| 221 | + void *context, int error); |
| 222 | + void *context; |
| 223 | +}; |
| 224 | + |
| 225 | +/* per-channel fifo management */ |
| 226 | +struct talitos_channel { |
| 227 | + /* request fifo */ |
| 228 | + struct talitos_request *fifo; |
| 229 | + |
| 230 | + /* number of requests pending in channel h/w fifo */ |
| 231 | + atomic_t submit_count ____cacheline_aligned; |
| 232 | + |
| 233 | + /* request submission (head) lock */ |
| 234 | + spinlock_t head_lock ____cacheline_aligned; |
| 235 | + /* index to next free descriptor request */ |
| 236 | + int head; |
| 237 | + |
| 238 | + /* request release (tail) lock */ |
| 239 | + spinlock_t tail_lock ____cacheline_aligned; |
| 240 | + /* index to next in-progress/done descriptor request */ |
| 241 | + int tail; |
| 242 | +}; |
| 243 | + |
| 244 | +struct talitos_private { |
| 245 | + struct device *dev; |
| 246 | + struct platform_device *pdev; |
| 247 | + void __iomem *reg; |
| 248 | + int irq; |
| 249 | + |
+ /* SEC version geometry */
| 251 | + unsigned int num_channels; |
| 252 | + unsigned int chfifo_len; |
| 253 | + unsigned int exec_units; |
| 254 | + unsigned int desc_types; |
| 255 | + |
| 256 | + /* SEC Compatibility info */ |
| 257 | + unsigned long features; |
| 258 | + |
| 259 | + /* |
| 260 | + * length of the request fifo |
| 261 | + * fifo_len is chfifo_len rounded up to next power of 2 |
| 262 | + * so we can use bitwise ops to wrap |
| 263 | + */ |
| 264 | + unsigned int fifo_len; |
| 265 | + |
| 266 | + struct talitos_channel *chan; |
| 267 | + |
| 268 | + /* next channel to be assigned next incoming descriptor */ |
| 269 | + atomic_t last_chan ____cacheline_aligned; |
| 270 | + |
| 271 | + /* request callback tasklet */ |
| 272 | + struct tasklet_struct done_task; |
| 273 | + |
| 274 | + /* list of registered algorithms */ |
| 275 | + struct list_head alg_list; |
| 276 | + |
| 277 | + /* hwrng device */ |
| 278 | + struct hwrng rng; |
| 279 | +}; |
| 280 | + |
| 281 | +/* .features flag */ |
| 282 | +#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 |
| 283 | +#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 |
| 284 | +#define TALITOS_FTR_SHA224_HWINIT 0x00000004 |
| 285 | + |
| 286 | +/* |
| 287 | + * map virtual single (contiguous) pointer to h/w descriptor pointer |
| 288 | + */ |
| 289 | +static void map_single_talitos_ptr(struct device *dev, |
| 290 | + struct talitos_ptr *talitos_ptr, |
| 291 | + unsigned short len, void *data, |
| 292 | + unsigned char extent, |
| 293 | + enum dma_data_direction dir) |
| 294 | +{ |
| 295 | + dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); |
| 296 | + |
| 297 | + talitos_ptr->len = len; |
| 298 | + talitos_ptr->ptr = dma_addr; |
| 299 | +} |
| 300 | + |
| 301 | +/* |
| 302 | + * unmap bus single (contiguous) h/w descriptor pointer |
| 303 | + */ |
| 304 | +static void unmap_single_talitos_ptr(struct device *dev, |
| 305 | + struct talitos_ptr *talitos_ptr, |
| 306 | + enum dma_data_direction dir) |
| 307 | +{ |
| 308 | + dma_unmap_single(dev, talitos_ptr->ptr, talitos_ptr->len, dir); |
| 309 | +} |
| 310 | + |
| 311 | +static int reset_channel(struct device *dev, int ch) |
| 312 | +{ |
| 313 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 314 | + unsigned int timeout = TALITOS_TIMEOUT; |
| 315 | + |
| 316 | + setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET); |
| 317 | + |
| 318 | + while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & |
| 319 | + TALITOS_CCCR_RESET) && --timeout) |
| 320 | + cpu_relax(); |
| 321 | + |
| 322 | + if (timeout == 0) { |
| 323 | + dev_err(dev, "failed to reset channel %d\n", ch); |
| 324 | + return -EIO; |
| 325 | + } |
| 326 | + |
| 327 | + /* set 36-bit addressing, done writeback enable and done IRQ enable */ |
| 328 | + setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_NE | |
| 329 | + TALITOS_CCCR_NT | TALITOS_CCCR_CDWE | |
| 330 | + TALITOS_CCCR_CDIE); |
| 331 | + |
| 332 | + return 0; |
| 333 | +} |
| 334 | + |
| 335 | +static int reset_device(struct device *dev) |
| 336 | +{ |
| 337 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 338 | + unsigned int timeout = TALITOS_TIMEOUT; |
| 339 | + |
| 340 | + setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR); |
| 341 | + |
| 342 | + while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR) |
| 343 | + && --timeout) |
| 344 | + cpu_relax(); |
| 345 | + |
| 346 | + if (timeout == 0) { |
| 347 | + dev_err(dev, "failed to reset device\n"); |
| 348 | + return -EIO; |
| 349 | + } |
| 350 | + |
| 351 | + setbits32(priv->reg + TALITOS_DEURCR, TALITOS_DEURCR_RESET); |
| 352 | + setbits32(priv->reg + TALITOS_AFEURCR, TALITOS_AFEURCR_RESET); |
| 353 | + setbits32(priv->reg + TALITOS_AESURCR, TALITOS_AESURCR_RESET); |
| 354 | + setbits32(priv->reg + TALITOS_MDEURCR, TALITOS_MDEURCR_RESET); |
| 355 | + setbits32(priv->reg + TALITOS_RNGRCR, TALITOS_RNGRCR_SR); |
| 356 | + return 0; |
| 357 | +} |
| 358 | + |
| 359 | +/* |
| 360 | + * Reset and initialize the device |
| 361 | + */ |
| 362 | +static int init_device(struct device *dev) |
| 363 | +{ |
| 364 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 365 | + int ch, err; |
| 366 | + |
| 367 | + /* |
| 368 | + * Master reset |
| 369 | + * errata documentation: warning: certain SEC interrupts |
| 370 | + * are not fully cleared by writing the MCR:SWR bit, |
| 371 | + * set bit twice to completely reset |
| 372 | + */ |
| 373 | + err = reset_device(dev); |
| 374 | + if (err) |
| 375 | + return err; |
| 376 | + |
| 377 | + err = reset_device(dev); |
| 378 | + if (err) |
| 379 | + return err; |
| 380 | + |
| 381 | + /* reset channels */ |
| 382 | + for (ch = 0; ch < priv->num_channels; ch++) { |
| 383 | + err = reset_channel(dev, ch); |
| 384 | + if (err) |
| 385 | + return err; |
| 386 | + } |
| 387 | + |
| 388 | + /* enable channel done and error interrupts */ |
| 389 | + out_be32(priv->reg + TALITOS_IMR, 0); |
| 390 | + out_be32(priv->reg + TALITOS_IMR_LO, 0); |
| 391 | + |
| 392 | + out_be32(priv->reg + TALITOS_ICR, |
| 393 | + TALITOS_ICR_CHERR | TALITOS_ICR_CHDONE); |
| 394 | + out_be32(priv->reg + TALITOS_ICR_LO, |
| 395 | + TALITOS_ICR_LO_CHERR | TALITOS_ICR_LO_CHDONE); |
| 396 | + |
| 397 | + return 0; |
| 398 | +} |
| 399 | + |
| 400 | +/** |
| 401 | + * talitos_submit - submits a descriptor to the device for processing |
| 402 | + * @dev: the SEC device to be used |
| 403 | + * @desc: the descriptor to be processed by the device |
| 404 | + * @callback: whom to call when processing is complete |
| 405 | + * @context: a handle for use by caller (optional) |
| 406 | + * |
| 407 | + * desc must contain valid dma-mapped (bus physical) address pointers. |
| 408 | + * callback must check err and feedback in descriptor header |
| 409 | + * for device processing status. |
| 410 | + */ |
| 411 | +static int talitos_submit(struct device *dev, struct talitos_desc *desc, |
| 412 | + void (*callback)(struct device *dev, |
| 413 | + struct talitos_desc *desc, |
| 414 | + void *context, int error), |
| 415 | + void *context) |
| 416 | +{ |
| 417 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 418 | + struct talitos_request *request; |
| 419 | + unsigned long flags, ch; |
| 420 | + int head; |
| 421 | + |
| 422 | + /* ignore key parity check in triple DES */ |
| 423 | + if (((desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_DEU) && |
| 424 | + (desc->hdr & DESC_HDR_MODE0_DEU_3DES)) |
| 425 | + setbits32(priv->reg + TALITOS_DEUIMR, TALITOS_DEUIMR_KPE_MASK); |
| 426 | + |
| 427 | + /* select done notification */ |
| 428 | + desc->hdr |= DESC_HDR_DONE; |
| 429 | + |
| 430 | + /* emulate SEC's round-robin channel fifo polling scheme */ |
| 431 | + ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); |
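+ /* (the mask wraps only because num_channels is a power of 2) */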
| 432 | + |
| 433 | + spin_lock_irqsave(&priv->chan[ch].head_lock, flags); |
| 434 | + |
| 435 | + head = priv->chan[ch].head; |
| 436 | + request = &priv->chan[ch].fifo[head]; |
| 437 | + |
| 438 | + /* map descriptor and save caller data */ |
| 439 | + request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), |
| 440 | + DMA_BIDIRECTIONAL); |
| 441 | + request->callback = callback; |
| 442 | + request->context = context; |
| 443 | + |
| 444 | + /* increment fifo head */ |
| 445 | + priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1); |
| 446 | + |
| 447 | + smp_wmb(); |
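+ /* set desc last: a non-NULL desc marks this fifo slot in-flight for flush_channel */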
| 448 | + request->desc = desc; |
| 449 | + |
| 450 | + /* GO! */ |
| 451 | + wmb(); |
| 452 | + out_be32(priv->reg + TALITOS_FF(ch), request->dma_desc); |
| 453 | + |
| 454 | + spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); |
| 455 | + |
| 456 | + return -EINPROGRESS; |
| 457 | +} |
| 458 | + |
| 459 | +/* |
| 460 | + * process what was done, notify callback of error if not |
| 461 | + */ |
| 462 | +static void flush_channel(struct device *dev, int ch, int error, int reset_ch) |
| 463 | +{ |
| 464 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 465 | + struct talitos_request *request, saved_req; |
| 466 | + unsigned long flags; |
| 467 | + int tail, status; |
| 468 | + |
| 469 | + spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); |
| 470 | + |
| 471 | + tail = priv->chan[ch].tail; |
| 472 | + while (priv->chan[ch].fifo[tail].desc) { |
| 473 | + request = &priv->chan[ch].fifo[tail]; |
| 474 | + |
| 475 | + /* descriptors with their done bits set don't get the error */ |
| 476 | + rmb(); |
| 477 | + if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE) |
| 478 | + status = 0; |
| 479 | + else |
| 480 | + if (!error) |
| 481 | + break; |
| 482 | + else |
| 483 | + status = error; |
| 484 | + |
| 485 | + dma_unmap_single(dev, request->dma_desc, |
| 486 | + sizeof(struct talitos_desc), |
| 487 | + DMA_BIDIRECTIONAL); |
| 488 | + |
| 489 | + /* copy entries so we can call callback outside lock */ |
| 490 | + saved_req.desc = request->desc; |
| 491 | + saved_req.callback = request->callback; |
| 492 | + saved_req.context = request->context; |
| 493 | + |
| 494 | + /* release request entry in fifo */ |
| 495 | + smp_wmb(); |
| 496 | + request->desc = NULL; |
| 497 | + |
| 498 | + /* increment fifo tail */ |
| 499 | + priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1); |
| 500 | + |
| 501 | + spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); |
| 502 | + |
| 503 | + atomic_dec(&priv->chan[ch].submit_count); |
| 504 | + |
| 505 | + saved_req.callback(dev, saved_req.desc, saved_req.context, |
| 506 | + status); |
| 507 | + /* channel may resume processing in single desc error case */ |
| 508 | + if (error && !reset_ch && status == error) |
| 509 | + return; |
| 510 | + spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); |
| 511 | + tail = priv->chan[ch].tail; |
| 512 | + } |
| 513 | + |
| 514 | + spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); |
| 515 | +} |
| 516 | + |
| 517 | +/* |
| 518 | + * process completed requests for channels that have done status |
| 519 | + */ |
| 520 | +static void talitos_done(unsigned long data) |
| 521 | +{ |
| 522 | + struct device *dev = (struct device *)data; |
| 523 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 524 | + int ch; |
| 525 | + |
| 526 | + for (ch = 0; ch < priv->num_channels; ch++) |
| 527 | + flush_channel(dev, ch, 0, 0); |
| 528 | + |
| 529 | + /* At this point, all completed channels have been processed. |
| 530 | + * Unmask done interrupts for channels completed later on. |
| 531 | + */ |
| 532 | + out_be32(priv->reg + TALITOS_IMR, 0); |
| 533 | + out_be32(priv->reg + TALITOS_IMR_LO, 0); |
| 534 | + |
| 535 | + out_be32(priv->reg + TALITOS_ICR, |
| 536 | + TALITOS_ICR_CHERR | TALITOS_ICR_CHDONE); |
| 537 | + out_be32(priv->reg + TALITOS_ICR_LO, |
| 538 | + TALITOS_ICR_LO_CHERR | TALITOS_ICR_LO_CHDONE); |
| 539 | +} |
| 540 | + |
| 541 | +/* |
| 542 | + * locate current (offending) descriptor |
| 543 | + */ |
| 544 | +static struct talitos_desc *current_desc(struct device *dev, int ch) |
| 545 | +{ |
| 546 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 547 | + int tail = priv->chan[ch].tail; |
| 548 | + dma_addr_t cur_desc; |
| 549 | + |
| 550 | + cur_desc = in_be32(priv->reg + TALITOS_CDPR(ch)); |
| 551 | + |
| 552 | + while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) { |
| 553 | + tail = (tail + 1) & (priv->fifo_len - 1); |
| 554 | + if (tail == priv->chan[ch].tail) { |
| 555 | + dev_err(dev, "couldn't locate current descriptor\n"); |
| 556 | + return NULL; |
| 557 | + } |
| 558 | + } |
| 559 | + |
| 560 | + return priv->chan[ch].fifo[tail].desc; |
| 561 | +} |
| 562 | + |
| 563 | +/* |
| 564 | + * user diagnostics; report root cause of error based on execution unit status |
| 565 | + */ |
| 566 | +static void report_eu_error(struct device *dev, int ch, |
| 567 | + struct talitos_desc *desc) |
| 568 | +{ |
| 569 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 570 | + int i; |
| 571 | + |
| 572 | + switch (desc->hdr & DESC_HDR_SEL0_MASK) { |
| 573 | + case DESC_HDR_SEL0_AFEU: |
| 574 | + dev_err(dev, "AFEUISR 0x%08x\n", |
| 575 | + in_be32(priv->reg + TALITOS_AFEUISR)); |
| 576 | + break; |
| 577 | + case DESC_HDR_SEL0_DEU: |
| 578 | + dev_err(dev, "DEUISR 0x%08x\n", |
| 579 | + in_be32(priv->reg + TALITOS_DEUISR)); |
| 580 | + break; |
| 581 | + case DESC_HDR_SEL0_MDEU: |
| 582 | + dev_err(dev, "MDEUISR 0x%08x\n", |
| 583 | + in_be32(priv->reg + TALITOS_MDEUISR)); |
| 584 | + break; |
| 585 | + case DESC_HDR_SEL0_RNG: |
| 586 | + dev_err(dev, "RNGISR 0x%08x\n", |
| 587 | + in_be32(priv->reg + TALITOS_RNGISR)); |
| 588 | + break; |
| 589 | + case DESC_HDR_SEL0_AESU: |
| 590 | + dev_err(dev, "AESUISR 0x%08x\n", |
| 591 | + in_be32(priv->reg + TALITOS_AESUISR)); |
| 592 | + break; |
| 593 | + } |
| 594 | + |
| 595 | + switch (desc->hdr & DESC_HDR_SEL1_MASK) { |
| 596 | + case DESC_HDR_SEL1_MDEU: |
| 597 | + dev_err(dev, "MDEUISR 0x%08x\n", |
| 598 | + in_be32(priv->reg + TALITOS_MDEUISR)); |
| 599 | + break; |
| 600 | + } |
| 601 | + |
| 602 | + for (i = 0; i < 8; i++) |
| 603 | + dev_err(dev, "DESCBUF 0x%08x\n", |
| 604 | + in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8 * i)); |
| 605 | +} |
| 606 | + |
| 607 | +/* |
| 608 | + * recover from error interrupts |
| 609 | + */ |
| 610 | +static void talitos_error(unsigned long data, u32 isr, u32 isr_lo) |
| 611 | +{ |
| 612 | + struct device *dev = (struct device *)data; |
| 613 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 614 | + int ch, error, reset_ch = 0; |
| 615 | + u32 v, v_lo; |
| 616 | + |
| 617 | + for (ch = 0; ch < priv->num_channels; ch++) { |
| 618 | + /* skip channels without errors */ |
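+ /* (channel ch's error bit is tested at ISR position 29 + 2 * ch) */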
| 619 | + if (!((isr >> 29) & (1 << (ch * 2)))) |
| 620 | + continue; |
| 621 | + |
| 622 | + error = -EINVAL; |
| 623 | + |
| 624 | + v = in_be32(priv->reg + TALITOS_CCPSR(ch)); |
| 625 | + v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch)); |
| 626 | + |
| 627 | + if (v_lo & TALITOS_CCPSR_LO_TEA) |
| 628 | + dev_err(dev, "master data transfer error\n"); |
| 629 | + if (v_lo & TALITOS_CCPSR_LO_PERR) |
| 630 | + dev_err(dev, "fetch pointer not complete error\n"); |
| 631 | + if (v_lo & TALITOS_CCPSR_LO_DERR) |
| 632 | + dev_err(dev, "illegal descriptor header error\n"); |
| 633 | + if (v_lo & TALITOS_CCPSR_LO_SERR) |
| 634 | + dev_err(dev, "static assignment error\n"); |
| 635 | + if (v_lo & TALITOS_CCPSR_LO_EUERR) |
| 636 | + report_eu_error(dev, ch, current_desc(dev, ch)); |
| 637 | + |
| 638 | + flush_channel(dev, ch, error, reset_ch); |
| 639 | + |
| 640 | + if (reset_ch) |
| 641 | + reset_channel(dev, ch); |
| 642 | + } |
| 643 | + |
| 644 | + /* purge request queues */ |
| 645 | + for (ch = 0; ch < priv->num_channels; ch++) |
| 646 | + flush_channel(dev, ch, -EIO, 1); |
| 647 | + |
| 648 | + /* reset and reinitialize the device */ |
| 649 | + init_device(dev); |
| 650 | +} |
| 651 | + |
| 652 | +static irqreturn_t talitos_interrupt(int irq, void *data) |
| 653 | +{ |
| 654 | + struct device *dev = data; |
| 655 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 656 | + u32 isr, isr_lo; |
| 657 | + |
| 658 | + isr = in_be32(priv->reg + TALITOS_ISR); |
| 659 | + isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); |
| 660 | + /* Acknowledge interrupt */ |
| 661 | + out_be32(priv->reg + TALITOS_ICR, isr); |
| 662 | + out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); |
| 663 | + |
| 664 | + if (unlikely(isr & ~TALITOS_ISR_CHDONE)) { |
| 665 | + talitos_error((unsigned long)data, isr, isr_lo); |
| 666 | + } else if (likely(isr & TALITOS_ISR_CHDONE)) { |
| 667 | + /* mask further done interrupts. */ |
| 668 | + setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE); |
| 669 | + /* done_task will unmask done interrupts at exit */ |
| 670 | + tasklet_schedule(&priv->done_task); |
| 671 | + } |
| 672 | + |
| 673 | + return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE; |
| 674 | +} |
| 675 | + |
| 676 | + |
| 677 | +/* |
| 678 | + * crypto alg |
| 679 | + */ |
| 680 | +#define TALITOS_CRA_PRIORITY 3000 |
| 681 | +#define TALITOS_MAX_KEY_SIZE 64 |
| 682 | +#define TALITOS_MAX_IV_LENGTH 16 |
| 683 | +#define TALITOS_MAX_OUTPUTDATA_SIZE 64 |
| 684 | +#define TALITOS_MAX_INPUTDATA_SIZE 64 |
| 685 | + |
| 686 | +#define ARC4_MIN_KEY_SIZE 4 |
| 687 | +#define ARC4_MAX_KEY_SIZE 16 |
| 688 | +#define ARC4_BLOCK_SIZE 64 |
| 689 | +#define MD5_BLOCK_SIZE 64 |
| 690 | + |
| 691 | +struct talitos_ctx { |
| 692 | + struct device *dev; |
| 693 | + __be32 desc_hdr_template; |
| 694 | + u8 key[TALITOS_MAX_KEY_SIZE]; |
| 695 | + u8 iv[TALITOS_MAX_IV_LENGTH]; |
| 696 | + unsigned int keylen; |
| 697 | + unsigned int enckeylen; |
| 698 | + unsigned int authkeylen; |
| 699 | + unsigned int authsize; |
| 700 | +}; |
| 701 | + |
| 702 | +#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE |
| 703 | +#define TALITOS_MDEU_MAX_CONTEXT_SIZE \ |
| 704 | + TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 |
| 705 | + |
| 706 | +struct talitos_ahash_req_ctx { |
| 707 | + u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; |
| 708 | + unsigned int hw_context_size; |
| 709 | + u8 buf[HASH_MAX_BLOCK_SIZE]; |
| 710 | + u8 bufnext[HASH_MAX_BLOCK_SIZE]; |
| 711 | + unsigned int swinit; |
| 712 | + unsigned int first; |
| 713 | + unsigned int last; |
| 714 | + unsigned int to_hash_later; |
| 715 | + u64 nbuf; |
| 716 | + struct scatterlist bufsl[2]; |
| 717 | + struct scatterlist *psrc; |
| 718 | +}; |
| 719 | + |
| 720 | +/* |
| 721 | + * talitos_edesc - s/w-extended descriptor |
| 722 | + * @src_nents: number of segments in input scatterlist |
| 723 | + * @dst_nents: number of segments in output scatterlist |
| 724 | + * @desc: h/w descriptor |
| 725 | + * |
| 726 | + * if decrypting (with authcheck), or either one of src_nents or dst_nents |
| 727 | + * is greater than 1, an integrity check value is concatenated to the end |
| 728 | + * of link_tbl data |
| 729 | + */ |
| 730 | +struct talitos_edesc { |
| 731 | + int src_nents; |
| 732 | + int dst_nents; |
| 733 | + int src_is_chained; |
| 734 | + int dst_is_chained; |
| 735 | + struct talitos_desc desc; |
| 736 | + u8 src_buf[TALITOS_MAX_INPUTDATA_SIZE]; |
| 737 | + u8 dst_buf[TALITOS_MAX_OUTPUTDATA_SIZE]; |
| 738 | +}; |
| 739 | + |
| 740 | +static int talitos_map_sg(struct device *dev, struct scatterlist *sg, |
| 741 | + unsigned int nents, enum dma_data_direction dir, |
| 742 | + int chained) |
| 743 | +{ |
| 744 | + if (unlikely(chained)) |
| 745 | + while (sg) { |
| 746 | + dma_map_sg(dev, sg, 1, dir); |
| 747 | + sg = scatterwalk_sg_next(sg); |
| 748 | + } |
| 749 | + else |
| 750 | + dma_map_sg(dev, sg, nents, dir); |
| 751 | + return nents; |
| 752 | +} |
| 753 | + |
| 754 | +static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg, |
| 755 | + enum dma_data_direction dir) |
| 756 | +{ |
| 757 | + while (sg) { |
| 758 | + dma_unmap_sg(dev, sg, 1, dir); |
| 759 | + sg = scatterwalk_sg_next(sg); |
| 760 | + } |
| 761 | +} |
| 762 | + |
| 763 | +static void talitos_sg_unmap(struct device *dev, |
| 764 | + struct talitos_edesc *edesc, |
| 765 | + struct scatterlist *src, |
| 766 | + struct scatterlist *dst) |
| 767 | +{ |
| 768 | + unsigned int src_nents = edesc->src_nents ? : 1; |
| 769 | + unsigned int dst_nents = edesc->dst_nents ? : 1; |
| 770 | + |
| 771 | + if (src != dst) { |
| 772 | + if (edesc->src_is_chained) |
| 773 | + talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE); |
| 774 | + else |
| 775 | + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); |
| 776 | + |
| 777 | + if (dst) { |
| 778 | + if (edesc->dst_is_chained) |
| 779 | + talitos_unmap_sg_chain(dev, dst, |
| 780 | + DMA_FROM_DEVICE); |
| 781 | + else |
| 782 | + dma_unmap_sg(dev, dst, dst_nents, |
| 783 | + DMA_FROM_DEVICE); |
| 784 | + } |
| 785 | + } else |
| 786 | + if (edesc->src_is_chained) |
| 787 | + talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); |
| 788 | + else |
| 789 | + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); |
| 790 | +} |
| 791 | + |
| 792 | +/* |
| 793 | + * derive number of elements in scatterlist |
| 794 | + */ |
| 795 | +static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) |
| 796 | +{ |
| 797 | + struct scatterlist *sg = sg_list; |
| 798 | + int sg_nents = 0; |
| 799 | + |
| 800 | + *chained = 0; |
| 801 | + while (nbytes > 0) { |
| 802 | + sg_nents++; |
| 803 | + nbytes -= sg->length; |
| 804 | + if (!sg_is_last(sg) && (sg + 1)->length == 0) |
| 805 | + *chained = 1; |
| 806 | + sg = scatterwalk_sg_next(sg); |
| 807 | + } |
| 808 | + |
| 809 | + return sg_nents; |
| 810 | +} |
| 811 | + |
| 812 | +/** |
| 813 | + * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer |
| 814 | + * @sgl: The SG list |
| 815 | + * @nents: Number of SG entries |
| 816 | + * @buf: Where to copy to |
| 817 | + * @buflen: The number of bytes to copy |
| 818 | + * @skip: The number of bytes to skip before copying. |
| 819 | + * Note: skip + buflen should equal SG total size. |
| 820 | + * |
| 821 | + * Returns the number of copied bytes. |
| 822 | + * |
| 823 | + **/ |
| 824 | +static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents, |
| 825 | + void *buf, size_t buflen, unsigned int skip) |
| 826 | +{ |
| 827 | + unsigned int offset = 0; |
| 828 | + unsigned int boffset = 0; |
| 829 | + struct sg_mapping_iter miter; |
| 830 | + unsigned long flags; |
| 831 | + unsigned int sg_flags = SG_MITER_ATOMIC; |
| 832 | + size_t total_buffer = buflen + skip; |
| 833 | + |
| 834 | + sg_flags |= SG_MITER_FROM_SG; |
| 835 | + |
| 836 | + sg_miter_start(&miter, sgl, nents, sg_flags); |
| 837 | + |
| 838 | + local_irq_save(flags); |
| 839 | + |
| 840 | + while (sg_miter_next(&miter) && offset < total_buffer) { |
| 841 | + unsigned int len; |
| 842 | + unsigned int ignore; |
| 843 | + |
| 844 | + if ((offset + miter.length) > skip) { |
| 845 | + if (offset < skip) { |
| 846 | + /* Copy part of this segment */ |
| 847 | + ignore = skip - offset; |
| 848 | + len = miter.length - ignore; |
| 849 | + if (boffset + len > buflen) |
| 850 | + len = buflen - boffset; |
| 851 | + memcpy(buf + boffset, miter.addr + ignore, len); |
| 852 | + } else { |
| 853 | + /* Copy all of this segment (up to buflen) */ |
| 854 | + len = miter.length; |
| 855 | + if (boffset + len > buflen) |
| 856 | + len = buflen - boffset; |
| 857 | + memcpy(buf + boffset, miter.addr, len); |
| 858 | + } |
| 859 | + boffset += len; |
| 860 | + } |
| 861 | + offset += miter.length; |
| 862 | + } |
| 863 | + |
| 864 | + sg_miter_stop(&miter); |
| 865 | + |
| 866 | + local_irq_restore(flags); |
| 867 | + return boffset; |
| 868 | +} |
| 869 | + |
| 870 | +/* |
| 871 | + * allocate and map the extended descriptor |
| 872 | + */ |
| 873 | +static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
| 874 | + struct scatterlist *src, |
| 875 | + struct scatterlist *dst, |
| 876 | + int hash_result, |
| 877 | + unsigned int cryptlen, |
| 878 | + unsigned int authsize, |
| 879 | + int icv_stashing, |
| 880 | + u32 cryptoflags) |
| 881 | +{ |
| 882 | + struct talitos_edesc *edesc; |
| 883 | + int src_nents, dst_nents, alloc_len; |
| 884 | + int src_chained, dst_chained = 0; |
| 885 | + gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
| 886 | + GFP_ATOMIC; |
| 887 | + |
| 888 | + if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { |
| 889 | + dev_err(dev, "length exceeds h/w max limit\n"); |
| 890 | + return ERR_PTR(-EINVAL); |
| 891 | + } |
| 892 | + |
| 893 | + src_nents = sg_count(src, cryptlen + authsize, &src_chained); |
| 894 | + src_nents = (src_nents == 1) ? 0 : src_nents; |
| 895 | + |
| 896 | + if (hash_result) { |
| 897 | + dst_nents = 0; |
| 898 | + } else { |
| 899 | + if (dst == src) { |
| 900 | + dst_nents = src_nents; |
| 901 | + } else { |
| 902 | + dst_nents = sg_count(dst, cryptlen + authsize, |
| 903 | + &dst_chained); |
| 904 | + dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
| 905 | + } |
| 906 | + } |
| 907 | + |
| 908 | + /* |
| 909 | + * allocate space for base edesc plus the link tables, |
| 910 | + * allowing for two separate entries for ICV and generated ICV (+ 2), |
| 911 | + * and the ICV data itself |
| 912 | + */ |
| 913 | + alloc_len = sizeof(struct talitos_edesc); |
| 914 | + |
+ edesc = kmalloc(alloc_len, flags);
| 916 | + if (!edesc) { |
| 917 | + dev_err(dev, "could not allocate edescriptor\n"); |
| 918 | + return ERR_PTR(-ENOMEM); |
| 919 | + } |
| 920 | + |
| 921 | + edesc->src_nents = src_nents; |
| 922 | + edesc->dst_nents = dst_nents; |
| 923 | + edesc->src_is_chained = src_chained; |
| 924 | + edesc->dst_is_chained = dst_chained; |
| 925 | + return edesc; |
| 926 | +} |
| 927 | + |
| 928 | +static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, |
| 929 | + const u8 *key, unsigned int keylen) |
| 930 | +{ |
| 931 | + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
| 932 | + struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher); |
| 933 | + |
| 934 | + if (keylen > TALITOS_MAX_KEY_SIZE) |
| 935 | + goto badkey; |
| 936 | + |
| 937 | + if (keylen < alg->min_keysize || keylen > alg->max_keysize) |
| 938 | + goto badkey; |
| 939 | + |
| 940 | + memcpy(&ctx->key, key, keylen); |
| 941 | + ctx->keylen = keylen; |
| 942 | + |
| 943 | + return 0; |
| 944 | + |
| 945 | +badkey: |
| 946 | + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 947 | + return -EINVAL; |
| 948 | +} |
| 949 | + |
| 950 | +static void common_nonsnoop_unmap(struct device *dev, |
| 951 | + struct talitos_edesc *edesc, |
| 952 | + struct ablkcipher_request *areq) |
| 953 | +{ |
| 954 | + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); |
| 955 | + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); |
| 956 | + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); |
| 957 | + |
| 958 | + talitos_sg_unmap(dev, edesc, areq->src, areq->dst); |
| 959 | +} |
| 960 | + |
| 961 | +static void ablkcipher_done(struct device *dev, |
| 962 | + struct talitos_desc *desc, void *context, |
| 963 | + int err) |
| 964 | +{ |
| 965 | + struct ablkcipher_request *areq = context; |
| 966 | + struct talitos_edesc *edesc; |
| 967 | + |
| 968 | + edesc = container_of(desc, struct talitos_edesc, desc); |
| 969 | + |
| 970 | + if (edesc->dst_nents != 0) |
| 971 | + sg_copy_from_buffer(areq->dst, edesc->dst_nents, |
| 972 | + edesc->dst_buf, areq->nbytes); |
| 973 | + |
| 974 | + common_nonsnoop_unmap(dev, edesc, areq); |
| 975 | + |
| 976 | + kfree(edesc); |
| 977 | + |
| 978 | + areq->base.complete(&areq->base, err); |
| 979 | +} |
| 980 | + |
| 981 | +static int common_nonsnoop(struct talitos_edesc *edesc, |
| 982 | + struct ablkcipher_request *areq, |
| 983 | + u8 *giv, |
| 984 | + void (*callback) (struct device *dev, |
| 985 | + struct talitos_desc *desc, |
| 986 | + void *context, int error)) |
| 987 | +{ |
| 988 | + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
| 989 | + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
| 990 | + struct device *dev = ctx->dev; |
| 991 | + struct talitos_desc *desc = &edesc->desc; |
| 992 | + unsigned int cryptlen = areq->nbytes; |
| 993 | + unsigned int ivsize; |
| 994 | + int sg_count, ret; |
| 995 | + |
| 996 | + desc->next_hdr = 0; |
| 997 | + |
| 998 | + /* first DWORD empty */ |
| 999 | + desc->ptr[0] = zero_entry; |
| 1000 | + |
| 1001 | + /* cipher iv */ |
| 1002 | + ivsize = crypto_ablkcipher_ivsize(cipher); |
| 1003 | + map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0, |
| 1004 | + DMA_TO_DEVICE); |
| 1005 | + |
| 1006 | + /* AFEU using a key */ |
| 1007 | + if (((desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AFEU) && |
| 1008 | + ((desc->hdr & DESC_HDR_MODE0_MASK) == |
| 1009 | + DESC_HDR_MODE0_AFEU_USE_KEY)) |
| 1010 | + desc->ptr[1] = zero_entry; |
| 1011 | + |
| 1012 | + /* cipher key */ |
| 1013 | + map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, |
| 1014 | + (char *)&ctx->key, 0, DMA_TO_DEVICE); |
| 1015 | + |
| 1016 | + /* AFEU using context */ |
| 1017 | + if (((desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AFEU) && |
| 1018 | + ((desc->hdr & DESC_HDR_MODE0_MASK) == |
| 1019 | + DESC_HDR_MODE0_AFEU_USE_CONTEXT)) |
| 1020 | + desc->ptr[2] = zero_entry; |
| 1021 | + |
| 1022 | + /* |
| 1023 | + * cipher in |
| 1024 | + */ |
| 1025 | + desc->ptr[3].len = cpu_to_be16(cryptlen); |
| 1026 | + |
| 1027 | + sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, |
| 1028 | + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL |
| 1029 | + : DMA_TO_DEVICE, |
| 1030 | + edesc->src_is_chained); |
| 1031 | + |
| 1032 | + if (sg_count == 1) |
| 1033 | + desc->ptr[3].ptr = sg_dma_address(areq->src); |
| 1034 | + else { |
| 1035 | + sg_copy_to_buffer(areq->src, sg_count, edesc->src_buf, |
| 1036 | + desc->ptr[3].len); |
| 1037 | + desc->ptr[3].ptr = (u32)edesc->src_buf; |
| 1038 | + } |
| 1039 | + |
| 1040 | + /* cipher out */ |
| 1041 | + desc->ptr[4].len = cpu_to_be16(cryptlen); |
| 1042 | + |
| 1043 | + if (areq->src != areq->dst) |
| 1044 | + sg_count = talitos_map_sg(dev, areq->dst, |
| 1045 | + edesc->dst_nents ? : 1, |
| 1046 | + DMA_FROM_DEVICE, |
| 1047 | + edesc->dst_is_chained); |
| 1048 | + |
| 1049 | + if (sg_count == 1) |
| 1050 | + desc->ptr[4].ptr = sg_dma_address(areq->dst); |
| 1051 | + else |
| 1052 | + desc->ptr[4].ptr = (u32)edesc->dst_buf; |
| 1053 | + |
| 1054 | + /* iv out */ |
| 1055 | + map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, |
| 1056 | + DMA_FROM_DEVICE); |
| 1057 | + |
| 1058 | + /* last DWORD empty */ |
| 1059 | + desc->ptr[6] = zero_entry; |
| 1060 | + |
| 1061 | + ret = talitos_submit(dev, desc, callback, areq); |
| 1062 | + if (ret != -EINPROGRESS) { |
| 1063 | + common_nonsnoop_unmap(dev, edesc, areq); |
| 1064 | + kfree(edesc); |
| 1065 | + } |
| 1066 | + return ret; |
| 1067 | +} |
| 1068 | + |
| 1069 | +static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * |
| 1070 | + areq) |
| 1071 | +{ |
| 1072 | + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
| 1073 | + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
| 1074 | + |
| 1075 | + return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0, |
| 1076 | + areq->nbytes, 0, 0, areq->base.flags); |
| 1077 | +} |
| 1078 | + |
| 1079 | +static int ablkcipher_encrypt(struct ablkcipher_request *areq) |
| 1080 | +{ |
| 1081 | + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
| 1082 | + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
| 1083 | + struct talitos_edesc *edesc; |
| 1084 | + |
| 1085 | + /* allocate extended descriptor */ |
| 1086 | + edesc = ablkcipher_edesc_alloc(areq); |
| 1087 | + if (IS_ERR(edesc)) |
| 1088 | + return PTR_ERR(edesc); |
| 1089 | + |
| 1090 | + /* set encrypt except AFEU */ |
| 1091 | + if ((ctx->desc_hdr_template & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AFEU) |
| 1092 | + edesc->desc.hdr = ctx->desc_hdr_template; |
| 1093 | + else |
| 1094 | + edesc->desc.hdr = ctx->desc_hdr_template | |
| 1095 | + DESC_HDR_MODE0_ENCRYP; |
| 1096 | + |
| 1097 | + return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); |
| 1098 | +} |
| 1099 | + |
| 1100 | +static int ablkcipher_decrypt(struct ablkcipher_request *areq) |
| 1101 | +{ |
| 1102 | + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
| 1103 | + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
| 1104 | + struct talitos_edesc *edesc; |
| 1105 | + |
| 1106 | + /* allocate extended descriptor */ |
| 1107 | + edesc = ablkcipher_edesc_alloc(areq); |
| 1108 | + if (IS_ERR(edesc)) |
| 1109 | + return PTR_ERR(edesc); |
| 1110 | + |
| 1111 | + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; |
| 1112 | + |
| 1113 | + return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); |
| 1114 | +} |
| 1115 | + |
| 1116 | +static void common_nonsnoop_hash_unmap(struct device *dev, |
| 1117 | + struct talitos_edesc *edesc, |
| 1118 | + struct ahash_request *areq) |
| 1119 | +{ |
| 1120 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1121 | + |
| 1122 | + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); |
| 1123 | + |
| 1124 | + /* When using hashctx-in, must unmap it. */ |
| 1125 | + if (edesc->desc.ptr[1].len) |
| 1126 | + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], |
| 1127 | + DMA_TO_DEVICE); |
| 1128 | + |
| 1129 | + if (edesc->desc.ptr[2].len) |
| 1130 | + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], |
| 1131 | + DMA_TO_DEVICE); |
| 1132 | + |
| 1133 | + talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); |
| 1134 | +} |
| 1135 | + |
| 1136 | +static void ahash_done(struct device *dev, |
| 1137 | + struct talitos_desc *desc, void *context, |
| 1138 | + int err) |
| 1139 | +{ |
| 1140 | + struct ahash_request *areq = context; |
| 1141 | + struct talitos_edesc *edesc = |
| 1142 | + container_of(desc, struct talitos_edesc, desc); |
| 1143 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1144 | + |
| 1145 | + if (!req_ctx->last && req_ctx->to_hash_later) { |
| 1146 | + /* Position any partial block for next update/final/finup */ |
| 1147 | + memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later); |
| 1148 | + req_ctx->nbuf = req_ctx->to_hash_later; |
| 1149 | + } |
| 1150 | + common_nonsnoop_hash_unmap(dev, edesc, areq); |
| 1151 | + |
| 1152 | + kfree(edesc); |
| 1153 | + |
| 1154 | + areq->base.complete(&areq->base, err); |
| 1155 | +} |
| 1156 | + |
| 1157 | +static int common_nonsnoop_hash(struct talitos_edesc *edesc, |
| 1158 | + struct ahash_request *areq, unsigned int length, |
| 1159 | + void (*callback) (struct device *dev, |
| 1160 | + struct talitos_desc *desc, |
| 1161 | + void *context, int error)) |
| 1162 | +{ |
| 1163 | + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
| 1164 | + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); |
| 1165 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1166 | + struct device *dev = ctx->dev; |
| 1167 | + struct talitos_desc *desc = &edesc->desc; |
| 1168 | + int sg_count, ret; |
| 1169 | + |
| 1170 | + desc->next_hdr = 0; |
| 1171 | + |
| 1172 | + /* first DWORD empty */ |
| 1173 | + desc->ptr[0] = zero_entry; |
| 1174 | + |
| 1175 | + /* hash context in */ |
| 1176 | + if (!req_ctx->first || req_ctx->swinit) { |
| 1177 | + map_single_talitos_ptr(dev, &desc->ptr[1], |
| 1178 | + req_ctx->hw_context_size, |
| 1179 | + (char *)req_ctx->hw_context, 0, |
| 1180 | + DMA_TO_DEVICE); |
| 1181 | + req_ctx->swinit = 0; |
| 1182 | + } else { |
| 1183 | + desc->ptr[1] = zero_entry; |
| 1184 | + /* Indicate next op is not the first. */ |
| 1185 | + req_ctx->first = 0; |
| 1186 | + } |
| 1187 | + |
| 1188 | + /* HMAC key */ |
| 1189 | + if (ctx->keylen) |
| 1190 | + map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, |
| 1191 | + (char *)&ctx->key, 0, DMA_TO_DEVICE); |
| 1192 | + else |
| 1193 | + desc->ptr[2] = zero_entry; |
| 1194 | + |
| 1195 | + /* |
| 1196 | + * data in |
| 1197 | + */ |
| 1198 | + desc->ptr[3].len = length; |
| 1199 | + sg_count = talitos_map_sg(dev, req_ctx->psrc, |
| 1200 | + edesc->src_nents ? : 1, |
| 1201 | + DMA_TO_DEVICE, |
| 1202 | + edesc->src_is_chained); |
| 1203 | + |
| 1204 | + if (sg_count == 1) |
| 1205 | + desc->ptr[3].ptr = sg_dma_address(req_ctx->psrc); |
| 1206 | + else { |
| 1207 | + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->src_buf, |
| 1208 | + desc->ptr[3].len); |
| 1209 | + desc->ptr[3].ptr = (u32)edesc->src_buf; |
| 1210 | + } |
| 1211 | + |
| 1212 | + /* fifth DWORD empty */ |
| 1213 | + desc->ptr[4] = zero_entry; |
| 1214 | + |
| 1215 | + /* hash/HMAC out -or- hash context out */ |
| 1216 | + if (req_ctx->last) |
| 1217 | + map_single_talitos_ptr(dev, &desc->ptr[5], |
| 1218 | + crypto_ahash_digestsize(tfm), |
| 1219 | + areq->result, 0, DMA_FROM_DEVICE); |
| 1220 | + else |
| 1221 | + map_single_talitos_ptr(dev, &desc->ptr[5], |
| 1222 | + req_ctx->hw_context_size, |
| 1223 | + req_ctx->hw_context, 0, DMA_FROM_DEVICE); |
| 1224 | + |
| 1225 | + /* last DWORD empty */ |
| 1226 | + desc->ptr[6] = zero_entry; |
| 1227 | + |
| 1228 | + ret = talitos_submit(dev, desc, callback, areq); |
| 1229 | + if (ret != -EINPROGRESS) { |
| 1230 | + common_nonsnoop_hash_unmap(dev, edesc, areq); |
| 1231 | + kfree(edesc); |
| 1232 | + } |
| 1233 | + return ret; |
| 1234 | +} |
| 1235 | + |
| 1236 | +static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, |
| 1237 | + unsigned int nbytes) |
| 1238 | +{ |
| 1239 | + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
| 1240 | + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); |
| 1241 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1242 | + |
| 1243 | + return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1, |
| 1244 | + nbytes, 0, 0, areq->base.flags); |
| 1245 | +} |
| 1246 | + |
| 1247 | +static int ahash_init(struct ahash_request *areq) |
| 1248 | +{ |
| 1249 | + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
| 1250 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1251 | + |
| 1252 | + /* Initialize the context */ |
| 1253 | + req_ctx->nbuf = 0; |
| 1254 | + req_ctx->first = 1; /* first indicates h/w must init its context */ |
| 1255 | + req_ctx->swinit = 0; /* assume h/w init of context */ |
| 1256 | + req_ctx->hw_context_size = |
| 1257 | + (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) |
| 1258 | + ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 |
| 1259 | + : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; |
| 1260 | + |
| 1261 | + return 0; |
| 1262 | +} |
| 1263 | + |
| 1264 | +/* |
| 1265 | + * on h/w without explicit sha224 support, we initialize h/w context |
| 1266 | + * manually with sha224 constants, and tell it to run sha256. |
| 1267 | + */ |
| 1268 | +static int ahash_init_sha224_swinit(struct ahash_request *areq) |
| 1269 | +{ |
| 1270 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1271 | + |
| 1272 | + ahash_init(areq); |
+ req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
| 1274 | + |
| 1275 | + req_ctx->hw_context[0] = SHA224_H0; |
| 1276 | + req_ctx->hw_context[1] = SHA224_H1; |
| 1277 | + req_ctx->hw_context[2] = SHA224_H2; |
| 1278 | + req_ctx->hw_context[3] = SHA224_H3; |
| 1279 | + req_ctx->hw_context[4] = SHA224_H4; |
| 1280 | + req_ctx->hw_context[5] = SHA224_H5; |
| 1281 | + req_ctx->hw_context[6] = SHA224_H6; |
| 1282 | + req_ctx->hw_context[7] = SHA224_H7; |
| 1283 | + |
| 1284 | + /* init 64-bit count */ |
| 1285 | + req_ctx->hw_context[8] = 0; |
| 1286 | + req_ctx->hw_context[9] = 0; |
| 1287 | + |
| 1288 | + return 0; |
| 1289 | +} |
| 1290 | + |
| 1291 | +static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) |
| 1292 | +{ |
| 1293 | + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
| 1294 | + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); |
| 1295 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1296 | + struct talitos_edesc *edesc; |
| 1297 | + unsigned int blocksize = |
| 1298 | + crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); |
| 1299 | + unsigned int nbytes_to_hash; |
| 1300 | + unsigned int to_hash_later; |
| 1301 | + unsigned int nsg; |
| 1302 | + int chained; |
| 1303 | + |
| 1304 | + if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { |
| 1305 | + /* Buffer up to one whole block */ |
| 1306 | + sg_copy_to_buffer(areq->src, |
| 1307 | + sg_count(areq->src, nbytes, &chained), |
| 1308 | + req_ctx->buf + req_ctx->nbuf, nbytes); |
| 1309 | + req_ctx->nbuf += nbytes; |
| 1310 | + return 0; |
| 1311 | + } |
| 1312 | + |
| 1313 | + /* At least (blocksize + 1) bytes are available to hash */ |
| 1314 | + nbytes_to_hash = nbytes + req_ctx->nbuf; |
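+ /* blocksize is a power of 2, so the mask below is the partial-block remainder */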
| 1315 | + to_hash_later = nbytes_to_hash & (blocksize - 1); |
| 1316 | + |
| 1317 | + if (req_ctx->last) |
| 1318 | + to_hash_later = 0; |
| 1319 | + else if (to_hash_later) |
| 1320 | + /* There is a partial block. Hash the full block(s) now */ |
| 1321 | + nbytes_to_hash -= to_hash_later; |
| 1322 | + else { |
| 1323 | + /* Keep one block buffered */ |
| 1324 | + nbytes_to_hash -= blocksize; |
| 1325 | + to_hash_later = blocksize; |
| 1326 | + } |
| 1327 | + |
| 1328 | + /* Chain in any previously buffered data */ |
| 1329 | + if (req_ctx->nbuf) { |
| 1330 | + nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; |
| 1331 | + sg_init_table(req_ctx->bufsl, nsg); |
| 1332 | + sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf); |
| 1333 | + if (nsg > 1) |
| 1334 | + scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src); |
| 1335 | + req_ctx->psrc = req_ctx->bufsl; |
| 1336 | + } else |
| 1337 | + req_ctx->psrc = areq->src; |
| 1338 | + |
| 1339 | + if (to_hash_later) { |
| 1340 | + int nents = sg_count(areq->src, nbytes, &chained); |
| 1341 | + sg_copy_end_to_buffer(areq->src, nents, |
| 1342 | + req_ctx->bufnext, |
| 1343 | + to_hash_later, |
| 1344 | + nbytes - to_hash_later); |
| 1345 | + } |
| 1346 | + req_ctx->to_hash_later = to_hash_later; |
| 1347 | + |
| 1348 | + /* Allocate extended descriptor */ |
| 1349 | + edesc = ahash_edesc_alloc(areq, nbytes_to_hash); |
| 1350 | + if (IS_ERR(edesc)) |
| 1351 | + return PTR_ERR(edesc); |
| 1352 | + |
| 1353 | + edesc->desc.hdr = ctx->desc_hdr_template; |
| 1354 | + |
| 1355 | + /* On last one, request SEC to pad; otherwise continue */ |
| 1356 | + if (req_ctx->last) |
| 1357 | + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; |
| 1358 | + else |
| 1359 | + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; |
| 1360 | + |
| 1361 | + /* request SEC to INIT hash. */ |
| 1362 | + if (req_ctx->first && !req_ctx->swinit) |
| 1363 | + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; |
| 1364 | + |
| 1365 | + /* When the tfm context has a keylen, it's an HMAC. |
| 1366 | + * A first or last (ie. not middle) descriptor must request HMAC. |
| 1367 | + */ |
| 1368 | + if (ctx->keylen && (req_ctx->first || req_ctx->last)) |
| 1369 | + edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; |
| 1370 | + |
| 1371 | + return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, |
| 1372 | + ahash_done); |
| 1373 | +} |
| 1374 | + |
| 1375 | +static int ahash_update(struct ahash_request *areq) |
| 1376 | +{ |
| 1377 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1378 | + |
| 1379 | + req_ctx->last = 0; |
| 1380 | + |
| 1381 | + return ahash_process_req(areq, areq->nbytes); |
| 1382 | +} |
| 1383 | + |
| 1384 | +static int ahash_final(struct ahash_request *areq) |
| 1385 | +{ |
| 1386 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1387 | + |
| 1388 | + req_ctx->last = 1; |
| 1389 | + |
| 1390 | + return ahash_process_req(areq, 0); |
| 1391 | +} |
| 1392 | + |
| 1393 | +static int ahash_finup(struct ahash_request *areq) |
| 1394 | +{ |
| 1395 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1396 | + |
| 1397 | + req_ctx->last = 1; |
| 1398 | + |
| 1399 | + return ahash_process_req(areq, areq->nbytes); |
| 1400 | +} |
| 1401 | + |
| 1402 | +static int ahash_digest(struct ahash_request *areq) |
| 1403 | +{ |
| 1404 | + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1405 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); |
| 1406 | + |
| 1407 | + ahash->init(areq); |
| 1408 | + req_ctx->last = 1; |
| 1409 | + |
| 1410 | + return ahash_process_req(areq, areq->nbytes); |
| 1411 | +} |
| 1412 | + |
| 1413 | +struct talitos_alg_template { |
| 1414 | + u32 type; |
| 1415 | + union { |
| 1416 | + struct crypto_alg crypto; |
| 1417 | + struct ahash_alg hash; |
| 1418 | + } alg; |
| 1419 | + __be32 desc_hdr_template; |
| 1420 | +}; |
| 1421 | + |
| 1422 | +static struct talitos_alg_template driver_algs[] = { |
| 1423 | + /* ABLKCIPHER algorithms. */ |
| 1424 | + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
| 1425 | + .alg.crypto = { |
| 1426 | + .cra_name = "ecb(arc4)", |
| 1427 | + .cra_driver_name = "ecb-arc4-talitos", |
| 1428 | + .cra_blocksize = ARC4_BLOCK_SIZE, |
| 1429 | + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 1430 | + CRYPTO_ALG_ASYNC, |
| 1431 | + .cra_type = &crypto_ablkcipher_type, |
| 1432 | + .cra_ablkcipher = { |
| 1433 | + .setkey = ablkcipher_setkey, |
| 1434 | + .encrypt = ablkcipher_encrypt, |
| 1435 | + .decrypt = ablkcipher_decrypt, |
| 1436 | + .geniv = "eseqiv", |
| 1437 | + .min_keysize = ARC4_MIN_KEY_SIZE, |
| 1438 | + .max_keysize = ARC4_MAX_KEY_SIZE, |
| 1439 | + .ivsize = ARC4_BLOCK_SIZE, |
| 1440 | + } |
| 1441 | + }, |
| 1442 | + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_AFEU | |
| 1443 | + DESC_HDR_SEL0_AFEU | |
| 1444 | + DESC_HDR_MODE0_AFEU_USE_KEY, |
| 1445 | + }, |
| 1446 | + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
| 1447 | + .alg.crypto = { |
| 1448 | + .cra_name = "cbc(aes)", |
| 1449 | + .cra_driver_name = "cbc-aes-talitos", |
| 1450 | + .cra_blocksize = AES_BLOCK_SIZE, |
| 1451 | + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 1452 | + CRYPTO_ALG_ASYNC, |
| 1453 | + .cra_type = &crypto_ablkcipher_type, |
| 1454 | + .cra_ablkcipher = { |
| 1455 | + .setkey = ablkcipher_setkey, |
| 1456 | + .encrypt = ablkcipher_encrypt, |
| 1457 | + .decrypt = ablkcipher_decrypt, |
| 1458 | + .geniv = "eseqiv", |
| 1459 | + .min_keysize = AES_MIN_KEY_SIZE, |
| 1460 | + .max_keysize = AES_MAX_KEY_SIZE, |
| 1461 | + .ivsize = AES_BLOCK_SIZE, |
| 1462 | + } |
| 1463 | + }, |
| 1464 | + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
| 1465 | + DESC_HDR_SEL0_AESU | |
| 1466 | + DESC_HDR_MODE0_AESU_CBC, |
| 1467 | + }, |
| 1468 | + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
| 1469 | + .alg.crypto = { |
| 1470 | + .cra_name = "cbc(des)", |
| 1471 | + .cra_driver_name = "cbc-des-talitos", |
| 1472 | + .cra_blocksize = DES_BLOCK_SIZE, |
| 1473 | + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 1474 | + CRYPTO_ALG_ASYNC, |
| 1475 | + .cra_type = &crypto_ablkcipher_type, |
| 1476 | + .cra_ablkcipher = { |
| 1477 | + .setkey = ablkcipher_setkey, |
| 1478 | + .encrypt = ablkcipher_encrypt, |
| 1479 | + .decrypt = ablkcipher_decrypt, |
| 1480 | + .geniv = "eseqiv", |
| 1481 | + .min_keysize = DES_KEY_SIZE, |
| 1482 | + .max_keysize = DES_KEY_SIZE, |
| 1483 | + .ivsize = DES_BLOCK_SIZE, |
| 1484 | + } |
| 1485 | + }, |
| 1486 | + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
| 1487 | + DESC_HDR_SEL0_DEU | |
| 1488 | + DESC_HDR_MODE0_DEU_CBC, |
| 1489 | + }, |
| 1490 | + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
| 1491 | + .alg.crypto = { |
| 1492 | + .cra_name = "cbc(des3_ede)", |
| 1493 | + .cra_driver_name = "cbc-3des-talitos", |
| 1494 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 1495 | + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 1496 | + CRYPTO_ALG_ASYNC, |
| 1497 | + .cra_type = &crypto_ablkcipher_type, |
| 1498 | + .cra_ablkcipher = { |
| 1499 | + .setkey = ablkcipher_setkey, |
| 1500 | + .encrypt = ablkcipher_encrypt, |
| 1501 | + .decrypt = ablkcipher_decrypt, |
| 1502 | + .geniv = "eseqiv", |
| 1503 | + .min_keysize = DES3_EDE_KEY_SIZE, |
| 1504 | + .max_keysize = DES3_EDE_KEY_SIZE, |
| 1505 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
| 1506 | + } |
| 1507 | + }, |
| 1508 | + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
| 1509 | + DESC_HDR_SEL0_DEU | |
| 1510 | + DESC_HDR_MODE0_DEU_CBC | |
| 1511 | + DESC_HDR_MODE0_DEU_3DES, |
| 1512 | + }, |
| 1513 | + /* AHASH algorithms. */ |
| 1514 | + { .type = CRYPTO_ALG_TYPE_AHASH, |
| 1515 | + .alg.hash = { |
| 1516 | + .init = ahash_init, |
| 1517 | + .update = ahash_update, |
| 1518 | + .final = ahash_final, |
| 1519 | + .finup = ahash_finup, |
| 1520 | + .digest = ahash_digest, |
| 1521 | + .halg.digestsize = MD5_DIGEST_SIZE, |
| 1522 | + .halg.base = { |
| 1523 | + .cra_name = "md5", |
| 1524 | + .cra_driver_name = "md5-talitos", |
| 1525 | + .cra_blocksize = MD5_BLOCK_SIZE, |
| 1526 | + .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
| 1527 | + CRYPTO_ALG_ASYNC, |
| 1528 | + .cra_type = &crypto_ahash_type |
| 1529 | + } |
| 1530 | + }, |
| 1531 | + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
| 1532 | + DESC_HDR_SEL0_MDEU | |
| 1533 | + DESC_HDR_MODE0_MDEU_MD5, |
| 1534 | + }, |
| 1535 | + { .type = CRYPTO_ALG_TYPE_AHASH, |
| 1536 | + .alg.hash = { |
| 1537 | + .init = ahash_init, |
| 1538 | + .update = ahash_update, |
| 1539 | + .final = ahash_final, |
| 1540 | + .finup = ahash_finup, |
| 1541 | + .digest = ahash_digest, |
| 1542 | + .halg.digestsize = SHA1_DIGEST_SIZE, |
| 1543 | + .halg.base = { |
| 1544 | + .cra_name = "sha1", |
| 1545 | + .cra_driver_name = "sha1-talitos", |
| 1546 | + .cra_blocksize = SHA1_BLOCK_SIZE, |
| 1547 | + .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
| 1548 | + CRYPTO_ALG_ASYNC, |
| 1549 | + .cra_type = &crypto_ahash_type |
| 1550 | + } |
| 1551 | + }, |
| 1552 | + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
| 1553 | + DESC_HDR_SEL0_MDEU | |
| 1554 | + DESC_HDR_MODE0_MDEU_SHA1, |
| 1555 | + }, |
| 1556 | +}; |
| 1557 | + |
| 1558 | +struct talitos_crypto_alg { |
| 1559 | + struct list_head entry; |
| 1560 | + struct device *dev; |
| 1561 | + struct talitos_alg_template algt; |
| 1562 | +}; |
| 1563 | + |
| 1564 | +static int talitos_cra_init(struct crypto_tfm *tfm) |
| 1565 | +{ |
| 1566 | + struct crypto_alg *alg = tfm->__crt_alg; |
| 1567 | + struct talitos_crypto_alg *talitos_alg; |
| 1568 | + struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); |
| 1569 | + |
| 1570 | + if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) |
| 1571 | + talitos_alg = container_of(__crypto_ahash_alg(alg), |
| 1572 | + struct talitos_crypto_alg, |
| 1573 | + algt.alg.hash); |
| 1574 | + else |
| 1575 | + talitos_alg = container_of(alg, struct talitos_crypto_alg, |
| 1576 | + algt.alg.crypto); |
| 1577 | + |
| 1578 | + /* update context with ptr to dev */ |
| 1579 | + ctx->dev = talitos_alg->dev; |
| 1580 | + |
| 1581 | + /* copy descriptor header template value */ |
| 1582 | + ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; |
| 1583 | + |
| 1584 | + return 0; |
| 1585 | +} |
| 1586 | + |
| 1587 | +static int talitos_cra_init_ahash(struct crypto_tfm *tfm) |
| 1588 | +{ |
| 1589 | + struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); |
| 1590 | + |
| 1591 | + talitos_cra_init(tfm); |
| 1592 | + |
| 1593 | + ctx->keylen = 0; |
| 1594 | + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| 1595 | + sizeof(struct talitos_ahash_req_ctx)); |
| 1596 | + |
| 1597 | + return 0; |
| 1598 | +} |
| 1599 | + |
| 1600 | +/* |
| 1601 | + * given the alg's descriptor header template, determine whether descriptor |
| 1602 | + * type and primary/secondary execution units required match the hw |
| 1603 | + * capabilities read from the SEC ID register at probe time. |
| 1604 | + */ |
| 1605 | +static int hw_supports(struct device *dev, __be32 desc_hdr_template) |
| 1606 | +{ |
| 1607 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 1608 | + int ret; |
| 1609 | + |
| 1610 | + ret = (DESC_TYPE(desc_hdr_template) & priv->desc_types) && |
| 1611 | + (PRIMARY_EU(desc_hdr_template) & priv->exec_units); |
| 1612 | + |
| 1613 | + if (SECONDARY_EU(desc_hdr_template)) |
| 1614 | + ret = ret && (SECONDARY_EU(desc_hdr_template) |
| 1615 | + & priv->exec_units); |
| 1616 | + |
| 1617 | + return ret; |
| 1618 | +} |
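| | + |
| | +/* |
| | + * Minimal usage sketch, illustrative only and not called anywhere in the |
| | + * driver: the cbc(aes) entry in driver_algs[] above carries exactly this |
| | + * template, so the check reduces to finding the common-nonsnoop descriptor |
| | + * type in priv->desc_types and the AESU bit in priv->exec_units; with no |
| | + * secondary EU selected, the SECONDARY_EU() clause is skipped. |
| | + */ |
| | +static inline int hw_supports_aes_cbc(struct device *dev) |
| | +{ |
| | + return hw_supports(dev, DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
| | + DESC_HDR_SEL0_AESU | |
| | + DESC_HDR_MODE0_AESU_CBC); |
| | +} |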
| 1619 | + |
| 1620 | +static int talitos_remove(struct platform_device *pdev) |
| 1621 | +{ |
| 1622 | + struct device *dev = &pdev->dev; |
| 1623 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 1624 | + struct talitos_crypto_alg *t_alg, *n; |
| 1625 | + int i; |
| 1626 | + |
| 1627 | + list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { |
| 1628 | + switch (t_alg->algt.type) { |
| 1629 | + case CRYPTO_ALG_TYPE_ABLKCIPHER: |
| 1630 | + case CRYPTO_ALG_TYPE_AEAD: |
| 1631 | + crypto_unregister_alg(&t_alg->algt.alg.crypto); |
| 1632 | + break; |
| 1633 | + case CRYPTO_ALG_TYPE_AHASH: |
| 1634 | + crypto_unregister_ahash(&t_alg->algt.alg.hash); |
| 1635 | + break; |
| 1636 | + } |
| 1637 | + list_del(&t_alg->entry); |
| 1638 | + kfree(t_alg); |
| 1639 | + } |
| 1640 | + |
| 1641 | + for (i = 0; i < priv->num_channels; i++) |
| 1642 | + kfree(priv->chan[i].fifo); |
| 1643 | + |
| 1644 | + kfree(priv->chan); |
| 1645 | + |
| 1646 | + if (priv->irq != 0) |
| 1647 | + free_irq(priv->irq, dev); |
| 1648 | + |
| 1649 | + tasklet_kill(&priv->done_task); |
| 1650 | + |
| 1651 | + if (priv->reg) |
| | + iounmap(priv->reg); |
| 1652 | + |
| 1653 | + dev_set_drvdata(dev, NULL); |
| 1654 | + |
| 1655 | + kfree(priv); |
| 1656 | + |
| 1657 | + return 0; |
| 1658 | +} |
| 1659 | + |
| 1660 | +static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, |
| 1661 | + struct talitos_alg_template |
| 1662 | + *template) |
| 1663 | +{ |
| 1664 | + struct talitos_private *priv = dev_get_drvdata(dev); |
| 1665 | + struct talitos_crypto_alg *t_alg; |
| 1666 | + struct crypto_alg *alg; |
| 1667 | + |
| 1668 | + t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL); |
| 1669 | + if (!t_alg) |
| 1670 | + return ERR_PTR(-ENOMEM); |
| 1671 | + |
| 1672 | + t_alg->algt = *template; |
| 1673 | + |
| 1674 | + switch (t_alg->algt.type) { |
| 1675 | + case CRYPTO_ALG_TYPE_ABLKCIPHER: |
| 1676 | + alg = &t_alg->algt.alg.crypto; |
| 1677 | + alg->cra_init = talitos_cra_init; |
| 1678 | + break; |
| 1679 | + case CRYPTO_ALG_TYPE_AHASH: |
| 1680 | + alg = &t_alg->algt.alg.hash.halg.base; |
| 1681 | + alg->cra_init = talitos_cra_init_ahash; |
| 1682 | + if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && |
| 1683 | + !strcmp(alg->cra_name, "sha224")) { |
| 1684 | + t_alg->algt.alg.hash.init = ahash_init_sha224_swinit; |
| 1685 | + t_alg->algt.desc_hdr_template = |
| 1686 | + DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
| 1687 | + DESC_HDR_SEL0_MDEU | |
| 1688 | + DESC_HDR_MODE0_MDEU_SHA256; |
| 1689 | + } |
| 1690 | + break; |
| 1691 | + default: |
| 1692 | + dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); |
| 1693 | + kfree(t_alg); |
| | + return ERR_PTR(-EINVAL); |
| 1694 | + } |
| 1695 | + |
| 1696 | + alg->cra_module = THIS_MODULE; |
| 1697 | + alg->cra_priority = TALITOS_CRA_PRIORITY; |
| 1698 | + alg->cra_alignmask = 0; |
| 1699 | + alg->cra_ctxsize = sizeof(struct talitos_ctx); |
| 1700 | + |
| 1701 | + t_alg->dev = dev; |
| 1702 | + |
| 1703 | + return t_alg; |
| 1704 | +} |
| 1705 | + |
| 1706 | +static int __devinit talitos_probe(struct platform_device *pdev) |
| 1707 | +{ |
| 1708 | + struct device *dev = &pdev->dev; |
| 1709 | + struct talitos_private *priv; |
| 1710 | + int prop; |
| 1711 | + struct resource *r; |
| 1712 | + int i, err; |
| 1713 | + |
| 1714 | + priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); |
| 1715 | + if (!priv) |
| 1716 | + return -ENOMEM; |
| 1717 | + |
| 1718 | + dev_set_drvdata(dev, priv); |
| 1719 | + |
| 1720 | + priv->pdev = pdev; |
| 1721 | + |
| 1722 | + tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev); |
| 1723 | + |
| 1724 | + INIT_LIST_HEAD(&priv->alg_list); |
| 1725 | + |
| 1726 | + priv->irq = 64 + ISC_SEC; |
| 1727 | + /* get the irq line */ |
| 1728 | + err = request_irq(priv->irq, talitos_interrupt, IRQF_DISABLED, |
| 1729 | + dev_driver_string(dev), dev); |
| 1730 | + if (err) { |
| 1731 | + dev_err(dev, "failed to request irq %d\n", priv->irq); |
| 1732 | + priv->irq = 0; |
| 1733 | + goto err_out; |
| 1734 | + } |
| 1735 | + MCF_ICR(ISC_SEC) = ILP_SEC; |
| 1736 | + |
| 1737 | + /* get a pointer to the register memory */ |
| 1738 | + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1739 | + priv->reg = ioremap(r->start, resource_size(r)); |
| 1740 | + if (!priv->reg) { |
| 1741 | + dev_err(dev, "failed to ioremap\n"); |
| 1742 | + err = -ENOMEM; |
| 1743 | + goto err_out; |
| 1744 | + } |
| | + |
| | + /* get SEC version capabilities from the ID register */ |
| | + prop = in_be32(priv->reg + TALITOS_ID); |
| 1745 | + if (prop & TALITOS_ID_SEC_1_1) { |
| 1746 | + priv->num_channels = TALITOS_NCHANNELS_SEC_1_1; |
| 1747 | + priv->chfifo_len = TALITOS_CHFIFOLEN_SEC_1_1; |
| 1748 | + priv->exec_units = TALITOS_HAS_EUS_SEC_1_1; |
| 1749 | + priv->desc_types = TALITOS_HAS_DESCTYPES_SEC_1_1; |
| 1750 | + } else { |
| 1751 | + dev_err(dev, "failed to id device\n"); |
| 1752 | + goto err_out; |
| 1753 | + } |
| 1754 | + |
| 1755 | + priv->chan = kcalloc(priv->num_channels, |
| 1756 | + sizeof(struct talitos_channel), GFP_KERNEL); |
| 1757 | + if (!priv->chan) { |
| 1758 | + dev_err(dev, "failed to allocate channel management space\n"); |
| 1759 | + err = -ENOMEM; |
| 1760 | + goto err_out; |
| 1761 | + } |
| 1762 | + |
| 1763 | + for (i = 0; i < priv->num_channels; i++) { |
| 1764 | + spin_lock_init(&priv->chan[i].head_lock); |
| 1765 | + spin_lock_init(&priv->chan[i].tail_lock); |
| 1766 | + } |
| 1767 | + |
| 1768 | + priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); |
| 1769 | + |
| 1770 | + for (i = 0; i < priv->num_channels; i++) { |
| 1771 | + priv->chan[i].fifo = kcalloc(priv->fifo_len, |
| 1772 | + sizeof(struct talitos_request), GFP_KERNEL); |
| 1773 | + if (!priv->chan[i].fifo) { |
| 1774 | + dev_err(dev, "failed to allocate request fifo %d\n", i); |
| 1775 | + err = -ENOMEM; |
| 1776 | + goto err_out; |
| 1777 | + } |
| 1778 | + } |
| 1779 | + |
| 1780 | + for (i = 0; i < priv->num_channels; i++) |
| 1781 | + atomic_set(&priv->chan[i].submit_count, |
| 1782 | + -(priv->chfifo_len - 1)); |
| 1783 | + |
| 1784 | + dma_set_mask(dev, DMA_BIT_MASK(36)); |
| 1785 | + |
| 1786 | + /* reset and initialize the h/w */ |
| 1787 | + err = init_device(dev); |
| 1788 | + if (err) { |
| 1789 | + dev_err(dev, "failed to initialize device\n"); |
| 1790 | + goto err_out; |
| 1791 | + } |
| 1792 | + |
| 1793 | + /* register crypto algorithms the device supports */ |
| 1794 | + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
| 1795 | + if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { |
| 1796 | + struct talitos_crypto_alg *t_alg; |
| 1797 | + char *name = NULL; |
| 1798 | + |
| 1799 | + t_alg = talitos_alg_alloc(dev, &driver_algs[i]); |
| 1800 | + if (IS_ERR(t_alg)) { |
| 1801 | + err = PTR_ERR(t_alg); |
| 1802 | + goto err_out; |
| 1803 | + } |
| 1804 | + |
| 1805 | + switch (t_alg->algt.type) { |
| 1806 | + case CRYPTO_ALG_TYPE_ABLKCIPHER: |
| 1807 | + case CRYPTO_ALG_TYPE_AEAD: |
| 1808 | + err = crypto_register_alg( |
| 1809 | + &t_alg->algt.alg.crypto); |
| 1810 | + name = t_alg->algt.alg.crypto.cra_driver_name; |
| 1811 | + break; |
| 1812 | + case CRYPTO_ALG_TYPE_AHASH: |
| 1813 | + err = crypto_register_ahash( |
| 1814 | + &t_alg->algt.alg.hash); |
| 1815 | + name = |
| 1816 | + t_alg->algt.alg.hash.halg.base.cra_driver_name; |
| 1817 | + break; |
| 1818 | + } |
| 1819 | + if (err) { |
| 1820 | + dev_err(dev, "%s alg registration failed\n", |
| 1821 | + name); |
| 1822 | + kfree(t_alg); |
| 1823 | + } else { |
| 1824 | + list_add_tail(&t_alg->entry, &priv->alg_list); |
| 1825 | + dev_info(dev, "%s\n", name); |
| 1826 | + } |
| 1827 | + } |
| 1828 | + } |
| 1829 | + |
| 1830 | + return 0; |
| 1831 | + |
| 1832 | +err_out: |
| 1833 | + talitos_remove(pdev); |
| 1834 | + |
| 1835 | + return err; |
| 1836 | +} |
| 1837 | + |
| 1838 | +static struct platform_driver talitos_driver = { |
| 1839 | + .driver = { |
| 1840 | + .name = "talitos", |
| 1841 | + .owner = THIS_MODULE, |
| 1842 | + }, |
| 1843 | + .probe = talitos_probe, |
| 1844 | + .remove = talitos_remove, |
| 1845 | +}; |
| 1846 | + |
| 1847 | +static int __init talitos_init(void) |
| 1848 | +{ |
| 1849 | + return platform_driver_register(&talitos_driver); |
| 1850 | +} |
| 1851 | +module_init(talitos_init); |
| 1852 | + |
| 1853 | +static void __exit talitos_exit(void) |
| 1854 | +{ |
| 1855 | + platform_driver_unregister(&talitos_driver); |
| 1856 | +} |
| 1857 | +module_exit(talitos_exit); |
| 1858 | + |
| 1859 | +MODULE_LICENSE("GPL"); |
| 1860 | +MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>"); |
| 1861 | +MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver"); |
| 1862 | --- /dev/null |
| 1863 | +++ b/drivers/crypto/cf_talitos.h |
| 1864 | @@ -0,0 +1,229 @@ |
| 1865 | +/* |
| 1866 | + * Freescale Coldfire SEC (talitos) device dependent data structures |
| 1867 | + * |
| 1868 | + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. |
| 1869 | + * |
| 1870 | + * This program is free software; you can redistribute it and/or modify |
| 1871 | + * it under the terms of the GNU General Public License as published by |
| 1872 | + * the Free Software Foundation; either version 2 of the License, or |
| 1873 | + * (at your option) any later version. |
| 1874 | + */ |
| 1875 | + |
| 1876 | +/* device ID register values */ |
| 1877 | +#define TALITOS_ID_SEC_1_1 (0x09000000) /* MCF547x and MCF548x */ |
| 1878 | + |
| 1879 | +/* |
| 1880 | + * following num_channels, channel-fifo-depth, exec-unit-mask, and |
| 1881 | + * descriptor-types-mask are for forward-compatibility with openfirmware |
| 1882 | + * flat device trees |
| 1883 | + */ |
| 1884 | + |
| 1885 | +/* |
| 1886 | + * num_channels : the number of channels available in each SEC version. |
| 1887 | + */ |
| 1888 | + |
| 1889 | +/* n.b. this driver requires these values be a power of 2 */ |
| 1890 | +#define TALITOS_NCHANNELS_SEC_1_1 2 |
| 1891 | + |
| 1892 | +/* |
| 1893 | + * channel-fifo-depth : The number of descriptor |
| 1894 | + * pointers a channel fetch fifo can hold. |
| 1895 | + */ |
| 1896 | +#define TALITOS_CHFIFOLEN_SEC_1_1 1 |
| 1897 | + |
| 1898 | +/* exec-unit-mask : the execution units available in each SEC version */ |
| 1899 | +#define TALITOS_HAS_EUS_SEC_1_1 0x7 |
| 1900 | + |
| 1901 | +/* descriptor-types-mask : the descriptor types available in each SEC version */ |
| 1902 | +#define TALITOS_HAS_DESCTYPES_SEC_1_1 0xf |
| 1903 | + |
| 1904 | +/* |
| 1905 | + * a TALITOS_xxx_LO address points to the low data bits (32-63) of the register |
| 1906 | + */ |
| 1907 | +/* global register offset addresses */ |
| 1908 | +/* EU assignment controller register (not used by this driver) */ |
| 1909 | +#define TALITOS_EUACR 0x1000 |
| 1910 | +#define TALITOS_EUACR_LO 0x1004 |
| 1911 | + |
| 1912 | +#define TALITOS_IMR 0x1008 /* interrupt mask register */ |
| 1913 | +#define TALITOS_IMR_ALL 0xf8000000 /* enable all interrupts mask */ |
| 1914 | +#define TALITOS_IMR_ERR 0xa8000000 /* mask error interrupts */ |
| 1915 | +#define TALITOS_IMR_DONE 0x50000000 /* mask done interrupts */ |
| 1916 | +#define TALITOS_IMR_LO 0x100C /* interrupt mask register */ |
| 1917 | +/* mask all channel interrupts mask */ |
| 1918 | +#define TALITOS_IMR_LO_ALL 0x03333340 |
| 1919 | +#define TALITOS_IMR_LO_ERR 0x02222240 /* mask error interrupts */ |
| 1920 | +#define TALITOS_IMR_LO_DONE 0x01111100 /* mask done interrupts */ |
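| | + |
| | +/* |
| | + * Note, as follows from the values above: TALITOS_IMR_ALL is |
| | + * TALITOS_IMR_ERR | TALITOS_IMR_DONE (0xa8000000 | 0x50000000 == 0xf8000000), |
| | + * and TALITOS_IMR_LO_ALL is likewise TALITOS_IMR_LO_ERR | TALITOS_IMR_LO_DONE; |
| | + * the _LO register holds bits 32-63 of the 64-bit interrupt mask. |
| | + */ |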
| 1921 | + |
| 1922 | +#define TALITOS_ISR 0x1010 /* interrupt status register */ |
| 1923 | +#define TALITOS_ISR_CHERR 0xa8000000 /* errors mask */ |
| 1924 | +#define TALITOS_ISR_CHDONE 0x50000000 /* channel(s) done mask */ |
| 1925 | +#define TALITOS_ISR_LO 0x1014 /* interrupt status register */ |
| 1926 | + |
| 1927 | +#define TALITOS_ICR 0x1018 /* interrupt clear register */ |
| 1928 | +#define TALITOS_ICR_CHERR 0xa8000000 /* errors enable */ |
| 1929 | +#define TALITOS_ICR_CHDONE 0x50000000 /* channel(s) done enable */ |
| 1930 | +#define TALITOS_ICR_LO 0x101C /* interrupt clear register */ |
| 1931 | +#define TALITOS_ICR_LO_CHERR 0x02222240 /* errors enable */ |
| 1932 | +#define TALITOS_ICR_LO_CHDONE 0x01111100 /* channel(s) done enable */ |
| 1933 | + |
| 1934 | +#define TALITOS_ID 0x1020 |
| 1935 | + |
| 1936 | +/* EU assignment status register (not used by this driver) */ |
| 1937 | +#define TALITOS_EUASR 0x1028 |
| 1938 | +#define TALITOS_EUASR_LO 0x102C |
| 1939 | + |
| 1940 | +#define TALITOS_MCR 0x1030 /* master control register */ |
| 1941 | +#define TALITOS_MCR_SWR 0x01000000 |
| 1942 | + |
| 1943 | +#define TALITOS_MEAR 0x1038 |
| 1944 | + |
| 1945 | +/* channel register address stride */ |
| 1946 | +#define TALITOS_CH_STRIDE 0x1000 |
| 1947 | + |
| 1948 | +/* channel register offset addresses and bits */ |
| 1949 | +#define TALITOS_CCCR(ch) ((ch) * TALITOS_CH_STRIDE + 0x200c) |
| 1950 | +#define TALITOS_CCCR_RESET 0x1 /* Channel Reset bit */ |
| 1951 | +#define TALITOS_CCCR_CDWE 0x10 /* Channel done writeback enable bit */ |
| 1952 | +#define TALITOS_CCCR_NE 0x8 /* Fetch Next Descriptor Enable bit */ |
| 1953 | +#define TALITOS_CCCR_NT 0x4 /* Notification type bit */ |
| 1954 | +#define TALITOS_CCCR_CDIE 0x2 /* Channel Done Interrupt Enable bit */ |
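| | + |
| | +/* |
| | + * For example, with TALITOS_CH_STRIDE of 0x1000 the macro above yields |
| | + * TALITOS_CCCR(0) == 0x200c and TALITOS_CCCR(1) == 0x300c; each channel's |
| | + * register block sits one stride above the previous channel's. |
| | + */ |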
| 1955 | + |
| 1956 | +/* Crypto-Channel Pointer Status Reg */ |
| 1957 | +#define TALITOS_CCPSR(ch) ((ch) * TALITOS_CH_STRIDE + 0x2010) |
| 1958 | +#define TALITOS_CCPSR_LO(ch) ((ch) * TALITOS_CH_STRIDE + 0x2014) |
| 1959 | +#define TALITOS_CCPSR_LO_TEA 0x2000 /* Transfer error acknowledge */ |
| 1960 | +#define TALITOS_CCPSR_LO_PERR 0x1000 /* Pointer not complete error */ |
| 1961 | +#define TALITOS_CCPSR_LO_DERR 0x400 /* Descriptor error */ |
| 1962 | +#define TALITOS_CCPSR_LO_SERR 0x200 /* Static assignment error */ |
| 1963 | +#define TALITOS_CCPSR_LO_EUERR 0x100 /* EU error */ |
| 1964 | + |
| 1965 | +/* channel fetch fifo register */ |
| 1966 | +#define TALITOS_FF(ch) ((ch) * TALITOS_CH_STRIDE + 0x204c) |
| 1967 | + |
| 1968 | +/* Crypto-Channel Current Descriptor Pointer Reg */ |
| 1969 | +#define TALITOS_CDPR(ch) ((ch) * TALITOS_CH_STRIDE + 0x2044) |
| 1970 | + |
| 1971 | +/* Descriptor Buffer (debug) 0x2080-0x20BF*/ |
| 1972 | +#define TALITOS_DESCBUF(ch) ((ch) * TALITOS_CH_STRIDE + 0x2080) |
| 1973 | + |
| 1974 | +/* execution unit register offset addresses and bits */ |
| 1975 | +#define TALITOS_DEURCR 0xa018 /* DEU reset control register */ |
| 1976 | +#define TALITOS_DEURCR_RESET 0x01000000 /* DEU reset bit */ |
| 1977 | +#define TALITOS_DEUSR 0xa028 /* DEU status register */ |
| 1978 | +#define TALITOS_DEUSR_RESET 0x01000000 /* DEU Reset status bit */ |
| 1979 | +#define TALITOS_DEUISR 0xa030 /* DEU interrupt status register */ |
| 1980 | +#define TALITOS_DEUIMR 0xa038 /* DEU interrupt mask register */ |
| 1981 | +#define TALITOS_DEUIMR_MASK 0xf63f0000 /* DEU interrupt control mask*/ |
| 1982 | +#define TALITOS_DEUIMR_KPE_MASK 0x00200000 /* DEU interrupt KPE mask*/ |
| 1983 | + |
| 1984 | +#define TALITOS_AESURCR 0x12018 /* AESU reset control register */ |
| 1985 | +#define TALITOS_AESURCR_RESET 0x01000000 /* AESU reset bit */ |
| 1986 | +#define TALITOS_AESUSR 0x12028 /* AESU status register */ |
| 1987 | +#define TALITOS_AESUSR_RESET 0x01000000 /* AESU Reset status bit */ |
| 1988 | +#define TALITOS_AESUISR 0x12030 /* AESU interrupt status register */ |
| 1989 | +#define TALITOS_AESUIMR 0x12038 /* AESU interrupt mask register */ |
| 1990 | +#define TALITOS_AESUIMR_MASK 0xf61f0000 /* AESU interrupt control mask*/ |
| 1991 | + |
| 1992 | +#define TALITOS_MDEURCR 0xc018 /* MDEU reset control register */ |
| 1993 | +#define TALITOS_MDEURCR_RESET 0x01000000 /* MDEU reset bit */ |
| 1994 | +#define TALITOS_MDEUSR 0xc028 /* MDEU status register */ |
| 1995 | +#define TALITOS_MDEUSR_RESET 0x01000000 /* MDEU Reset status bit */ |
| 1996 | +#define TALITOS_MDEUISR 0xc030 /* MDEU interrupt status register */ |
| 1997 | +#define TALITOS_MDEUIMR 0xc038 /* MDEU interrupt mask register */ |
| 1998 | +#define TALITOS_MDEUIMR_MASK 0xc41f0000 /* MDEU interrupt control mask*/ |
| 1999 | + |
| 2000 | +#define TALITOS_AFEURCR 0x8018 /* AFEU reset control register */ |
| 2001 | +#define TALITOS_AFEURCR_RESET 0x01000000 /* AFEU reset bit */ |
| 2002 | +#define TALITOS_AFEUSR 0x8028 /* AFEU status register */ |
| 2003 | +#define TALITOS_AFEUSR_RESET 0x01000000 /* AFEU Reset status bit */ |
| 2004 | +#define TALITOS_AFEUISR 0x8030 /* AFEU interrupt status register */ |
| 2005 | +#define TALITOS_AFEUIMR 0x8038 /* AFEU interrupt mask register */ |
| 2006 | +#define TALITOS_AFEUIMR_MASK 0xf61f0000 /* AFEU interrupt control mask*/ |
| 2007 | + |
| 2008 | +#define TALITOS_RNGRCR 0xe018 /* RNG Reset control register */ |
| 2009 | +#define TALITOS_RNGRCR_SR 0x01000000 /* RNG RNGRCR:Software Reset */ |
| 2010 | +#define TALITOS_RNGSR 0xe028 /* RNG status register */ |
| 2011 | +#define TALITOS_RNGSR_RD 0x01000000 /* RNG Reset done */ |
| 2012 | +#define TALITOS_RNGISR 0xe030 /* RNG Interrupt status register */ |
| 2013 | +#define TALITOS_RNGIMR 0xe038 /* RNG interrupt mask register */ |
| 2014 | +#define TALITOS_RNGIMR_MASK 0xc2100000 /* RNG interrupt control mask*/ |
| 2015 | + |
| 2016 | +#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 |
| 2017 | +#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 |
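| | + |
| | +/* |
| | + * These sizes look like the largest hash state in each group plus an 8-byte |
| | + * running message length: 32 bytes of SHA-256 state + 8 == 0x28, and 64 bytes |
| | + * of SHA-384/512 state + 8 == 0x48 (an inference from the values themselves). |
| | + */ |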
| 2018 | + |
| 2019 | +/* AFEU (RC4/ARC4) definitions */ |
| 2020 | +#define ARC4_SEC_MIN_KEY_SIZE 5 |
| 2021 | +#define ARC4_SEC_MAX_KEY_SIZE 16 |
| 2022 | +#define ARC4_SEC_CONTEXT_LEN 259 |
| 2023 | +#define SEC_ALG_AFEU_KEY 0x10200050 |
| 2024 | +#define SEC_ALG_AFEU_CONTEXT 0x10700050 |
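| | + |
| | +/* |
| | + * These two appear to be pre-composed descriptor headers built from the |
| | + * DESC_HDR_* encodings below: SEC_ALG_AFEU_KEY == DESC_HDR_SEL0_AFEU | |
| | + * DESC_HDR_MODE0_AFEU_USE_KEY | DESC_HDR_TYPE_COMMON_NONSNOOP_AFEU |
| | + * (0x10000000 | 0x00200000 | 0x50), while SEC_ALG_AFEU_CONTEXT swaps in |
| | + * DESC_HDR_MODE0_AFEU_USE_CONTEXT (0x00700000). |
| | + */ |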
| 2025 | + |
| 2026 | +/* talitos descriptor header (hdr) bits */ |
| 2027 | + |
| 2028 | +/* primary execution unit select */ |
| 2029 | +#define DESC_HDR_SEL0_MASK 0xf0000000 |
| 2030 | +#define DESC_HDR_SEL0_AFEU 0x10000000 |
| 2031 | +#define DESC_HDR_SEL0_DEU 0x20000000 |
| 2032 | +#define DESC_HDR_SEL0_MDEU 0x30000000 |
| 2033 | +#define DESC_HDR_SEL0_RNG 0x40000000 |
| 2034 | +#define DESC_HDR_SEL0_AESU 0x60000000 |
| 2035 | + |
| 2036 | +/* primary execution unit mode (MODE0) and derivatives */ |
| 2037 | +#define DESC_HDR_MODE0_MASK 0x0ff00000 |
| 2038 | +#define DESC_HDR_MODE0_ENCRYP 0x00100000 |
| 2039 | +#define DESC_HDR_MODE0_AFEU_USE_KEY 0x00200000 |
| 2040 | +#define DESC_HDR_MODE0_AFEU_USE_CONTEXT 0x00700000 |
| 2041 | +#define DESC_HDR_MODE0_AESU_CBC 0x00200000 |
| 2042 | +#define DESC_HDR_MODE0_AESU_ENC 0x00100000 |
| 2043 | +#define DESC_HDR_MODE0_DEU_CBC 0x00400000 |
| 2044 | +#define DESC_HDR_MODE0_DEU_3DES 0x00200000 |
| 2045 | +#define DESC_HDR_MODE0_DEU_ENC 0x00100000 |
| 2046 | +#define DESC_HDR_MODE0_MDEU_CONT 0x08000000 |
| 2047 | +#define DESC_HDR_MODE0_MDEU_INIT 0x01000000 /* init starting regs */ |
| 2048 | +#define DESC_HDR_MODE0_MDEU_HMAC 0x00800000 |
| 2049 | +#define DESC_HDR_MODE0_MDEU_PAD 0x00400000 /* PD */ |
| 2050 | +#define DESC_HDR_MODE0_MDEU_MD5 0x00200000 |
| 2051 | +#define DESC_HDR_MODE0_MDEU_SHA256 0x00100000 |
| 2052 | +#define DESC_HDR_MODE0_MDEU_SHA1 0x00000000 /* SHA-160 */ |
| 2053 | +#define DESC_HDR_MODE0_MDEU_MD5_HMAC \ |
| 2054 | + (DESC_HDR_MODE0_MDEU_MD5 | DESC_HDR_MODE0_MDEU_HMAC) |
| 2055 | +#define DESC_HDR_MODE0_MDEU_SHA256_HMAC \ |
| 2056 | + (DESC_HDR_MODE0_MDEU_SHA256 | DESC_HDR_MODE0_MDEU_HMAC) |
| 2057 | +#define DESC_HDR_MODE0_MDEU_SHA1_HMAC \ |
| 2058 | + (DESC_HDR_MODE0_MDEU_SHA1 | DESC_HDR_MODE0_MDEU_HMAC) |
| 2059 | + |
| 2060 | +/* secondary execution unit select (SEL1) */ |
| 2061 | +/* it's MDEU or nothing */ |
| 2062 | +#define DESC_HDR_SEL1_MASK 0x000f0000 |
| 2063 | +#define DESC_HDR_SEL1_MDEU 0x00030000 |
| 2064 | + |
| 2065 | +/* secondary execution unit mode (MODE1) and derivatives */ |
| 2066 | +#define DESC_HDR_MODE1_MDEU_INIT 0x00001000 /* init starting regs */ |
| 2067 | +#define DESC_HDR_MODE1_MDEU_HMAC 0x00000800 |
| 2068 | +#define DESC_HDR_MODE1_MDEU_PAD 0x00000400 /* PD */ |
| 2069 | +#define DESC_HDR_MODE1_MDEU_MD5 0x00000200 |
| 2070 | +#define DESC_HDR_MODE1_MDEU_SHA256 0x00000100 |
| 2071 | +#define DESC_HDR_MODE1_MDEU_SHA1 0x00000000 /* SHA-160 */ |
| 2072 | +#define DESC_HDR_MODE1_MDEU_MD5_HMAC \ |
| 2073 | + (DESC_HDR_MODE1_MDEU_MD5 | DESC_HDR_MODE1_MDEU_HMAC) |
| 2074 | +#define DESC_HDR_MODE1_MDEU_SHA256_HMAC \ |
| 2075 | + (DESC_HDR_MODE1_MDEU_SHA256 | DESC_HDR_MODE1_MDEU_HMAC) |
| 2076 | +#define DESC_HDR_MODE1_MDEU_SHA1_HMAC \ |
| 2077 | + (DESC_HDR_MODE1_MDEU_SHA1 | DESC_HDR_MODE1_MDEU_HMAC) |
| 2078 | + |
| 2079 | +/* direction of overall data flow (DIR) */ |
| 2080 | +#define DESC_HDR_DIR_OUTBOUND 0x00000000 |
| 2081 | +#define DESC_HDR_DIR_INBOUND 0x00000002 |
| 2082 | + |
| 2083 | +/* done notification (DN) */ |
| 2084 | +#define DESC_HDR_DONE 0x00000001 |
| 2085 | + |
| 2086 | +/* descriptor types */ |
| 2087 | +#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP (0 << 4) |
| 2088 | +#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU (1 << 4) |
| 2089 | +#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU (2 << 4) |
| 2090 | +#define DESC_HDR_TYPE_NONHMAC_SNOOP_NO_AFEU (3 << 4) |
| 2091 | +#define DESC_HDR_TYPE_COMMON_NONSNOOP_AFEU (5 << 4) |
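| | + |
| | +/* |
| | + * As used in driver_algs[] in cf_talitos.c, a header template is simply the |
| | + * OR of a descriptor type, a primary EU select and its MODE0 bits; e.g. the |
| | + * cbc(des3_ede) entry works out to DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
| | + * DESC_HDR_SEL0_DEU | DESC_HDR_MODE0_DEU_CBC | DESC_HDR_MODE0_DEU_3DES, |
| | + * i.e. 0x20600010. |
| | + */ |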
| 2092 | + |
| 2093 | +#define TALITOS_HDR_DONE_BITS 0xff000000 |
| 2094 | |