drivers/crypto/padlock-aes.c

/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
    unsigned int __attribute__ ((__packed__))
        rounds:4,
        algo:3,
        keygen:1,
        interm:1,
        encdec:1,
        ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

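/*
 * Illustrative example: for AES-128 the hardware can expand the key
 * itself, so aes_set_key() below ends up with control words like
 *
 *     cword.encrypt = { .rounds = 10, .ksize = 0, .keygen = 0 };
 *     cword.decrypt = { .rounds = 10, .ksize = 0, .keygen = 0, .encdec = 1 };
 *
 * (rounds = 10 + (key_len - 16) / 4, ksize = (key_len - 16) / 8).
 */
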
/* Whenever you make any changes to the following
 * structure, *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and that
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the hardware
 * reads more).
 */
struct aes_ctx {
    u32 E[AES_MAX_KEYLENGTH_U32]
        __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
    u32 d_data[AES_MAX_KEYLENGTH_U32]
        __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
    struct {
        struct cword encrypt;
        struct cword decrypt;
    } cword;
    u32 *D;
};

static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
    /* TODO: We should check the actual CPU model/stepping
             as it's possible that the capability will be
             added in future CPU revisions. */
    if (key_len == 16)
        return 1;
    return 0;
}

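/*
 * Worked example (assuming PADLOCK_ALIGNMENT == 16): if the crypto API
 * hands us a context at, say, 0x...c8 and cannot itself guarantee
 * 16-byte alignment, ALIGN() below rounds the pointer up to 0x...d0 so
 * that E, d_data and the control words meet the hardware's alignment
 * requirement.
 */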
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
    unsigned long addr = (unsigned long)ctx;
    unsigned long align = PADLOCK_ALIGNMENT;

    if (align <= crypto_tfm_ctx_alignment())
        align = 1;
    return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
    return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
    return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
               unsigned int key_len)
{
    struct aes_ctx *ctx = aes_ctx(tfm);
    const __le32 *key = (const __le32 *)in_key;
    u32 *flags = &tfm->crt_flags;
    struct crypto_aes_ctx gen_aes;
    int cpu;

    if (key_len % 8) {
        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
        return -EINVAL;
    }

    /*
     * If the hardware is capable of generating the extended key
     * itself, we must supply the plain key for both encryption
     * and decryption.
     */
    ctx->D = ctx->E;

    ctx->E[0] = le32_to_cpu(key[0]);
    ctx->E[1] = le32_to_cpu(key[1]);
    ctx->E[2] = le32_to_cpu(key[2]);
    ctx->E[3] = le32_to_cpu(key[3]);

    /* Prepare control words. */
    memset(&ctx->cword, 0, sizeof(ctx->cword));

    ctx->cword.decrypt.encdec = 1;
    ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
    ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
    ctx->cword.encrypt.ksize = (key_len - 16) / 8;
    ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

    /* Don't generate extended keys if the hardware can do it. */
    if (aes_hw_extkey_available(key_len))
        goto ok;

    ctx->D = ctx->d_data;
    ctx->cword.encrypt.keygen = 1;
    ctx->cword.decrypt.keygen = 1;

    if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
        return -EINVAL;
    }

    memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
    memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
    for_each_online_cpu(cpu)
        if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
            &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
            per_cpu(paes_last_cword, cpu) = NULL;

    return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
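/*
 * The xcrypt instructions cache the currently loaded key material, and
 * writing EFLAGS (here via pushf/popf) forces them to reload it on the
 * next use. The per-cpu record of the last control word lets us skip
 * that flush when the same tfm keeps running on this CPU.
 */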
static inline void padlock_reset_key(struct cword *cword)
{
    int cpu = raw_smp_processor_id();

    if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
        asm volatile ("pushfl; popfl");
#else
        asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
    per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'. These instructions
 * should therefore only be used inside an irq_ts_save()/irq_ts_restore()
 * region.
 */

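/*
 * Register contract of the xcrypt insns, mirrored by the asm
 * constraints below: ESI = input, EDI = output, EDX = control word,
 * EBX = key, ECX = block count; XCRYPTCBC additionally takes the IV
 * pointer in EAX and leaves an updated IV pointer there, which
 * cbc_aes_encrypt() uses to carry the IV across chunks.
 */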
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                  struct cword *control_word, int count)
{
    asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
              : "+S"(input), "+D"(output)
              : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                 u8 *iv, struct cword *control_word, int count)
{
    asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
              : "+S" (input), "+D" (output), "+a" (iv)
              : "d" (control_word), "b" (key), "c" (count));
    return iv;
}

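/*
 * Sizing note (illustrative): the copy helpers below only run for
 * count < fetch_blocks, so at most MAX_*_FETCH_BLOCKS - 1 blocks need
 * to fit in the bounce buffer; the extra PADLOCK_ALIGNMENT - 1 bytes
 * leave room to align the buffer start. Any hardware overread past the
 * copied data lands in our caller's stack frame, which is mapped.
 */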
static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
               struct cword *cword, int count)
{
    /*
     * Padlock prefetches extra data so we must provide mapped input buffers.
     * Assume there are at least 16 bytes of stack already in use.
     */
    u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
    u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

    memcpy(tmp, in, count * AES_BLOCK_SIZE);
    rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
               u8 *iv, struct cword *cword, int count)
{
    /*
     * Padlock prefetches extra data so we must provide mapped input buffers.
     * Assume there are at least 16 bytes of stack already in use.
     */
    u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
    u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

    memcpy(tmp, in, count * AES_BLOCK_SIZE);
    return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

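/*
 * Worked example (assuming 4 KiB pages and ecb_fetch_bytes == 32): an
 * input block at page offset 4088 gives 4088 + 32 = 4120 > 4096, so
 * the hardware prefetch could touch the next, possibly unmapped, page
 * and we take the bounce-buffer path instead.
 */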
static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
                 struct cword *cword, int count)
{
    /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
     * We could avoid some copying here but it's probably not worth it.
     */
    if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
        ecb_crypt_copy(in, out, key, cword, count);
        return;
    }

    rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
                u8 *iv, struct cword *cword, int count)
{
    /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
    if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
        return cbc_crypt_copy(in, out, key, iv, cword, count);

    return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

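/*
 * For buffers of at least fetch_blocks blocks, the odd leading blocks
 * (count & (fetch_blocks - 1)) are processed first: their overfetch
 * stays inside the buffer, and the remaining count is then a multiple
 * of fetch_blocks, so the final rep never reads past the end.
 */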
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                      void *control_word, u32 count)
{
    u32 initial = count & (ecb_fetch_blocks - 1);

    if (count < ecb_fetch_blocks) {
        ecb_crypt(input, output, key, control_word, count);
        return;
    }

    if (initial)
        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
                  : "+S"(input), "+D"(output)
                  : "d"(control_word), "b"(key), "c"(initial));

    asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
              : "+S"(input), "+D"(output)
              : "d"(control_word), "b"(key), "c"(count - initial));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                     u8 *iv, void *control_word, u32 count)
{
    u32 initial = count & (cbc_fetch_blocks - 1);

    if (count < cbc_fetch_blocks)
        return cbc_crypt(input, output, key, iv, control_word, count);

    if (initial)
        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
                  : "+S" (input), "+D" (output), "+a" (iv)
                  : "d" (control_word), "b" (key), "c" (initial));

    asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
              : "+S" (input), "+D" (output), "+a" (iv)
              : "d" (control_word), "b" (key), "c" (count - initial));
    return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
    struct aes_ctx *ctx = aes_ctx(tfm);
    int ts_state;

    padlock_reset_key(&ctx->cword.encrypt);
    ts_state = irq_ts_save();
    ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
    irq_ts_restore(ts_state);
    padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
    struct aes_ctx *ctx = aes_ctx(tfm);
    int ts_state;

    padlock_reset_key(&ctx->cword.encrypt);
    ts_state = irq_ts_save();
    ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
    irq_ts_restore(ts_state);
    padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
    .cra_name = "aes",
    .cra_driver_name = "aes-padlock",
    .cra_priority = PADLOCK_CRA_PRIORITY,
    .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
    .cra_blocksize = AES_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct aes_ctx),
    .cra_alignmask = PADLOCK_ALIGNMENT - 1,
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
    .cra_u = {
        .cipher = {
            .cia_min_keysize = AES_MIN_KEY_SIZE,
            .cia_max_keysize = AES_MAX_KEY_SIZE,
            .cia_setkey = aes_set_key,
            .cia_encrypt = aes_encrypt,
            .cia_decrypt = aes_decrypt,
        }
    }
};

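/*
 * The blkcipher walk below hands us one virtually-mapped chunk at a
 * time: walk.nbytes is the chunk size, we xcrypt as many whole blocks
 * as fit, and pass the leftover (nbytes & (AES_BLOCK_SIZE - 1)) back
 * to blkcipher_walk_done() so the walk can advance.
 */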
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
               struct scatterlist *dst, struct scatterlist *src,
               unsigned int nbytes)
{
    struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
    struct blkcipher_walk walk;
    int err;
    int ts_state;

    padlock_reset_key(&ctx->cword.encrypt);

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    ts_state = irq_ts_save();
    while ((nbytes = walk.nbytes)) {
        padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                   ctx->E, &ctx->cword.encrypt,
                   nbytes / AES_BLOCK_SIZE);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }
    irq_ts_restore(ts_state);

    padlock_store_cword(&ctx->cword.encrypt);

    return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
               struct scatterlist *dst, struct scatterlist *src,
               unsigned int nbytes)
{
    struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
    struct blkcipher_walk walk;
    int err;
    int ts_state;

    padlock_reset_key(&ctx->cword.decrypt);

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    ts_state = irq_ts_save();
    while ((nbytes = walk.nbytes)) {
        padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                   ctx->D, &ctx->cword.decrypt,
                   nbytes / AES_BLOCK_SIZE);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }
    irq_ts_restore(ts_state);

    padlock_store_cword(&ctx->cword.encrypt);

    return err;
}

static struct crypto_alg ecb_aes_alg = {
    .cra_name = "ecb(aes)",
    .cra_driver_name = "ecb-aes-padlock",
    .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
    .cra_blocksize = AES_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct aes_ctx),
    .cra_alignmask = PADLOCK_ALIGNMENT - 1,
    .cra_type = &crypto_blkcipher_type,
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
    .cra_u = {
        .blkcipher = {
            .min_keysize = AES_MIN_KEY_SIZE,
            .max_keysize = AES_MAX_KEY_SIZE,
            .setkey = aes_set_key,
            .encrypt = ecb_aes_encrypt,
            .decrypt = ecb_aes_decrypt,
        }
    }
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
               struct scatterlist *dst, struct scatterlist *src,
               unsigned int nbytes)
{
    struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
    struct blkcipher_walk walk;
    int err;
    int ts_state;

    padlock_reset_key(&ctx->cword.encrypt);

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    ts_state = irq_ts_save();
    while ((nbytes = walk.nbytes)) {
        u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
                        walk.dst.virt.addr, ctx->E,
                        walk.iv, &ctx->cword.encrypt,
                        nbytes / AES_BLOCK_SIZE);
        memcpy(walk.iv, iv, AES_BLOCK_SIZE);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }
    irq_ts_restore(ts_state);

    padlock_store_cword(&ctx->cword.decrypt);

    return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
               struct scatterlist *dst, struct scatterlist *src,
               unsigned int nbytes)
{
    struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
    struct blkcipher_walk walk;
    int err;
    int ts_state;

    padlock_reset_key(&ctx->cword.encrypt);

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    ts_state = irq_ts_save();
    while ((nbytes = walk.nbytes)) {
        padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
                   ctx->D, walk.iv, &ctx->cword.decrypt,
                   nbytes / AES_BLOCK_SIZE);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }
    irq_ts_restore(ts_state);

    padlock_store_cword(&ctx->cword.encrypt);

    return err;
}

static struct crypto_alg cbc_aes_alg = {
    .cra_name = "cbc(aes)",
    .cra_driver_name = "cbc-aes-padlock",
    .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
    .cra_blocksize = AES_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct aes_ctx),
    .cra_alignmask = PADLOCK_ALIGNMENT - 1,
    .cra_type = &crypto_blkcipher_type,
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
    .cra_u = {
        .blkcipher = {
            .min_keysize = AES_MIN_KEY_SIZE,
            .max_keysize = AES_MAX_KEY_SIZE,
            .ivsize = AES_BLOCK_SIZE,
            .setkey = aes_set_key,
            .encrypt = cbc_aes_encrypt,
            .decrypt = cbc_aes_decrypt,
        }
    }
};

static struct x86_cpu_id padlock_cpu_id[] = {
    X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
    {}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

static int __init padlock_init(void)
{
    int ret;
    struct cpuinfo_x86 *c = &cpu_data(0);

    if (!x86_match_cpu(padlock_cpu_id))
        return -ENODEV;

    if (!cpu_has_xcrypt_enabled) {
        printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
        return -ENODEV;
    }

    if ((ret = crypto_register_alg(&aes_alg)))
        goto aes_err;

    if ((ret = crypto_register_alg(&ecb_aes_alg)))
        goto ecb_aes_err;

    if ((ret = crypto_register_alg(&cbc_aes_alg)))
        goto cbc_aes_err;

    printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

    if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
        ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
        cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
        printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
    }

out:
    return ret;

cbc_aes_err:
    crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
    crypto_unregister_alg(&aes_alg);
aes_err:
    printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
    goto out;
}

static void __exit padlock_fini(void)
{
    crypto_unregister_alg(&cbc_aes_alg);
    crypto_unregister_alg(&ecb_aes_alg);
    crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");
