1 | /* |
2 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> |
3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> |
4 | * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. |
5 | * |
6 | * This file is released under the GPL. |
7 | */ |
8 | |
9 | #include <linux/completion.h> |
10 | #include <linux/err.h> |
11 | #include <linux/module.h> |
12 | #include <linux/init.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/bio.h> |
15 | #include <linux/blkdev.h> |
16 | #include <linux/mempool.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/crypto.h> |
19 | #include <linux/workqueue.h> |
20 | #include <linux/backing-dev.h> |
21 | #include <linux/percpu.h> |
22 | #include <linux/atomic.h> |
23 | #include <linux/scatterlist.h> |
24 | #include <asm/page.h> |
25 | #include <asm/unaligned.h> |
26 | #include <crypto/hash.h> |
27 | #include <crypto/md5.h> |
28 | #include <crypto/algapi.h> |
29 | |
30 | #include <linux/device-mapper.h> |
31 | |
32 | #define DM_MSG_PREFIX "crypt" |
33 | |
34 | /* |
35 | * context holding the current state of a multi-part conversion |
36 | */ |
37 | struct convert_context { |
38 | struct completion restart; |
39 | struct bio *bio_in; |
40 | struct bio *bio_out; |
41 | unsigned int offset_in; |
42 | unsigned int offset_out; |
43 | unsigned int idx_in; |
44 | unsigned int idx_out; |
45 | sector_t cc_sector; |
46 | atomic_t cc_pending; |
47 | }; |
48 | |
49 | /* |
50 | * per bio private data |
51 | */ |
52 | struct dm_crypt_io { |
53 | struct crypt_config *cc; |
54 | struct bio *base_bio; |
55 | struct work_struct work; |
56 | |
57 | struct convert_context ctx; |
58 | |
59 | atomic_t io_pending; |
60 | int error; |
61 | sector_t sector; |
62 | struct dm_crypt_io *base_io; |
63 | }; |
64 | |
65 | struct dm_crypt_request { |
66 | struct convert_context *ctx; |
67 | struct scatterlist sg_in; |
68 | struct scatterlist sg_out; |
69 | sector_t iv_sector; |
70 | }; |
71 | |
72 | struct crypt_config; |
73 | |
74 | struct crypt_iv_operations { |
75 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, |
76 | const char *opts); |
77 | void (*dtr)(struct crypt_config *cc); |
78 | int (*init)(struct crypt_config *cc); |
79 | int (*wipe)(struct crypt_config *cc); |
80 | int (*generator)(struct crypt_config *cc, u8 *iv, |
81 | struct dm_crypt_request *dmreq); |
82 | int (*post)(struct crypt_config *cc, u8 *iv, |
83 | struct dm_crypt_request *dmreq); |
84 | }; |
85 | |
86 | struct iv_essiv_private { |
87 | struct crypto_hash *hash_tfm; |
88 | u8 *salt; |
89 | }; |
90 | |
91 | struct iv_benbi_private { |
92 | int shift; |
93 | }; |
94 | |
95 | #define LMK_SEED_SIZE 64 /* hash + 0 */ |
96 | struct iv_lmk_private { |
97 | struct crypto_shash *hash_tfm; |
98 | u8 *seed; |
99 | }; |
100 | |
101 | /* |
102 | * Crypt: maps a linear range of a block device |
103 | * and encrypts / decrypts at the same time. |
104 | */ |
105 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; |
106 | |
107 | /* |
108 | * Duplicated per-CPU state for cipher. |
109 | */ |
110 | struct crypt_cpu { |
111 | struct ablkcipher_request *req; |
112 | }; |
113 | |
114 | /* |
115 | * The fields in here must be read-only after initialization; |
116 | * any state that changes belongs in struct crypt_cpu. |
117 | */ |
118 | struct crypt_config { |
119 | struct dm_dev *dev; |
120 | sector_t start; |
121 | |
122 | /* |
123 | * pool for per bio private data, crypto requests and |
124 | * encryption requests/buffer pages |
125 | */ |
126 | mempool_t *io_pool; |
127 | mempool_t *req_pool; |
128 | mempool_t *page_pool; |
129 | struct bio_set *bs; |
130 | |
131 | struct workqueue_struct *io_queue; |
132 | struct workqueue_struct *crypt_queue; |
133 | |
134 | char *cipher; |
135 | char *cipher_string; |
136 | |
137 | struct crypt_iv_operations *iv_gen_ops; |
138 | union { |
139 | struct iv_essiv_private essiv; |
140 | struct iv_benbi_private benbi; |
141 | struct iv_lmk_private lmk; |
142 | } iv_gen_private; |
143 | sector_t iv_offset; |
144 | unsigned int iv_size; |
145 | |
146 | /* |
147 | * Duplicated per cpu state. Access through |
148 | * per_cpu_ptr() only. |
149 | */ |
150 | struct crypt_cpu __percpu *cpu; |
151 | |
152 | /* ESSIV: struct crypto_cipher *essiv_tfm */ |
153 | void *iv_private; |
154 | struct crypto_ablkcipher **tfms; |
155 | unsigned tfms_count; |
156 | |
157 | /* |
158 | * Layout of each crypto request: |
159 | * |
160 | * struct ablkcipher_request |
161 | * context |
162 | * padding |
163 | * struct dm_crypt_request |
164 | * padding |
165 | * IV |
166 | * |
167 | * The padding is added so that dm_crypt_request and the IV are |
168 | * correctly aligned. |
169 | */ |
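/*
 * Rough sketch of the resulting pointer arithmetic (exact offsets depend
 * on the cipher chosen; see dmreq_of_req(), iv_of_dmreq() and crypt_ctr()):
 *   dmreq = (struct dm_crypt_request *)((char *)req + dmreq_start);
 *   iv    = (u8 *)ALIGN((unsigned long)(dmreq + 1), cipher alignmask + 1);
 */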
170 | unsigned int dmreq_start; |
171 | |
172 | unsigned long flags; |
173 | unsigned int key_size; |
174 | unsigned int key_parts; |
175 | u8 key[0]; |
176 | }; |
177 | |
178 | #define MIN_IOS 16 |
179 | #define MIN_POOL_PAGES 32 |
180 | |
181 | static struct kmem_cache *_crypt_io_pool; |
182 | |
183 | static void clone_init(struct dm_crypt_io *, struct bio *); |
184 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
185 | static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); |
186 | |
187 | static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) |
188 | { |
189 | return this_cpu_ptr(cc->cpu); |
190 | } |
191 | |
192 | /* |
193 | * Use this to access cipher attributes that are the same for each CPU. |
194 | */ |
195 | static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc) |
196 | { |
197 | return cc->tfms[0]; |
198 | } |
199 | |
200 | /* |
201 | * Different IV generation algorithms: |
202 | * |
203 | * plain: the initial vector is the 32-bit little-endian version of the sector |
204 | * number, padded with zeros if necessary. |
205 | * |
206 | * plain64: the initial vector is the 64-bit little-endian version of the sector |
207 | * number, padded with zeros if necessary. |
208 | * |
209 | * essiv: "encrypted sector|salt initial vector", the sector number is |
210 | * encrypted with the bulk cipher using a salt as key. The salt |
211 | * should be derived from the bulk cipher's key via hashing. |
212 | * |
213 | * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 |
214 | * (needed for LRW-32-AES and possibly other narrow block modes) |
215 | * |
216 | * null: the initial vector is always zero. Provides compatibility with |
217 | * obsolete loop_fish2 devices. Do not use for new devices. |
218 | * |
219 | * lmk: Compatible implementation of the block chaining mode used |
220 | * by the Loop-AES block device encryption system |
221 | * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/ |
222 | * It operates on full 512 byte sectors and uses CBC |
223 | * with an IV derived from the sector number, the data and |
224 | * optionally extra IV seed. |
225 | * This means that after decryption the first block |
226 | * of the sector must be tweaked according to the decrypted data. |
227 | * Loop-AES can use three encryption schemes: |
228 | * version 1: plain aes-cbc mode |
229 | * version 2: a 64-key multikey scheme with the lmk IV generator |
230 | * version 3: the same as version 2, with an additional IV seed |
231 | * (65 keys are used; the last key serves as the IV seed) |
232 | * |
233 | * plumb: unimplemented, see: |
234 | * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 |
235 | */ |
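/*
 * Illustrative example, for sector 300 (0x12c) and a 16-byte IV:
 *   plain/plain64: 2c 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *   benbi:         with 16-byte cipher blocks (shift = 5) the value
 *                  300 * 32 + 1 = 9601 is stored big-endian in the
 *                  last 8 bytes of the IV
 *   essiv:         the plain64 value encrypted with the bulk cipher,
 *                  keyed with hash(volume key)
 */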
236 | |
237 | static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, |
238 | struct dm_crypt_request *dmreq) |
239 | { |
240 | memset(iv, 0, cc->iv_size); |
241 | *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); |
242 | |
243 | return 0; |
244 | } |
245 | |
246 | static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, |
247 | struct dm_crypt_request *dmreq) |
248 | { |
249 | memset(iv, 0, cc->iv_size); |
250 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
251 | |
252 | return 0; |
253 | } |
254 | |
255 | /* Initialise ESSIV - compute salt but no local memory allocations */ |
256 | static int crypt_iv_essiv_init(struct crypt_config *cc) |
257 | { |
258 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
259 | struct hash_desc desc; |
260 | struct scatterlist sg; |
261 | struct crypto_cipher *essiv_tfm; |
262 | int err; |
263 | |
264 | sg_init_one(&sg, cc->key, cc->key_size); |
265 | desc.tfm = essiv->hash_tfm; |
266 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
267 | |
268 | err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); |
269 | if (err) |
270 | return err; |
271 | |
272 | essiv_tfm = cc->iv_private; |
273 | |
274 | err = crypto_cipher_setkey(essiv_tfm, essiv->salt, |
275 | crypto_hash_digestsize(essiv->hash_tfm)); |
276 | if (err) |
277 | return err; |
278 | |
279 | return 0; |
280 | } |
281 | |
282 | /* Wipe salt and reset key derived from volume key */ |
283 | static int crypt_iv_essiv_wipe(struct crypt_config *cc) |
284 | { |
285 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
286 | unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); |
287 | struct crypto_cipher *essiv_tfm; |
288 | int r, err = 0; |
289 | |
290 | memset(essiv->salt, 0, salt_size); |
291 | |
292 | essiv_tfm = cc->iv_private; |
293 | r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); |
294 | if (r) |
295 | err = r; |
296 | |
297 | return err; |
298 | } |
299 | |
300 | /* Allocate and set up the cipher transform used for ESSIV IV generation */ |
301 | static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, |
302 | struct dm_target *ti, |
303 | u8 *salt, unsigned saltsize) |
304 | { |
305 | struct crypto_cipher *essiv_tfm; |
306 | int err; |
307 | |
308 | /* Setup the essiv_tfm with the given salt */ |
309 | essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
310 | if (IS_ERR(essiv_tfm)) { |
311 | ti->error = "Error allocating crypto tfm for ESSIV"; |
312 | return essiv_tfm; |
313 | } |
314 | |
315 | if (crypto_cipher_blocksize(essiv_tfm) != |
316 | crypto_ablkcipher_ivsize(any_tfm(cc))) { |
317 | ti->error = "Block size of ESSIV cipher does " |
318 | "not match IV size of block cipher"; |
319 | crypto_free_cipher(essiv_tfm); |
320 | return ERR_PTR(-EINVAL); |
321 | } |
322 | |
323 | err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); |
324 | if (err) { |
325 | ti->error = "Failed to set key for ESSIV cipher"; |
326 | crypto_free_cipher(essiv_tfm); |
327 | return ERR_PTR(err); |
328 | } |
329 | |
330 | return essiv_tfm; |
331 | } |
332 | |
333 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) |
334 | { |
335 | struct crypto_cipher *essiv_tfm; |
336 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
337 | |
338 | crypto_free_hash(essiv->hash_tfm); |
339 | essiv->hash_tfm = NULL; |
340 | |
341 | kzfree(essiv->salt); |
342 | essiv->salt = NULL; |
343 | |
344 | essiv_tfm = cc->iv_private; |
345 | |
346 | if (essiv_tfm) |
347 | crypto_free_cipher(essiv_tfm); |
348 | |
349 | cc->iv_private = NULL; |
350 | } |
351 | |
352 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, |
353 | const char *opts) |
354 | { |
355 | struct crypto_cipher *essiv_tfm = NULL; |
356 | struct crypto_hash *hash_tfm = NULL; |
357 | u8 *salt = NULL; |
358 | int err; |
359 | |
360 | if (!opts) { |
361 | ti->error = "Digest algorithm missing for ESSIV mode"; |
362 | return -EINVAL; |
363 | } |
364 | |
365 | /* Allocate hash algorithm */ |
366 | hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); |
367 | if (IS_ERR(hash_tfm)) { |
368 | ti->error = "Error initializing ESSIV hash"; |
369 | err = PTR_ERR(hash_tfm); |
370 | goto bad; |
371 | } |
372 | |
373 | salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); |
374 | if (!salt) { |
375 | ti->error = "Error kmallocing salt storage in ESSIV"; |
376 | err = -ENOMEM; |
377 | goto bad; |
378 | } |
379 | |
380 | cc->iv_gen_private.essiv.salt = salt; |
381 | cc->iv_gen_private.essiv.hash_tfm = hash_tfm; |
382 | |
383 | essiv_tfm = setup_essiv_cpu(cc, ti, salt, |
384 | crypto_hash_digestsize(hash_tfm)); |
385 | if (IS_ERR(essiv_tfm)) { |
386 | crypt_iv_essiv_dtr(cc); |
387 | return PTR_ERR(essiv_tfm); |
388 | } |
389 | cc->iv_private = essiv_tfm; |
390 | |
391 | return 0; |
392 | |
393 | bad: |
394 | if (hash_tfm && !IS_ERR(hash_tfm)) |
395 | crypto_free_hash(hash_tfm); |
396 | kfree(salt); |
397 | return err; |
398 | } |
399 | |
400 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, |
401 | struct dm_crypt_request *dmreq) |
402 | { |
403 | struct crypto_cipher *essiv_tfm = cc->iv_private; |
404 | |
405 | memset(iv, 0, cc->iv_size); |
406 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
407 | crypto_cipher_encrypt_one(essiv_tfm, iv, iv); |
408 | |
409 | return 0; |
410 | } |
411 | |
412 | static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, |
413 | const char *opts) |
414 | { |
415 | unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc)); |
416 | int log = ilog2(bs); |
417 | |
418 | /* We need to calculate how far we must shift the sector count |
419 | * to get the cipher block count; this shift is used in _gen */ |
420 | |
421 | if (1 << log != bs) { |
422 | ti->error = "cypher blocksize is not a power of 2"; |
423 | return -EINVAL; |
424 | } |
425 | |
426 | if (log > 9) { |
427 | ti->error = "cypher blocksize is > 512"; |
428 | return -EINVAL; |
429 | } |
430 | |
431 | cc->iv_gen_private.benbi.shift = 9 - log; |
432 | |
433 | return 0; |
434 | } |
435 | |
436 | static void crypt_iv_benbi_dtr(struct crypt_config *cc) |
437 | { |
438 | } |
439 | |
440 | static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, |
441 | struct dm_crypt_request *dmreq) |
442 | { |
443 | __be64 val; |
444 | |
445 | memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ |
446 | |
447 | val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); |
448 | put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); |
449 | |
450 | return 0; |
451 | } |
452 | |
453 | static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, |
454 | struct dm_crypt_request *dmreq) |
455 | { |
456 | memset(iv, 0, cc->iv_size); |
457 | |
458 | return 0; |
459 | } |
460 | |
461 | static void crypt_iv_lmk_dtr(struct crypt_config *cc) |
462 | { |
463 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; |
464 | |
465 | if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) |
466 | crypto_free_shash(lmk->hash_tfm); |
467 | lmk->hash_tfm = NULL; |
468 | |
469 | kzfree(lmk->seed); |
470 | lmk->seed = NULL; |
471 | } |
472 | |
473 | static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, |
474 | const char *opts) |
475 | { |
476 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; |
477 | |
478 | lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); |
479 | if (IS_ERR(lmk->hash_tfm)) { |
480 | ti->error = "Error initializing LMK hash"; |
481 | return PTR_ERR(lmk->hash_tfm); |
482 | } |
483 | |
484 | /* No seed in LMK version 2 */ |
485 | if (cc->key_parts == cc->tfms_count) { |
486 | lmk->seed = NULL; |
487 | return 0; |
488 | } |
489 | |
490 | lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); |
491 | if (!lmk->seed) { |
492 | crypt_iv_lmk_dtr(cc); |
493 | ti->error = "Error kmallocing seed storage in LMK"; |
494 | return -ENOMEM; |
495 | } |
496 | |
497 | return 0; |
498 | } |
499 | |
500 | static int crypt_iv_lmk_init(struct crypt_config *cc) |
501 | { |
502 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; |
503 | int subkey_size = cc->key_size / cc->key_parts; |
504 | |
505 | /* The LMK seed is stored at the position of key number LMK_KEYS + 1 */ |
506 | if (lmk->seed) |
507 | memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), |
508 | crypto_shash_digestsize(lmk->hash_tfm)); |
509 | |
510 | return 0; |
511 | } |
512 | |
513 | static int crypt_iv_lmk_wipe(struct crypt_config *cc) |
514 | { |
515 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; |
516 | |
517 | if (lmk->seed) |
518 | memset(lmk->seed, 0, LMK_SEED_SIZE); |
519 | |
520 | return 0; |
521 | } |
522 | |
523 | static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, |
524 | struct dm_crypt_request *dmreq, |
525 | u8 *data) |
526 | { |
527 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; |
528 | struct { |
529 | struct shash_desc desc; |
530 | char ctx[crypto_shash_descsize(lmk->hash_tfm)]; |
531 | } sdesc; |
532 | struct md5_state md5state; |
533 | u32 buf[4]; |
534 | int i, r; |
535 | |
536 | sdesc.desc.tfm = lmk->hash_tfm; |
537 | sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
538 | |
539 | r = crypto_shash_init(&sdesc.desc); |
540 | if (r) |
541 | return r; |
542 | |
543 | if (lmk->seed) { |
544 | r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE); |
545 | if (r) |
546 | return r; |
547 | } |
548 | |
549 | /* Sector is always 512B, block size 16, add data of blocks 1-31 */ |
550 | r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31); |
551 | if (r) |
552 | return r; |
553 | |
554 | /* Sector is cropped to 56 bits here */ |
555 | buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); |
556 | buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); |
557 | buf[2] = cpu_to_le32(4024); |
558 | buf[3] = 0; |
559 | r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf)); |
560 | if (r) |
561 | return r; |
562 | |
563 | /* No MD5 padding here */ |
564 | r = crypto_shash_export(&sdesc.desc, &md5state); |
565 | if (r) |
566 | return r; |
567 | |
568 | for (i = 0; i < MD5_HASH_WORDS; i++) |
569 | __cpu_to_le32s(&md5state.hash[i]); |
570 | memcpy(iv, &md5state.hash, cc->iv_size); |
571 | |
572 | return 0; |
573 | } |
574 | |
575 | static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, |
576 | struct dm_crypt_request *dmreq) |
577 | { |
578 | u8 *src; |
579 | int r = 0; |
580 | |
581 | if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { |
582 | src = kmap_atomic(sg_page(&dmreq->sg_in)); |
583 | r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset); |
584 | kunmap_atomic(src); |
585 | } else |
586 | memset(iv, 0, cc->iv_size); |
587 | |
588 | return r; |
589 | } |
590 | |
591 | static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, |
592 | struct dm_crypt_request *dmreq) |
593 | { |
594 | u8 *dst; |
595 | int r; |
596 | |
597 | if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) |
598 | return 0; |
599 | |
600 | dst = kmap_atomic(sg_page(&dmreq->sg_out)); |
601 | r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset); |
602 | |
603 | /* Tweak the first block of plaintext sector */ |
604 | if (!r) |
605 | crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size); |
606 | |
607 | kunmap_atomic(dst); |
608 | return r; |
609 | } |
610 | |
611 | static struct crypt_iv_operations crypt_iv_plain_ops = { |
612 | .generator = crypt_iv_plain_gen |
613 | }; |
614 | |
615 | static struct crypt_iv_operations crypt_iv_plain64_ops = { |
616 | .generator = crypt_iv_plain64_gen |
617 | }; |
618 | |
619 | static struct crypt_iv_operations crypt_iv_essiv_ops = { |
620 | .ctr = crypt_iv_essiv_ctr, |
621 | .dtr = crypt_iv_essiv_dtr, |
622 | .init = crypt_iv_essiv_init, |
623 | .wipe = crypt_iv_essiv_wipe, |
624 | .generator = crypt_iv_essiv_gen |
625 | }; |
626 | |
627 | static struct crypt_iv_operations crypt_iv_benbi_ops = { |
628 | .ctr = crypt_iv_benbi_ctr, |
629 | .dtr = crypt_iv_benbi_dtr, |
630 | .generator = crypt_iv_benbi_gen |
631 | }; |
632 | |
633 | static struct crypt_iv_operations crypt_iv_null_ops = { |
634 | .generator = crypt_iv_null_gen |
635 | }; |
636 | |
637 | static struct crypt_iv_operations crypt_iv_lmk_ops = { |
638 | .ctr = crypt_iv_lmk_ctr, |
639 | .dtr = crypt_iv_lmk_dtr, |
640 | .init = crypt_iv_lmk_init, |
641 | .wipe = crypt_iv_lmk_wipe, |
642 | .generator = crypt_iv_lmk_gen, |
643 | .post = crypt_iv_lmk_post |
644 | }; |
645 | |
646 | static void crypt_convert_init(struct crypt_config *cc, |
647 | struct convert_context *ctx, |
648 | struct bio *bio_out, struct bio *bio_in, |
649 | sector_t sector) |
650 | { |
651 | ctx->bio_in = bio_in; |
652 | ctx->bio_out = bio_out; |
653 | ctx->offset_in = 0; |
654 | ctx->offset_out = 0; |
655 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; |
656 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; |
657 | ctx->cc_sector = sector + cc->iv_offset; |
658 | init_completion(&ctx->restart); |
659 | } |
660 | |
661 | static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, |
662 | struct ablkcipher_request *req) |
663 | { |
664 | return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); |
665 | } |
666 | |
667 | static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc, |
668 | struct dm_crypt_request *dmreq) |
669 | { |
670 | return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start); |
671 | } |
672 | |
673 | static u8 *iv_of_dmreq(struct crypt_config *cc, |
674 | struct dm_crypt_request *dmreq) |
675 | { |
676 | return (u8 *)ALIGN((unsigned long)(dmreq + 1), |
677 | crypto_ablkcipher_alignmask(any_tfm(cc)) + 1); |
678 | } |
679 | |
680 | static int crypt_convert_block(struct crypt_config *cc, |
681 | struct convert_context *ctx, |
682 | struct ablkcipher_request *req) |
683 | { |
684 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); |
685 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); |
686 | struct dm_crypt_request *dmreq; |
687 | u8 *iv; |
688 | int r; |
689 | |
690 | dmreq = dmreq_of_req(cc, req); |
691 | iv = iv_of_dmreq(cc, dmreq); |
692 | |
693 | dmreq->iv_sector = ctx->cc_sector; |
694 | dmreq->ctx = ctx; |
695 | sg_init_table(&dmreq->sg_in, 1); |
696 | sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, |
697 | bv_in->bv_offset + ctx->offset_in); |
698 | |
699 | sg_init_table(&dmreq->sg_out, 1); |
700 | sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, |
701 | bv_out->bv_offset + ctx->offset_out); |
702 | |
703 | ctx->offset_in += 1 << SECTOR_SHIFT; |
704 | if (ctx->offset_in >= bv_in->bv_len) { |
705 | ctx->offset_in = 0; |
706 | ctx->idx_in++; |
707 | } |
708 | |
709 | ctx->offset_out += 1 << SECTOR_SHIFT; |
710 | if (ctx->offset_out >= bv_out->bv_len) { |
711 | ctx->offset_out = 0; |
712 | ctx->idx_out++; |
713 | } |
714 | |
715 | if (cc->iv_gen_ops) { |
716 | r = cc->iv_gen_ops->generator(cc, iv, dmreq); |
717 | if (r < 0) |
718 | return r; |
719 | } |
720 | |
721 | ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, |
722 | 1 << SECTOR_SHIFT, iv); |
723 | |
724 | if (bio_data_dir(ctx->bio_in) == WRITE) |
725 | r = crypto_ablkcipher_encrypt(req); |
726 | else |
727 | r = crypto_ablkcipher_decrypt(req); |
728 | |
729 | if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) |
730 | r = cc->iv_gen_ops->post(cc, iv, dmreq); |
731 | |
732 | return r; |
733 | } |
734 | |
735 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
736 | int error); |
737 | |
738 | static void crypt_alloc_req(struct crypt_config *cc, |
739 | struct convert_context *ctx) |
740 | { |
741 | struct crypt_cpu *this_cc = this_crypt_config(cc); |
742 | unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); |
743 | |
744 | if (!this_cc->req) |
745 | this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); |
746 | |
747 | ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); |
748 | ablkcipher_request_set_callback(this_cc->req, |
749 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, |
750 | kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); |
751 | } |
752 | |
753 | /* |
754 | * Encrypt / decrypt data from one bio to another one (can be the same one) |
755 | */ |
756 | static int crypt_convert(struct crypt_config *cc, |
757 | struct convert_context *ctx) |
758 | { |
759 | struct crypt_cpu *this_cc = this_crypt_config(cc); |
760 | int r; |
761 | |
762 | atomic_set(&ctx->cc_pending, 1); |
763 | |
764 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && |
765 | ctx->idx_out < ctx->bio_out->bi_vcnt) { |
766 | |
767 | crypt_alloc_req(cc, ctx); |
768 | |
769 | atomic_inc(&ctx->cc_pending); |
770 | |
771 | r = crypt_convert_block(cc, ctx, this_cc->req); |
772 | |
773 | switch (r) { |
774 | /* async */ |
775 | case -EBUSY: |
776 | wait_for_completion(&ctx->restart); |
777 | INIT_COMPLETION(ctx->restart); |
778 | /* fall through*/ |
779 | case -EINPROGRESS: |
780 | this_cc->req = NULL; |
781 | ctx->cc_sector++; |
782 | continue; |
783 | |
784 | /* sync */ |
785 | case 0: |
786 | atomic_dec(&ctx->cc_pending); |
787 | ctx->cc_sector++; |
788 | cond_resched(); |
789 | continue; |
790 | |
791 | /* error */ |
792 | default: |
793 | atomic_dec(&ctx->cc_pending); |
794 | return r; |
795 | } |
796 | } |
797 | |
798 | return 0; |
799 | } |
800 | |
801 | static void dm_crypt_bio_destructor(struct bio *bio) |
802 | { |
803 | struct dm_crypt_io *io = bio->bi_private; |
804 | struct crypt_config *cc = io->cc; |
805 | |
806 | bio_free(bio, cc->bs); |
807 | } |
808 | |
809 | /* |
810 | * Generate a new unfragmented bio with the given size |
811 | * This should never violate the device limitations |
812 | * May return a smaller bio when running out of pages, indicated by |
813 | * *out_of_pages set to 1. |
814 | */ |
815 | static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, |
816 | unsigned *out_of_pages) |
817 | { |
818 | struct crypt_config *cc = io->cc; |
819 | struct bio *clone; |
820 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
821 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; |
822 | unsigned i, len; |
823 | struct page *page; |
824 | |
825 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); |
826 | if (!clone) |
827 | return NULL; |
828 | |
829 | clone_init(io, clone); |
830 | *out_of_pages = 0; |
831 | |
832 | for (i = 0; i < nr_iovecs; i++) { |
833 | page = mempool_alloc(cc->page_pool, gfp_mask); |
834 | if (!page) { |
835 | *out_of_pages = 1; |
836 | break; |
837 | } |
838 | |
839 | /* |
840 | * If additional pages cannot be allocated without waiting, |
841 | * return a partially-allocated bio. The caller will then try |
842 | * to allocate more bios while submitting this partial bio. |
843 | */ |
844 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
845 | |
846 | len = (size > PAGE_SIZE) ? PAGE_SIZE : size; |
847 | |
848 | if (!bio_add_page(clone, page, len, 0)) { |
849 | mempool_free(page, cc->page_pool); |
850 | break; |
851 | } |
852 | |
853 | size -= len; |
854 | } |
855 | |
856 | if (!clone->bi_size) { |
857 | bio_put(clone); |
858 | return NULL; |
859 | } |
860 | |
861 | return clone; |
862 | } |
863 | |
864 | static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) |
865 | { |
866 | unsigned int i; |
867 | struct bio_vec *bv; |
868 | |
869 | for (i = 0; i < clone->bi_vcnt; i++) { |
870 | bv = bio_iovec_idx(clone, i); |
871 | BUG_ON(!bv->bv_page); |
872 | mempool_free(bv->bv_page, cc->page_pool); |
873 | bv->bv_page = NULL; |
874 | } |
875 | } |
876 | |
877 | static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, |
878 | struct bio *bio, sector_t sector) |
879 | { |
880 | struct dm_crypt_io *io; |
881 | |
882 | io = mempool_alloc(cc->io_pool, GFP_NOIO); |
883 | io->cc = cc; |
884 | io->base_bio = bio; |
885 | io->sector = sector; |
886 | io->error = 0; |
887 | io->base_io = NULL; |
888 | atomic_set(&io->io_pending, 0); |
889 | |
890 | return io; |
891 | } |
892 | |
893 | static void crypt_inc_pending(struct dm_crypt_io *io) |
894 | { |
895 | atomic_inc(&io->io_pending); |
896 | } |
897 | |
898 | /* |
899 | * One of the bios has finished. Check for completion of |
900 | * the whole request and correctly clean up the buffer. |
901 | * If base_io is set, wait for the last fragment to complete. |
902 | */ |
903 | static void crypt_dec_pending(struct dm_crypt_io *io) |
904 | { |
905 | struct crypt_config *cc = io->cc; |
906 | struct bio *base_bio = io->base_bio; |
907 | struct dm_crypt_io *base_io = io->base_io; |
908 | int error = io->error; |
909 | |
910 | if (!atomic_dec_and_test(&io->io_pending)) |
911 | return; |
912 | |
913 | mempool_free(io, cc->io_pool); |
914 | |
915 | if (likely(!base_io)) |
916 | bio_endio(base_bio, error); |
917 | else { |
918 | if (error && !base_io->error) |
919 | base_io->error = error; |
920 | crypt_dec_pending(base_io); |
921 | } |
922 | } |
923 | |
924 | /* |
925 | * kcryptd/kcryptd_io: |
926 | * |
927 | * Needed because it would be very unwise to do decryption in an |
928 | * interrupt context. |
929 | * |
930 | * kcryptd performs the actual encryption or decryption. |
931 | * |
932 | * kcryptd_io performs the IO submission. |
933 | * |
934 | * They must be separated as otherwise the final stages could be |
935 | * starved by new requests which can block in the first stages due |
936 | * to memory allocation. |
937 | * |
938 | * The work is performed per CPU for all dm-crypt instances. |
939 | * Work items should not depend on each other and do not block. |
940 | */ |
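/*
 * The resulting flow, summarised for orientation:
 *   READ:  crypt_map -> kcryptd_io_read (submit clone) -> crypt_endio ->
 *          kcryptd_queue_crypt -> decrypt in place -> bio_endio
 *   WRITE: crypt_map -> kcryptd_queue_crypt -> encrypt into a clone ->
 *          kcryptd_crypt_write_io_submit -> generic_make_request
 */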
941 | static void crypt_endio(struct bio *clone, int error) |
942 | { |
943 | struct dm_crypt_io *io = clone->bi_private; |
944 | struct crypt_config *cc = io->cc; |
945 | unsigned rw = bio_data_dir(clone); |
946 | |
947 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) |
948 | error = -EIO; |
949 | |
950 | /* |
951 | * free the processed pages |
952 | */ |
953 | if (rw == WRITE) |
954 | crypt_free_buffer_pages(cc, clone); |
955 | |
956 | bio_put(clone); |
957 | |
958 | if (rw == READ && !error) { |
959 | kcryptd_queue_crypt(io); |
960 | return; |
961 | } |
962 | |
963 | if (unlikely(error)) |
964 | io->error = error; |
965 | |
966 | crypt_dec_pending(io); |
967 | } |
968 | |
969 | static void clone_init(struct dm_crypt_io *io, struct bio *clone) |
970 | { |
971 | struct crypt_config *cc = io->cc; |
972 | |
973 | clone->bi_private = io; |
974 | clone->bi_end_io = crypt_endio; |
975 | clone->bi_bdev = cc->dev->bdev; |
976 | clone->bi_rw = io->base_bio->bi_rw; |
977 | clone->bi_destructor = dm_crypt_bio_destructor; |
978 | } |
979 | |
980 | static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) |
981 | { |
982 | struct crypt_config *cc = io->cc; |
983 | struct bio *base_bio = io->base_bio; |
984 | struct bio *clone; |
985 | |
986 | /* |
987 | * The block layer might modify the bvec array, so always |
988 | * copy the required bvecs because we need the original |
989 | * one in order to decrypt the whole bio data *afterwards*. |
990 | */ |
991 | clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); |
992 | if (!clone) |
993 | return 1; |
994 | |
995 | crypt_inc_pending(io); |
996 | |
997 | clone_init(io, clone); |
998 | clone->bi_idx = 0; |
999 | clone->bi_vcnt = bio_segments(base_bio); |
1000 | clone->bi_size = base_bio->bi_size; |
1001 | clone->bi_sector = cc->start + io->sector; |
1002 | memcpy(clone->bi_io_vec, bio_iovec(base_bio), |
1003 | sizeof(struct bio_vec) * clone->bi_vcnt); |
1004 | |
1005 | generic_make_request(clone); |
1006 | return 0; |
1007 | } |
1008 | |
1009 | static void kcryptd_io_write(struct dm_crypt_io *io) |
1010 | { |
1011 | struct bio *clone = io->ctx.bio_out; |
1012 | generic_make_request(clone); |
1013 | } |
1014 | |
1015 | static void kcryptd_io(struct work_struct *work) |
1016 | { |
1017 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
1018 | |
1019 | if (bio_data_dir(io->base_bio) == READ) { |
1020 | crypt_inc_pending(io); |
1021 | if (kcryptd_io_read(io, GFP_NOIO)) |
1022 | io->error = -ENOMEM; |
1023 | crypt_dec_pending(io); |
1024 | } else |
1025 | kcryptd_io_write(io); |
1026 | } |
1027 | |
1028 | static void kcryptd_queue_io(struct dm_crypt_io *io) |
1029 | { |
1030 | struct crypt_config *cc = io->cc; |
1031 | |
1032 | INIT_WORK(&io->work, kcryptd_io); |
1033 | queue_work(cc->io_queue, &io->work); |
1034 | } |
1035 | |
1036 | static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) |
1037 | { |
1038 | struct bio *clone = io->ctx.bio_out; |
1039 | struct crypt_config *cc = io->cc; |
1040 | |
1041 | if (unlikely(io->error < 0)) { |
1042 | crypt_free_buffer_pages(cc, clone); |
1043 | bio_put(clone); |
1044 | crypt_dec_pending(io); |
1045 | return; |
1046 | } |
1047 | |
1048 | /* crypt_convert should have filled the clone bio */ |
1049 | BUG_ON(io->ctx.idx_out < clone->bi_vcnt); |
1050 | |
1051 | clone->bi_sector = cc->start + io->sector; |
1052 | |
1053 | if (async) |
1054 | kcryptd_queue_io(io); |
1055 | else |
1056 | generic_make_request(clone); |
1057 | } |
1058 | |
1059 | static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
1060 | { |
1061 | struct crypt_config *cc = io->cc; |
1062 | struct bio *clone; |
1063 | struct dm_crypt_io *new_io; |
1064 | int crypt_finished; |
1065 | unsigned out_of_pages = 0; |
1066 | unsigned remaining = io->base_bio->bi_size; |
1067 | sector_t sector = io->sector; |
1068 | int r; |
1069 | |
1070 | /* |
1071 | * Prevent io from disappearing until this function completes. |
1072 | */ |
1073 | crypt_inc_pending(io); |
1074 | crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); |
1075 | |
1076 | /* |
1077 | * The allocated buffers can be smaller than the whole bio, |
1078 | * so repeat the whole process until all the data can be handled. |
1079 | */ |
1080 | while (remaining) { |
1081 | clone = crypt_alloc_buffer(io, remaining, &out_of_pages); |
1082 | if (unlikely(!clone)) { |
1083 | io->error = -ENOMEM; |
1084 | break; |
1085 | } |
1086 | |
1087 | io->ctx.bio_out = clone; |
1088 | io->ctx.idx_out = 0; |
1089 | |
1090 | remaining -= clone->bi_size; |
1091 | sector += bio_sectors(clone); |
1092 | |
1093 | crypt_inc_pending(io); |
1094 | |
1095 | r = crypt_convert(cc, &io->ctx); |
1096 | if (r < 0) |
1097 | io->error = -EIO; |
1098 | |
1099 | crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); |
1100 | |
1101 | /* Encryption was already finished, submit io now */ |
1102 | if (crypt_finished) { |
1103 | kcryptd_crypt_write_io_submit(io, 0); |
1104 | |
1105 | /* |
1106 | * If there was an error, do not try next fragments. |
1107 | * For async, error is processed in async handler. |
1108 | */ |
1109 | if (unlikely(r < 0)) |
1110 | break; |
1111 | |
1112 | io->sector = sector; |
1113 | } |
1114 | |
1115 | /* |
1116 | * Out of memory -> run queues |
1117 | * But don't wait if split was due to the io size restriction |
1118 | */ |
1119 | if (unlikely(out_of_pages)) |
1120 | congestion_wait(BLK_RW_ASYNC, HZ/100); |
1121 | |
1122 | /* |
1123 | * With async crypto it is unsafe to share the crypto context |
1124 | * between fragments, so switch to a new dm_crypt_io structure. |
1125 | */ |
1126 | if (unlikely(!crypt_finished && remaining)) { |
1127 | new_io = crypt_io_alloc(io->cc, io->base_bio, |
1128 | sector); |
1129 | crypt_inc_pending(new_io); |
1130 | crypt_convert_init(cc, &new_io->ctx, NULL, |
1131 | io->base_bio, sector); |
1132 | new_io->ctx.idx_in = io->ctx.idx_in; |
1133 | new_io->ctx.offset_in = io->ctx.offset_in; |
1134 | |
1135 | /* |
1136 | * Fragments after the first use the base_io |
1137 | * pending count. |
1138 | */ |
1139 | if (!io->base_io) |
1140 | new_io->base_io = io; |
1141 | else { |
1142 | new_io->base_io = io->base_io; |
1143 | crypt_inc_pending(io->base_io); |
1144 | crypt_dec_pending(io); |
1145 | } |
1146 | |
1147 | io = new_io; |
1148 | } |
1149 | } |
1150 | |
1151 | crypt_dec_pending(io); |
1152 | } |
1153 | |
1154 | static void kcryptd_crypt_read_done(struct dm_crypt_io *io) |
1155 | { |
1156 | crypt_dec_pending(io); |
1157 | } |
1158 | |
1159 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) |
1160 | { |
1161 | struct crypt_config *cc = io->cc; |
1162 | int r = 0; |
1163 | |
1164 | crypt_inc_pending(io); |
1165 | |
1166 | crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, |
1167 | io->sector); |
1168 | |
1169 | r = crypt_convert(cc, &io->ctx); |
1170 | if (r < 0) |
1171 | io->error = -EIO; |
1172 | |
1173 | if (atomic_dec_and_test(&io->ctx.cc_pending)) |
1174 | kcryptd_crypt_read_done(io); |
1175 | |
1176 | crypt_dec_pending(io); |
1177 | } |
1178 | |
1179 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
1180 | int error) |
1181 | { |
1182 | struct dm_crypt_request *dmreq = async_req->data; |
1183 | struct convert_context *ctx = dmreq->ctx; |
1184 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); |
1185 | struct crypt_config *cc = io->cc; |
1186 | |
1187 | if (error == -EINPROGRESS) { |
1188 | complete(&ctx->restart); |
1189 | return; |
1190 | } |
1191 | |
1192 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) |
1193 | error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); |
1194 | |
1195 | if (error < 0) |
1196 | io->error = -EIO; |
1197 | |
1198 | mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); |
1199 | |
1200 | if (!atomic_dec_and_test(&ctx->cc_pending)) |
1201 | return; |
1202 | |
1203 | if (bio_data_dir(io->base_bio) == READ) |
1204 | kcryptd_crypt_read_done(io); |
1205 | else |
1206 | kcryptd_crypt_write_io_submit(io, 1); |
1207 | } |
1208 | |
1209 | static void kcryptd_crypt(struct work_struct *work) |
1210 | { |
1211 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
1212 | |
1213 | if (bio_data_dir(io->base_bio) == READ) |
1214 | kcryptd_crypt_read_convert(io); |
1215 | else |
1216 | kcryptd_crypt_write_convert(io); |
1217 | } |
1218 | |
1219 | static void kcryptd_queue_crypt(struct dm_crypt_io *io) |
1220 | { |
1221 | struct crypt_config *cc = io->cc; |
1222 | |
1223 | INIT_WORK(&io->work, kcryptd_crypt); |
1224 | queue_work(cc->crypt_queue, &io->work); |
1225 | } |
1226 | |
1227 | /* |
1228 | * Decode key from its hex representation |
1229 | */ |
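/* For example, the hex string "0a1b" decodes to the two bytes 0x0a, 0x1b. */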
1230 | static int crypt_decode_key(u8 *key, char *hex, unsigned int size) |
1231 | { |
1232 | char buffer[3]; |
1233 | unsigned int i; |
1234 | |
1235 | buffer[2] = '\0'; |
1236 | |
1237 | for (i = 0; i < size; i++) { |
1238 | buffer[0] = *hex++; |
1239 | buffer[1] = *hex++; |
1240 | |
1241 | if (kstrtou8(buffer, 16, &key[i])) |
1242 | return -EINVAL; |
1243 | } |
1244 | |
1245 | if (*hex != '\0') |
1246 | return -EINVAL; |
1247 | |
1248 | return 0; |
1249 | } |
1250 | |
1251 | /* |
1252 | * Encode key into its hex representation |
1253 | */ |
1254 | static void crypt_encode_key(char *hex, u8 *key, unsigned int size) |
1255 | { |
1256 | unsigned int i; |
1257 | |
1258 | for (i = 0; i < size; i++) { |
1259 | sprintf(hex, "%02x", *key); |
1260 | hex += 2; |
1261 | key++; |
1262 | } |
1263 | } |
1264 | |
1265 | static void crypt_free_tfms(struct crypt_config *cc) |
1266 | { |
1267 | unsigned i; |
1268 | |
1269 | if (!cc->tfms) |
1270 | return; |
1271 | |
1272 | for (i = 0; i < cc->tfms_count; i++) |
1273 | if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) { |
1274 | crypto_free_ablkcipher(cc->tfms[i]); |
1275 | cc->tfms[i] = NULL; |
1276 | } |
1277 | |
1278 | kfree(cc->tfms); |
1279 | cc->tfms = NULL; |
1280 | } |
1281 | |
1282 | static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) |
1283 | { |
1284 | unsigned i; |
1285 | int err; |
1286 | |
1287 | cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *), |
1288 | GFP_KERNEL); |
1289 | if (!cc->tfms) |
1290 | return -ENOMEM; |
1291 | |
1292 | for (i = 0; i < cc->tfms_count; i++) { |
1293 | cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0); |
1294 | if (IS_ERR(cc->tfms[i])) { |
1295 | err = PTR_ERR(cc->tfms[i]); |
1296 | crypt_free_tfms(cc); |
1297 | return err; |
1298 | } |
1299 | } |
1300 | |
1301 | return 0; |
1302 | } |
1303 | |
1304 | static int crypt_setkey_allcpus(struct crypt_config *cc) |
1305 | { |
1306 | unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count); |
1307 | int err = 0, i, r; |
1308 | |
1309 | for (i = 0; i < cc->tfms_count; i++) { |
1310 | r = crypto_ablkcipher_setkey(cc->tfms[i], |
1311 | cc->key + (i * subkey_size), |
1312 | subkey_size); |
1313 | if (r) |
1314 | err = r; |
1315 | } |
1316 | |
1317 | return err; |
1318 | } |
1319 | |
1320 | static int crypt_set_key(struct crypt_config *cc, char *key) |
1321 | { |
1322 | int r = -EINVAL; |
1323 | int key_string_len = strlen(key); |
1324 | |
1325 | /* The key size may not be changed. */ |
1326 | if (cc->key_size != (key_string_len >> 1)) |
1327 | goto out; |
1328 | |
1329 | /* Hyphen (which gives a key_size of zero) means there is no key. */ |
1330 | if (!cc->key_size && strcmp(key, "-")) |
1331 | goto out; |
1332 | |
1333 | if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) |
1334 | goto out; |
1335 | |
1336 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
1337 | |
1338 | r = crypt_setkey_allcpus(cc); |
1339 | |
1340 | out: |
1341 | /* Hex key string not needed after here, so wipe it. */ |
1342 | memset(key, '0', key_string_len); |
1343 | |
1344 | return r; |
1345 | } |
1346 | |
1347 | static int crypt_wipe_key(struct crypt_config *cc) |
1348 | { |
1349 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
1350 | memset(&cc->key, 0, cc->key_size * sizeof(u8)); |
1351 | |
1352 | return crypt_setkey_allcpus(cc); |
1353 | } |
1354 | |
1355 | static void crypt_dtr(struct dm_target *ti) |
1356 | { |
1357 | struct crypt_config *cc = ti->private; |
1358 | struct crypt_cpu *cpu_cc; |
1359 | int cpu; |
1360 | |
1361 | ti->private = NULL; |
1362 | |
1363 | if (!cc) |
1364 | return; |
1365 | |
1366 | if (cc->io_queue) |
1367 | destroy_workqueue(cc->io_queue); |
1368 | if (cc->crypt_queue) |
1369 | destroy_workqueue(cc->crypt_queue); |
1370 | |
1371 | if (cc->cpu) |
1372 | for_each_possible_cpu(cpu) { |
1373 | cpu_cc = per_cpu_ptr(cc->cpu, cpu); |
1374 | if (cpu_cc->req) |
1375 | mempool_free(cpu_cc->req, cc->req_pool); |
1376 | } |
1377 | |
1378 | crypt_free_tfms(cc); |
1379 | |
1380 | if (cc->bs) |
1381 | bioset_free(cc->bs); |
1382 | |
1383 | if (cc->page_pool) |
1384 | mempool_destroy(cc->page_pool); |
1385 | if (cc->req_pool) |
1386 | mempool_destroy(cc->req_pool); |
1387 | if (cc->io_pool) |
1388 | mempool_destroy(cc->io_pool); |
1389 | |
1390 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
1391 | cc->iv_gen_ops->dtr(cc); |
1392 | |
1393 | if (cc->dev) |
1394 | dm_put_device(ti, cc->dev); |
1395 | |
1396 | if (cc->cpu) |
1397 | free_percpu(cc->cpu); |
1398 | |
1399 | kzfree(cc->cipher); |
1400 | kzfree(cc->cipher_string); |
1401 | |
1402 | /* Must zero key material before freeing */ |
1403 | kzfree(cc); |
1404 | } |
1405 | |
1406 | static int crypt_ctr_cipher(struct dm_target *ti, |
1407 | char *cipher_in, char *key) |
1408 | { |
1409 | struct crypt_config *cc = ti->private; |
1410 | char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount; |
1411 | char *cipher_api = NULL; |
1412 | int ret = -EINVAL; |
1413 | char dummy; |
1414 | |
1415 | /* Convert to crypto api definition? */ |
1416 | if (strchr(cipher_in, '(')) { |
1417 | ti->error = "Bad cipher specification"; |
1418 | return -EINVAL; |
1419 | } |
1420 | |
1421 | cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); |
1422 | if (!cc->cipher_string) |
1423 | goto bad_mem; |
1424 | |
1425 | /* |
1426 | * Legacy dm-crypt cipher specification |
1427 | * cipher[:keycount]-mode-iv:ivopts |
1428 | */ |
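/*
 * Parsing examples (illustrative):
 *   "aes-cbc-essiv:sha256" -> cipher "aes", chainmode "cbc",
 *                             ivmode "essiv", ivopts "sha256"
 *   "aes:64-cbc-lmk"       -> cipher "aes", keycount 64,
 *                             chainmode "cbc", ivmode "lmk"
 */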
1429 | tmp = cipher_in; |
1430 | keycount = strsep(&tmp, "-"); |
1431 | cipher = strsep(&keycount, ":"); |
1432 | |
1433 | if (!keycount) |
1434 | cc->tfms_count = 1; |
1435 | else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 || |
1436 | !is_power_of_2(cc->tfms_count)) { |
1437 | ti->error = "Bad cipher key count specification"; |
1438 | return -EINVAL; |
1439 | } |
1440 | cc->key_parts = cc->tfms_count; |
1441 | |
1442 | cc->cipher = kstrdup(cipher, GFP_KERNEL); |
1443 | if (!cc->cipher) |
1444 | goto bad_mem; |
1445 | |
1446 | chainmode = strsep(&tmp, "-"); |
1447 | ivopts = strsep(&tmp, "-"); |
1448 | ivmode = strsep(&ivopts, ":"); |
1449 | |
1450 | if (tmp) |
1451 | DMWARN("Ignoring unexpected additional cipher options"); |
1452 | |
1453 | cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), |
1454 | __alignof__(struct crypt_cpu)); |
1455 | if (!cc->cpu) { |
1456 | ti->error = "Cannot allocate per cpu state"; |
1457 | goto bad_mem; |
1458 | } |
1459 | |
1460 | /* |
1461 | * For compatibility with the original dm-crypt mapping format, if |
1462 | * only the cipher name is supplied, use cbc-plain. |
1463 | */ |
1464 | if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) { |
1465 | chainmode = "cbc"; |
1466 | ivmode = "plain"; |
1467 | } |
1468 | |
1469 | if (strcmp(chainmode, "ecb") && !ivmode) { |
1470 | ti->error = "IV mechanism required"; |
1471 | return -EINVAL; |
1472 | } |
1473 | |
1474 | cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); |
1475 | if (!cipher_api) |
1476 | goto bad_mem; |
1477 | |
1478 | ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, |
1479 | "%s(%s)", chainmode, cipher); |
1480 | if (ret < 0) { |
1481 | kfree(cipher_api); |
1482 | goto bad_mem; |
1483 | } |
1484 | |
1485 | /* Allocate cipher */ |
1486 | ret = crypt_alloc_tfms(cc, cipher_api); |
1487 | if (ret < 0) { |
1488 | ti->error = "Error allocating crypto tfm"; |
1489 | goto bad; |
1490 | } |
1491 | |
1492 | /* Initialize and set key */ |
1493 | ret = crypt_set_key(cc, key); |
1494 | if (ret < 0) { |
1495 | ti->error = "Error decoding and setting key"; |
1496 | goto bad; |
1497 | } |
1498 | |
1499 | /* Initialize IV */ |
1500 | cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc)); |
1501 | if (cc->iv_size) |
1502 | /* at least a 64 bit sector number should fit in our buffer */ |
1503 | cc->iv_size = max(cc->iv_size, |
1504 | (unsigned int)(sizeof(u64) / sizeof(u8))); |
1505 | else if (ivmode) { |
1506 | DMWARN("Selected cipher does not support IVs"); |
1507 | ivmode = NULL; |
1508 | } |
1509 | |
1510 | /* Choose ivmode, see comments at iv code. */ |
1511 | if (ivmode == NULL) |
1512 | cc->iv_gen_ops = NULL; |
1513 | else if (strcmp(ivmode, "plain") == 0) |
1514 | cc->iv_gen_ops = &crypt_iv_plain_ops; |
1515 | else if (strcmp(ivmode, "plain64") == 0) |
1516 | cc->iv_gen_ops = &crypt_iv_plain64_ops; |
1517 | else if (strcmp(ivmode, "essiv") == 0) |
1518 | cc->iv_gen_ops = &crypt_iv_essiv_ops; |
1519 | else if (strcmp(ivmode, "benbi") == 0) |
1520 | cc->iv_gen_ops = &crypt_iv_benbi_ops; |
1521 | else if (strcmp(ivmode, "null") == 0) |
1522 | cc->iv_gen_ops = &crypt_iv_null_ops; |
1523 | else if (strcmp(ivmode, "lmk") == 0) { |
1524 | cc->iv_gen_ops = &crypt_iv_lmk_ops; |
1525 | /* Versions 2 and 3 are recognised according |
1526 | * to the length of the provided multi-key string. |
1527 | * If present (version 3), the last key is used as the IV seed. |
1528 | */ |
1529 | if (cc->key_size % cc->key_parts) |
1530 | cc->key_parts++; |
1531 | } else { |
1532 | ret = -EINVAL; |
1533 | ti->error = "Invalid IV mode"; |
1534 | goto bad; |
1535 | } |
1536 | |
1537 | /* Allocate IV */ |
1538 | if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { |
1539 | ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); |
1540 | if (ret < 0) { |
1541 | ti->error = "Error creating IV"; |
1542 | goto bad; |
1543 | } |
1544 | } |
1545 | |
1546 | /* Initialize IV (set keys for ESSIV etc) */ |
1547 | if (cc->iv_gen_ops && cc->iv_gen_ops->init) { |
1548 | ret = cc->iv_gen_ops->init(cc); |
1549 | if (ret < 0) { |
1550 | ti->error = "Error initialising IV"; |
1551 | goto bad; |
1552 | } |
1553 | } |
1554 | |
1555 | ret = 0; |
1556 | bad: |
1557 | kfree(cipher_api); |
1558 | return ret; |
1559 | |
1560 | bad_mem: |
1561 | ti->error = "Cannot allocate cipher strings"; |
1562 | return -ENOMEM; |
1563 | } |
1564 | |
1565 | /* |
1566 | * Construct an encryption mapping: |
1567 | * <cipher> <key> <iv_offset> <dev_path> <start> |
1568 | */ |
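/*
 * Illustrative table line (all values made up), loaded e.g. with
 * "dmsetup create cryptdev":
 *   0 2097152 crypt aes-cbc-essiv:sha256 <64 hex digit key> 0 /dev/sdb 0
 * An optional feature argument block such as "1 allow_discards" may
 * follow <start>.
 */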
1569 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1570 | { |
1571 | struct crypt_config *cc; |
1572 | unsigned int key_size, opt_params; |
1573 | unsigned long long tmpll; |
1574 | int ret; |
1575 | struct dm_arg_set as; |
1576 | const char *opt_string; |
1577 | char dummy; |
1578 | |
1579 | static struct dm_arg _args[] = { |
1580 | {0, 1, "Invalid number of feature args"}, |
1581 | }; |
1582 | |
1583 | if (argc < 5) { |
1584 | ti->error = "Not enough arguments"; |
1585 | return -EINVAL; |
1586 | } |
1587 | |
1588 | key_size = strlen(argv[1]) >> 1; |
1589 | |
1590 | cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); |
1591 | if (!cc) { |
1592 | ti->error = "Cannot allocate encryption context"; |
1593 | return -ENOMEM; |
1594 | } |
1595 | cc->key_size = key_size; |
1596 | |
1597 | ti->private = cc; |
1598 | ret = crypt_ctr_cipher(ti, argv[0], argv[1]); |
1599 | if (ret < 0) |
1600 | goto bad; |
1601 | |
1602 | ret = -ENOMEM; |
1603 | cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); |
1604 | if (!cc->io_pool) { |
1605 | ti->error = "Cannot allocate crypt io mempool"; |
1606 | goto bad; |
1607 | } |
1608 | |
1609 | cc->dmreq_start = sizeof(struct ablkcipher_request); |
1610 | cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); |
1611 | cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); |
1612 | cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) & |
1613 | ~(crypto_tfm_ctx_alignment() - 1); |
1614 | |
1615 | cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + |
1616 | sizeof(struct dm_crypt_request) + cc->iv_size); |
1617 | if (!cc->req_pool) { |
1618 | ti->error = "Cannot allocate crypt request mempool"; |
1619 | goto bad; |
1620 | } |
1621 | |
1622 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); |
1623 | if (!cc->page_pool) { |
1624 | ti->error = "Cannot allocate page mempool"; |
1625 | goto bad; |
1626 | } |
1627 | |
1628 | cc->bs = bioset_create(MIN_IOS, 0); |
1629 | if (!cc->bs) { |
1630 | ti->error = "Cannot allocate crypt bioset"; |
1631 | goto bad; |
1632 | } |
1633 | |
1634 | ret = -EINVAL; |
1635 | if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) { |
1636 | ti->error = "Invalid iv_offset sector"; |
1637 | goto bad; |
1638 | } |
1639 | cc->iv_offset = tmpll; |
1640 | |
1641 | if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) { |
1642 | ti->error = "Device lookup failed"; |
1643 | goto bad; |
1644 | } |
1645 | |
1646 | if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { |
1647 | ti->error = "Invalid device sector"; |
1648 | goto bad; |
1649 | } |
1650 | cc->start = tmpll; |
1651 | |
1652 | argv += 5; |
1653 | argc -= 5; |
1654 | |
1655 | /* Optional parameters */ |
1656 | if (argc) { |
1657 | as.argc = argc; |
1658 | as.argv = argv; |
1659 | |
1660 | ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); |
1661 | if (ret) |
1662 | goto bad; |
1663 | |
1664 | opt_string = dm_shift_arg(&as); |
1665 | |
1666 | if (opt_params == 1 && opt_string && |
1667 | !strcasecmp(opt_string, "allow_discards")) |
1668 | ti->num_discard_requests = 1; |
1669 | else if (opt_params) { |
1670 | ret = -EINVAL; |
1671 | ti->error = "Invalid feature arguments"; |
1672 | goto bad; |
1673 | } |
1674 | } |
1675 | |
1676 | ret = -ENOMEM; |
1677 | cc->io_queue = alloc_workqueue("kcryptd_io", |
1678 | WQ_NON_REENTRANT| |
1679 | WQ_MEM_RECLAIM, |
1680 | 1); |
1681 | if (!cc->io_queue) { |
1682 | ti->error = "Couldn't create kcryptd io queue"; |
1683 | goto bad; |
1684 | } |
1685 | |
1686 | cc->crypt_queue = alloc_workqueue("kcryptd", |
1687 | WQ_NON_REENTRANT| |
1688 | WQ_CPU_INTENSIVE| |
1689 | WQ_MEM_RECLAIM, |
1690 | 1); |
1691 | if (!cc->crypt_queue) { |
1692 | ti->error = "Couldn't create kcryptd queue"; |
1693 | goto bad; |
1694 | } |
1695 | |
1696 | ti->num_flush_requests = 1; |
1697 | ti->discard_zeroes_data_unsupported = true; |
1698 | |
1699 | return 0; |
1700 | |
1701 | bad: |
1702 | crypt_dtr(ti); |
1703 | return ret; |
1704 | } |
1705 | |
1706 | static int crypt_map(struct dm_target *ti, struct bio *bio, |
1707 | union map_info *map_context) |
1708 | { |
1709 | struct dm_crypt_io *io; |
1710 | struct crypt_config *cc = ti->private; |
1711 | |
1712 | /* |
1713 | * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. |
1714 | * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight |
1715 | * - for REQ_DISCARD caller must use flush if IO ordering matters |
1716 | */ |
1717 | if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { |
1718 | bio->bi_bdev = cc->dev->bdev; |
1719 | if (bio_sectors(bio)) |
1720 | bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); |
1721 | return DM_MAPIO_REMAPPED; |
1722 | } |
1723 | |
1724 | io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); |
1725 | |
1726 | if (bio_data_dir(io->base_bio) == READ) { |
1727 | if (kcryptd_io_read(io, GFP_NOWAIT)) |
1728 | kcryptd_queue_io(io); |
1729 | } else |
1730 | kcryptd_queue_crypt(io); |
1731 | |
1732 | return DM_MAPIO_SUBMITTED; |
1733 | } |
1734 | |
1735 | static int crypt_status(struct dm_target *ti, status_type_t type, |
1736 | unsigned status_flags, char *result, unsigned maxlen) |
1737 | { |
1738 | struct crypt_config *cc = ti->private; |
1739 | unsigned int sz = 0; |
1740 | |
1741 | switch (type) { |
1742 | case STATUSTYPE_INFO: |
1743 | result[0] = '\0'; |
1744 | break; |
1745 | |
1746 | case STATUSTYPE_TABLE: |
1747 | DMEMIT("%s ", cc->cipher_string); |
1748 | |
1749 | if (cc->key_size > 0) { |
1750 | if ((maxlen - sz) < ((cc->key_size << 1) + 1)) |
1751 | return -ENOMEM; |
1752 | |
1753 | crypt_encode_key(result + sz, cc->key, cc->key_size); |
1754 | sz += cc->key_size << 1; |
1755 | } else { |
1756 | if (sz >= maxlen) |
1757 | return -ENOMEM; |
1758 | result[sz++] = '-'; |
1759 | } |
1760 | |
1761 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, |
1762 | cc->dev->name, (unsigned long long)cc->start); |
1763 | |
1764 | if (ti->num_discard_requests) |
1765 | DMEMIT(" 1 allow_discards"); |
1766 | |
1767 | break; |
1768 | } |
1769 | return 0; |
1770 | } |
1771 | |
1772 | static void crypt_postsuspend(struct dm_target *ti) |
1773 | { |
1774 | struct crypt_config *cc = ti->private; |
1775 | |
1776 | set_bit(DM_CRYPT_SUSPENDED, &cc->flags); |
1777 | } |
1778 | |
1779 | static int crypt_preresume(struct dm_target *ti) |
1780 | { |
1781 | struct crypt_config *cc = ti->private; |
1782 | |
1783 | if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { |
1784 | DMERR("aborting resume - crypt key is not set."); |
1785 | return -EAGAIN; |
1786 | } |
1787 | |
1788 | return 0; |
1789 | } |
1790 | |
1791 | static void crypt_resume(struct dm_target *ti) |
1792 | { |
1793 | struct crypt_config *cc = ti->private; |
1794 | |
1795 | clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); |
1796 | } |
1797 | |
1798 | /* Message interface |
1799 | * key set <key> |
1800 | * key wipe |
1801 | */ |
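/*
 * Illustrative usage (the device must be suspended first):
 *   dmsetup suspend <dev>
 *   dmsetup message <dev> 0 key wipe
 *   dmsetup message <dev> 0 key set <hex key>
 *   dmsetup resume <dev>
 */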
1802 | static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) |
1803 | { |
1804 | struct crypt_config *cc = ti->private; |
1805 | int ret = -EINVAL; |
1806 | |
1807 | if (argc < 2) |
1808 | goto error; |
1809 | |
1810 | if (!strcasecmp(argv[0], "key")) { |
1811 | if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { |
1812 | DMWARN("not suspended during key manipulation."); |
1813 | return -EINVAL; |
1814 | } |
1815 | if (argc == 3 && !strcasecmp(argv[1], "set")) { |
1816 | ret = crypt_set_key(cc, argv[2]); |
1817 | if (ret) |
1818 | return ret; |
1819 | if (cc->iv_gen_ops && cc->iv_gen_ops->init) |
1820 | ret = cc->iv_gen_ops->init(cc); |
1821 | return ret; |
1822 | } |
1823 | if (argc == 2 && !strcasecmp(argv[1], "wipe")) { |
1824 | if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { |
1825 | ret = cc->iv_gen_ops->wipe(cc); |
1826 | if (ret) |
1827 | return ret; |
1828 | } |
1829 | return crypt_wipe_key(cc); |
1830 | } |
1831 | } |
1832 | |
1833 | error: |
1834 | DMWARN("unrecognised message received."); |
1835 | return -EINVAL; |
1836 | } |
1837 | |
1838 | static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, |
1839 | struct bio_vec *biovec, int max_size) |
1840 | { |
1841 | struct crypt_config *cc = ti->private; |
1842 | struct request_queue *q = bdev_get_queue(cc->dev->bdev); |
1843 | |
1844 | if (!q->merge_bvec_fn) |
1845 | return max_size; |
1846 | |
1847 | bvm->bi_bdev = cc->dev->bdev; |
1848 | bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector); |
1849 | |
1850 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); |
1851 | } |
1852 | |
1853 | static int crypt_iterate_devices(struct dm_target *ti, |
1854 | iterate_devices_callout_fn fn, void *data) |
1855 | { |
1856 | struct crypt_config *cc = ti->private; |
1857 | |
1858 | return fn(ti, cc->dev, cc->start, ti->len, data); |
1859 | } |
1860 | |
1861 | static struct target_type crypt_target = { |
1862 | .name = "crypt", |
1863 | .version = {1, 11, 0}, |
1864 | .module = THIS_MODULE, |
1865 | .ctr = crypt_ctr, |
1866 | .dtr = crypt_dtr, |
1867 | .map = crypt_map, |
1868 | .status = crypt_status, |
1869 | .postsuspend = crypt_postsuspend, |
1870 | .preresume = crypt_preresume, |
1871 | .resume = crypt_resume, |
1872 | .message = crypt_message, |
1873 | .merge = crypt_merge, |
1874 | .iterate_devices = crypt_iterate_devices, |
1875 | }; |
1876 | |
1877 | static int __init dm_crypt_init(void) |
1878 | { |
1879 | int r; |
1880 | |
1881 | _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0); |
1882 | if (!_crypt_io_pool) |
1883 | return -ENOMEM; |
1884 | |
1885 | r = dm_register_target(&crypt_target); |
1886 | if (r < 0) { |
1887 | DMERR("register failed %d", r); |
1888 | kmem_cache_destroy(_crypt_io_pool); |
1889 | } |
1890 | |
1891 | return r; |
1892 | } |
1893 | |
1894 | static void __exit dm_crypt_exit(void) |
1895 | { |
1896 | dm_unregister_target(&crypt_target); |
1897 | kmem_cache_destroy(_crypt_io_pool); |
1898 | } |
1899 | |
1900 | module_init(dm_crypt_init); |
1901 | module_exit(dm_crypt_exit); |
1902 | |
1903 | MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); |
1904 | MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); |
1905 | MODULE_LICENSE("GPL"); |
1906 |