1 | /* |
2 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> |
3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> |
4 | * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. |
5 | * |
6 | * This file is released under the GPL. |
7 | */ |
8 | |
9 | #include <linux/completion.h> |
10 | #include <linux/err.h> |
11 | #include <linux/module.h> |
12 | #include <linux/init.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/bio.h> |
15 | #include <linux/blkdev.h> |
16 | #include <linux/mempool.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/crypto.h> |
19 | #include <linux/workqueue.h> |
20 | #include <linux/backing-dev.h> |
21 | #include <asm/atomic.h> |
22 | #include <linux/scatterlist.h> |
23 | #include <asm/page.h> |
24 | #include <asm/unaligned.h> |
25 | |
26 | #include <linux/device-mapper.h> |
27 | |
28 | #define DM_MSG_PREFIX "crypt" |
29 | #define MESG_STR(x) x, sizeof(x) |
30 | |
31 | /* |
32 | * context holding the current state of a multi-part conversion |
33 | */ |
34 | struct convert_context { |
35 | struct completion restart; |
36 | struct bio *bio_in; |
37 | struct bio *bio_out; |
38 | unsigned int offset_in; |
39 | unsigned int offset_out; |
40 | unsigned int idx_in; |
41 | unsigned int idx_out; |
42 | sector_t sector; |
43 | atomic_t pending; |
44 | }; |
45 | |
46 | /* |
47 | * per bio private data |
48 | */ |
49 | struct dm_crypt_io { |
50 | struct dm_target *target; |
51 | struct bio *base_bio; |
52 | struct work_struct work; |
53 | |
54 | struct convert_context ctx; |
55 | |
56 | atomic_t pending; |
57 | int error; |
58 | sector_t sector; |
59 | struct dm_crypt_io *base_io; |
60 | }; |
61 | |
62 | struct dm_crypt_request { |
63 | struct convert_context *ctx; |
64 | struct scatterlist sg_in; |
65 | struct scatterlist sg_out; |
66 | }; |
67 | |
68 | struct crypt_config; |
69 | |
70 | struct crypt_iv_operations { |
71 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, |
72 | const char *opts); |
73 | void (*dtr)(struct crypt_config *cc); |
74 | int (*init)(struct crypt_config *cc); |
75 | int (*wipe)(struct crypt_config *cc); |
76 | int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); |
77 | }; |
78 | |
79 | struct iv_essiv_private { |
80 | struct crypto_cipher *tfm; |
81 | struct crypto_hash *hash_tfm; |
82 | u8 *salt; |
83 | }; |
84 | |
85 | struct iv_benbi_private { |
86 | int shift; |
87 | }; |
88 | |
89 | /* |
90 | * Crypt: maps a linear range of a block device |
91 | * and encrypts / decrypts at the same time. |
92 | */ |
93 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; |
94 | struct crypt_config { |
95 | struct dm_dev *dev; |
96 | sector_t start; |
97 | |
98 | /* |
99 | * pool for per bio private data, crypto requests and |
100 | * encryption requests/buffer pages |
101 | */ |
102 | mempool_t *io_pool; |
103 | mempool_t *req_pool; |
104 | mempool_t *page_pool; |
105 | struct bio_set *bs; |
106 | |
107 | struct workqueue_struct *io_queue; |
108 | struct workqueue_struct *crypt_queue; |
109 | |
110 | /* |
111 | * crypto related data |
112 | */ |
113 | struct crypt_iv_operations *iv_gen_ops; |
114 | char *iv_mode; |
115 | union { |
116 | struct iv_essiv_private essiv; |
117 | struct iv_benbi_private benbi; |
118 | } iv_gen_private; |
119 | sector_t iv_offset; |
120 | unsigned int iv_size; |
121 | |
122 | /* |
123 | * Layout of each crypto request: |
124 | * |
125 | * struct ablkcipher_request |
126 | * context |
127 | * padding |
128 | * struct dm_crypt_request |
129 | * padding |
130 | * IV |
131 | * |
132 | * The padding is added so that dm_crypt_request and the IV are |
133 | * correctly aligned. |
134 | */ |
135 | unsigned int dmreq_start; |
136 | struct ablkcipher_request *req; |
137 | |
138 | char cipher[CRYPTO_MAX_ALG_NAME]; |
139 | char chainmode[CRYPTO_MAX_ALG_NAME]; |
140 | struct crypto_ablkcipher *tfm; |
141 | unsigned long flags; |
142 | unsigned int key_size; |
143 | u8 key[0]; |
144 | }; |
145 | |
146 | #define MIN_IOS 16 |
147 | #define MIN_POOL_PAGES 32 |
148 | #define MIN_BIO_PAGES 8 |
149 | |
150 | static struct kmem_cache *_crypt_io_pool; |
151 | |
152 | static void clone_init(struct dm_crypt_io *, struct bio *); |
153 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
154 | |
155 | /* |
156 | * Different IV generation algorithms: |
157 | * |
158 | * plain: the initial vector is the 32-bit little-endian version of the sector |
159 | * number, padded with zeros if necessary. |
160 | * |
161 | * plain64: the initial vector is the 64-bit little-endian version of the sector |
162 | * number, padded with zeros if necessary. |
163 | * |
164 | * essiv: "encrypted sector|salt initial vector", the sector number is |
165 | * encrypted with the bulk cipher using a salt as key. The salt |
166 | * should be derived from the bulk cipher's key via hashing. |
167 | * |
168 | * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 |
168 | * (needed for LRW-32-AES and possibly other narrow block modes) |
170 | * |
171 | * null: the initial vector is always zero. Provides compatibility with |
172 | * obsolete loop_fish2 devices. Do not use for new devices. |
173 | * |
174 | * plumb: unimplemented, see: |
175 | * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 |
176 | */ |
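/*
 * For example, with a 16-byte IV and sector number 0x1122334455667788:
 *   plain   -> 88 77 66 55 00 00 00 00 00 00 00 00 00 00 00 00
 *   plain64 -> 88 77 66 55 44 33 22 11 00 00 00 00 00 00 00 00
 * i.e. the sector number in little-endian byte order, zero-padded to
 * the cipher's IV size.
 */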
177 | |
178 | static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) |
179 | { |
180 | memset(iv, 0, cc->iv_size); |
181 | *(u32 *)iv = cpu_to_le32(sector & 0xffffffff); |
182 | |
183 | return 0; |
184 | } |
185 | |
186 | static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, |
187 | sector_t sector) |
188 | { |
189 | memset(iv, 0, cc->iv_size); |
190 | *(u64 *)iv = cpu_to_le64(sector); |
191 | |
192 | return 0; |
193 | } |
194 | |
195 | /* Initialise ESSIV - compute salt but no local memory allocations */ |
196 | static int crypt_iv_essiv_init(struct crypt_config *cc) |
197 | { |
198 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
199 | struct hash_desc desc; |
200 | struct scatterlist sg; |
201 | int err; |
202 | |
203 | sg_init_one(&sg, cc->key, cc->key_size); |
204 | desc.tfm = essiv->hash_tfm; |
205 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
206 | |
207 | err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); |
208 | if (err) |
209 | return err; |
210 | |
211 | return crypto_cipher_setkey(essiv->tfm, essiv->salt, |
212 | crypto_hash_digestsize(essiv->hash_tfm)); |
213 | } |
214 | |
215 | /* Wipe salt and reset key derived from volume key */ |
216 | static int crypt_iv_essiv_wipe(struct crypt_config *cc) |
217 | { |
218 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
219 | unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); |
220 | |
221 | memset(essiv->salt, 0, salt_size); |
222 | |
223 | return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size); |
224 | } |
225 | |
226 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) |
227 | { |
228 | struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
229 | |
230 | crypto_free_cipher(essiv->tfm); |
231 | essiv->tfm = NULL; |
232 | |
233 | crypto_free_hash(essiv->hash_tfm); |
234 | essiv->hash_tfm = NULL; |
235 | |
236 | kzfree(essiv->salt); |
237 | essiv->salt = NULL; |
238 | } |
239 | |
240 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, |
241 | const char *opts) |
242 | { |
243 | struct crypto_cipher *essiv_tfm = NULL; |
244 | struct crypto_hash *hash_tfm = NULL; |
245 | u8 *salt = NULL; |
246 | int err; |
247 | |
248 | if (!opts) { |
249 | ti->error = "Digest algorithm missing for ESSIV mode"; |
250 | return -EINVAL; |
251 | } |
252 | |
253 | /* Allocate hash algorithm */ |
254 | hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); |
255 | if (IS_ERR(hash_tfm)) { |
256 | ti->error = "Error initializing ESSIV hash"; |
257 | err = PTR_ERR(hash_tfm); |
258 | goto bad; |
259 | } |
260 | |
261 | salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); |
262 | if (!salt) { |
263 | ti->error = "Error kmallocing salt storage in ESSIV"; |
264 | err = -ENOMEM; |
265 | goto bad; |
266 | } |
267 | |
268 | /* Allocate essiv_tfm */ |
269 | essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
270 | if (IS_ERR(essiv_tfm)) { |
271 | ti->error = "Error allocating crypto tfm for ESSIV"; |
272 | err = PTR_ERR(essiv_tfm); |
273 | goto bad; |
274 | } |
275 | if (crypto_cipher_blocksize(essiv_tfm) != |
276 | crypto_ablkcipher_ivsize(cc->tfm)) { |
277 | ti->error = "Block size of ESSIV cipher does " |
278 | "not match IV size of block cipher"; |
279 | err = -EINVAL; |
280 | goto bad; |
281 | } |
282 | |
283 | cc->iv_gen_private.essiv.salt = salt; |
284 | cc->iv_gen_private.essiv.tfm = essiv_tfm; |
285 | cc->iv_gen_private.essiv.hash_tfm = hash_tfm; |
286 | |
287 | return 0; |
288 | |
289 | bad: |
290 | if (essiv_tfm && !IS_ERR(essiv_tfm)) |
291 | crypto_free_cipher(essiv_tfm); |
292 | if (hash_tfm && !IS_ERR(hash_tfm)) |
293 | crypto_free_hash(hash_tfm); |
294 | kfree(salt); |
295 | return err; |
296 | } |
297 | |
298 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) |
299 | { |
300 | memset(iv, 0, cc->iv_size); |
301 | *(u64 *)iv = cpu_to_le64(sector); |
302 | crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv); |
303 | return 0; |
304 | } |
305 | |
306 | static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, |
307 | const char *opts) |
308 | { |
309 | unsigned bs = crypto_ablkcipher_blocksize(cc->tfm); |
310 | int log = ilog2(bs); |
311 | |
312 | /* we need to calculate how far we must shift the sector count |
313 | * to get the cipher block count; this shift is used in _gen */ |
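/* For example, a 16-byte wide block cipher (AES) gives log = 4 and
 * shift = 9 - 4 = 5: each 512-byte sector spans 2^5 = 32 cipher blocks. */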
314 | |
315 | if (1 << log != bs) { |
316 | ti->error = "cipher blocksize is not a power of 2"; |
317 | return -EINVAL; |
318 | } |
319 | |
320 | if (log > 9) { |
321 | ti->error = "cipher blocksize is > 512"; |
322 | return -EINVAL; |
323 | } |
324 | |
325 | cc->iv_gen_private.benbi.shift = 9 - log; |
326 | |
327 | return 0; |
328 | } |
329 | |
330 | static void crypt_iv_benbi_dtr(struct crypt_config *cc) |
331 | { |
332 | } |
333 | |
334 | static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) |
335 | { |
336 | __be64 val; |
337 | |
338 | memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest of IV is set below */ |
339 | |
340 | val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1); |
341 | put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); |
342 | |
343 | return 0; |
344 | } |
345 | |
346 | static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector) |
347 | { |
348 | memset(iv, 0, cc->iv_size); |
349 | |
350 | return 0; |
351 | } |
352 | |
353 | static struct crypt_iv_operations crypt_iv_plain_ops = { |
354 | .generator = crypt_iv_plain_gen |
355 | }; |
356 | |
357 | static struct crypt_iv_operations crypt_iv_plain64_ops = { |
358 | .generator = crypt_iv_plain64_gen |
359 | }; |
360 | |
361 | static struct crypt_iv_operations crypt_iv_essiv_ops = { |
362 | .ctr = crypt_iv_essiv_ctr, |
363 | .dtr = crypt_iv_essiv_dtr, |
364 | .init = crypt_iv_essiv_init, |
365 | .wipe = crypt_iv_essiv_wipe, |
366 | .generator = crypt_iv_essiv_gen |
367 | }; |
368 | |
369 | static struct crypt_iv_operations crypt_iv_benbi_ops = { |
370 | .ctr = crypt_iv_benbi_ctr, |
371 | .dtr = crypt_iv_benbi_dtr, |
372 | .generator = crypt_iv_benbi_gen |
373 | }; |
374 | |
375 | static struct crypt_iv_operations crypt_iv_null_ops = { |
376 | .generator = crypt_iv_null_gen |
377 | }; |
378 | |
379 | static void crypt_convert_init(struct crypt_config *cc, |
380 | struct convert_context *ctx, |
381 | struct bio *bio_out, struct bio *bio_in, |
382 | sector_t sector) |
383 | { |
384 | ctx->bio_in = bio_in; |
385 | ctx->bio_out = bio_out; |
386 | ctx->offset_in = 0; |
387 | ctx->offset_out = 0; |
388 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; |
389 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; |
390 | ctx->sector = sector + cc->iv_offset; |
391 | init_completion(&ctx->restart); |
392 | } |
393 | |
394 | static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, |
395 | struct ablkcipher_request *req) |
396 | { |
397 | return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); |
398 | } |
399 | |
400 | static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc, |
401 | struct dm_crypt_request *dmreq) |
402 | { |
403 | return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start); |
404 | } |
405 | |
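/*
 * Convert one 512-byte sector: build single-entry scatterlists for the
 * source and destination pages, advance the bio offsets/indices, generate
 * the IV for this sector and submit the request to the cipher.
 */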
406 | static int crypt_convert_block(struct crypt_config *cc, |
407 | struct convert_context *ctx, |
408 | struct ablkcipher_request *req) |
409 | { |
410 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); |
411 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); |
412 | struct dm_crypt_request *dmreq; |
413 | u8 *iv; |
414 | int r = 0; |
415 | |
416 | dmreq = dmreq_of_req(cc, req); |
417 | iv = (u8 *)ALIGN((unsigned long)(dmreq + 1), |
418 | crypto_ablkcipher_alignmask(cc->tfm) + 1); |
419 | |
420 | dmreq->ctx = ctx; |
421 | sg_init_table(&dmreq->sg_in, 1); |
422 | sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, |
423 | bv_in->bv_offset + ctx->offset_in); |
424 | |
425 | sg_init_table(&dmreq->sg_out, 1); |
426 | sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, |
427 | bv_out->bv_offset + ctx->offset_out); |
428 | |
429 | ctx->offset_in += 1 << SECTOR_SHIFT; |
430 | if (ctx->offset_in >= bv_in->bv_len) { |
431 | ctx->offset_in = 0; |
432 | ctx->idx_in++; |
433 | } |
434 | |
435 | ctx->offset_out += 1 << SECTOR_SHIFT; |
436 | if (ctx->offset_out >= bv_out->bv_len) { |
437 | ctx->offset_out = 0; |
438 | ctx->idx_out++; |
439 | } |
440 | |
441 | if (cc->iv_gen_ops) { |
442 | r = cc->iv_gen_ops->generator(cc, iv, ctx->sector); |
443 | if (r < 0) |
444 | return r; |
445 | } |
446 | |
447 | ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, |
448 | 1 << SECTOR_SHIFT, iv); |
449 | |
450 | if (bio_data_dir(ctx->bio_in) == WRITE) |
451 | r = crypto_ablkcipher_encrypt(req); |
452 | else |
453 | r = crypto_ablkcipher_decrypt(req); |
454 | |
455 | return r; |
456 | } |
457 | |
458 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
459 | int error); |
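/*
 * Reuse the cached request in cc->req if the previous block completed
 * synchronously; cc->req is set to NULL once a request has gone
 * asynchronous, so a fresh one is then taken from the mempool.
 */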
460 | static void crypt_alloc_req(struct crypt_config *cc, |
461 | struct convert_context *ctx) |
462 | { |
463 | if (!cc->req) |
464 | cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); |
465 | ablkcipher_request_set_tfm(cc->req, cc->tfm); |
466 | ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | |
467 | CRYPTO_TFM_REQ_MAY_SLEEP, |
468 | kcryptd_async_done, |
469 | dmreq_of_req(cc, cc->req)); |
470 | } |
471 | |
472 | /* |
473 | * Encrypt / decrypt data from one bio to another one (can be the same one) |
474 | */ |
475 | static int crypt_convert(struct crypt_config *cc, |
476 | struct convert_context *ctx) |
477 | { |
478 | int r; |
479 | |
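/*
 * ctx->pending starts at 1 so the conversion cannot be reported complete
 * while blocks are still being submitted; the caller drops this initial
 * reference after crypt_convert() returns.
 */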
480 | atomic_set(&ctx->pending, 1); |
481 | |
482 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && |
483 | ctx->idx_out < ctx->bio_out->bi_vcnt) { |
484 | |
485 | crypt_alloc_req(cc, ctx); |
486 | |
487 | atomic_inc(&ctx->pending); |
488 | |
489 | r = crypt_convert_block(cc, ctx, cc->req); |
490 | |
491 | switch (r) { |
492 | /* async */ |
493 | case -EBUSY: |
494 | wait_for_completion(&ctx->restart); |
495 | INIT_COMPLETION(ctx->restart); |
496 | /* fall through*/ |
497 | case -EINPROGRESS: |
498 | cc->req = NULL; |
499 | ctx->sector++; |
500 | continue; |
501 | |
502 | /* sync */ |
503 | case 0: |
504 | atomic_dec(&ctx->pending); |
505 | ctx->sector++; |
506 | cond_resched(); |
507 | continue; |
508 | |
509 | /* error */ |
510 | default: |
511 | atomic_dec(&ctx->pending); |
512 | return r; |
513 | } |
514 | } |
515 | |
516 | return 0; |
517 | } |
518 | |
519 | static void dm_crypt_bio_destructor(struct bio *bio) |
520 | { |
521 | struct dm_crypt_io *io = bio->bi_private; |
522 | struct crypt_config *cc = io->target->private; |
523 | |
524 | bio_free(bio, cc->bs); |
525 | } |
526 | |
527 | /* |
528 | * Generate a new unfragmented bio with the given size |
529 | * This should never violate the device limitations |
530 | * May return a smaller bio when running out of pages, indicated by |
531 | * *out_of_pages set to 1. |
532 | */ |
533 | static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, |
534 | unsigned *out_of_pages) |
535 | { |
536 | struct crypt_config *cc = io->target->private; |
537 | struct bio *clone; |
538 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
539 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; |
540 | unsigned i, len; |
541 | struct page *page; |
542 | |
543 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); |
544 | if (!clone) |
545 | return NULL; |
546 | |
547 | clone_init(io, clone); |
548 | *out_of_pages = 0; |
549 | |
550 | for (i = 0; i < nr_iovecs; i++) { |
551 | page = mempool_alloc(cc->page_pool, gfp_mask); |
552 | if (!page) { |
553 | *out_of_pages = 1; |
554 | break; |
555 | } |
556 | |
557 | /* |
558 | * if additional pages cannot be allocated without waiting, |
559 | * return a partially allocated bio; the caller will then try |
560 | * to allocate additional bios while submitting this partial bio |
561 | */ |
562 | if (i == (MIN_BIO_PAGES - 1)) |
563 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
564 | |
565 | len = (size > PAGE_SIZE) ? PAGE_SIZE : size; |
566 | |
567 | if (!bio_add_page(clone, page, len, 0)) { |
568 | mempool_free(page, cc->page_pool); |
569 | break; |
570 | } |
571 | |
572 | size -= len; |
573 | } |
574 | |
575 | if (!clone->bi_size) { |
576 | bio_put(clone); |
577 | return NULL; |
578 | } |
579 | |
580 | return clone; |
581 | } |
582 | |
583 | static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) |
584 | { |
585 | unsigned int i; |
586 | struct bio_vec *bv; |
587 | |
588 | for (i = 0; i < clone->bi_vcnt; i++) { |
589 | bv = bio_iovec_idx(clone, i); |
590 | BUG_ON(!bv->bv_page); |
591 | mempool_free(bv->bv_page, cc->page_pool); |
592 | bv->bv_page = NULL; |
593 | } |
594 | } |
595 | |
596 | static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, |
597 | struct bio *bio, sector_t sector) |
598 | { |
599 | struct crypt_config *cc = ti->private; |
600 | struct dm_crypt_io *io; |
601 | |
602 | io = mempool_alloc(cc->io_pool, GFP_NOIO); |
603 | io->target = ti; |
604 | io->base_bio = bio; |
605 | io->sector = sector; |
606 | io->error = 0; |
607 | io->base_io = NULL; |
608 | atomic_set(&io->pending, 0); |
609 | |
610 | return io; |
611 | } |
612 | |
613 | static void crypt_inc_pending(struct dm_crypt_io *io) |
614 | { |
615 | atomic_inc(&io->pending); |
616 | } |
617 | |
618 | /* |
619 | * One of the bios was finished. Check for completion of |
620 | * the whole request and correctly clean up the buffer. |
621 | * If base_io is set, wait for the last fragment to complete. |
622 | */ |
623 | static void crypt_dec_pending(struct dm_crypt_io *io) |
624 | { |
625 | struct crypt_config *cc = io->target->private; |
626 | struct bio *base_bio = io->base_bio; |
627 | struct dm_crypt_io *base_io = io->base_io; |
628 | int error = io->error; |
629 | |
630 | if (!atomic_dec_and_test(&io->pending)) |
631 | return; |
632 | |
633 | mempool_free(io, cc->io_pool); |
634 | |
635 | if (likely(!base_io)) |
636 | bio_endio(base_bio, error); |
637 | else { |
638 | if (error && !base_io->error) |
639 | base_io->error = error; |
640 | crypt_dec_pending(base_io); |
641 | } |
642 | } |
643 | |
644 | /* |
645 | * kcryptd/kcryptd_io: |
646 | * |
647 | * Needed because it would be very unwise to do decryption in an |
648 | * interrupt context. |
649 | * |
650 | * kcryptd performs the actual encryption or decryption. |
651 | * |
652 | * kcryptd_io performs the IO submission. |
653 | * |
654 | * They must be separated as otherwise the final stages could be |
655 | * starved by new requests which can block in the first stages due |
656 | * to memory allocation. |
657 | */ |
658 | static void crypt_endio(struct bio *clone, int error) |
659 | { |
660 | struct dm_crypt_io *io = clone->bi_private; |
661 | struct crypt_config *cc = io->target->private; |
662 | unsigned rw = bio_data_dir(clone); |
663 | |
664 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) |
665 | error = -EIO; |
666 | |
667 | /* |
668 | * free the processed pages |
669 | */ |
670 | if (rw == WRITE) |
671 | crypt_free_buffer_pages(cc, clone); |
672 | |
673 | bio_put(clone); |
674 | |
675 | if (rw == READ && !error) { |
676 | kcryptd_queue_crypt(io); |
677 | return; |
678 | } |
679 | |
680 | if (unlikely(error)) |
681 | io->error = error; |
682 | |
683 | crypt_dec_pending(io); |
684 | } |
685 | |
686 | static void clone_init(struct dm_crypt_io *io, struct bio *clone) |
687 | { |
688 | struct crypt_config *cc = io->target->private; |
689 | |
690 | clone->bi_private = io; |
691 | clone->bi_end_io = crypt_endio; |
692 | clone->bi_bdev = cc->dev->bdev; |
693 | clone->bi_rw = io->base_bio->bi_rw; |
694 | clone->bi_destructor = dm_crypt_bio_destructor; |
695 | } |
696 | |
697 | static void kcryptd_io_read(struct dm_crypt_io *io) |
698 | { |
699 | struct crypt_config *cc = io->target->private; |
700 | struct bio *base_bio = io->base_bio; |
701 | struct bio *clone; |
702 | |
703 | crypt_inc_pending(io); |
704 | |
705 | /* |
706 | * The block layer might modify the bvec array, so always |
707 | * copy the required bvecs because we need the original |
708 | * one in order to decrypt the whole bio data *afterwards*. |
709 | */ |
710 | clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); |
711 | if (unlikely(!clone)) { |
712 | io->error = -ENOMEM; |
713 | crypt_dec_pending(io); |
714 | return; |
715 | } |
716 | |
717 | clone_init(io, clone); |
718 | clone->bi_idx = 0; |
719 | clone->bi_vcnt = bio_segments(base_bio); |
720 | clone->bi_size = base_bio->bi_size; |
721 | clone->bi_sector = cc->start + io->sector; |
722 | memcpy(clone->bi_io_vec, bio_iovec(base_bio), |
723 | sizeof(struct bio_vec) * clone->bi_vcnt); |
724 | |
725 | generic_make_request(clone); |
726 | } |
727 | |
728 | static void kcryptd_io_write(struct dm_crypt_io *io) |
729 | { |
730 | struct bio *clone = io->ctx.bio_out; |
731 | generic_make_request(clone); |
732 | } |
733 | |
734 | static void kcryptd_io(struct work_struct *work) |
735 | { |
736 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
737 | |
738 | if (bio_data_dir(io->base_bio) == READ) |
739 | kcryptd_io_read(io); |
740 | else |
741 | kcryptd_io_write(io); |
742 | } |
743 | |
744 | static void kcryptd_queue_io(struct dm_crypt_io *io) |
745 | { |
746 | struct crypt_config *cc = io->target->private; |
747 | |
748 | INIT_WORK(&io->work, kcryptd_io); |
749 | queue_work(cc->io_queue, &io->work); |
750 | } |
751 | |
752 | static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, |
753 | int error, int async) |
754 | { |
755 | struct bio *clone = io->ctx.bio_out; |
756 | struct crypt_config *cc = io->target->private; |
757 | |
758 | if (unlikely(error < 0)) { |
759 | crypt_free_buffer_pages(cc, clone); |
760 | bio_put(clone); |
761 | io->error = -EIO; |
762 | crypt_dec_pending(io); |
763 | return; |
764 | } |
765 | |
766 | /* crypt_convert should have filled the clone bio */ |
767 | BUG_ON(io->ctx.idx_out < clone->bi_vcnt); |
768 | |
769 | clone->bi_sector = cc->start + io->sector; |
770 | |
771 | if (async) |
772 | kcryptd_queue_io(io); |
773 | else |
774 | generic_make_request(clone); |
775 | } |
776 | |
777 | static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
778 | { |
779 | struct crypt_config *cc = io->target->private; |
780 | struct bio *clone; |
781 | struct dm_crypt_io *new_io; |
782 | int crypt_finished; |
783 | unsigned out_of_pages = 0; |
784 | unsigned remaining = io->base_bio->bi_size; |
785 | sector_t sector = io->sector; |
786 | int r; |
787 | |
788 | /* |
789 | * Prevent io from disappearing until this function completes. |
790 | */ |
791 | crypt_inc_pending(io); |
792 | crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); |
793 | |
794 | /* |
795 | * The allocated buffers can be smaller than the whole bio, |
796 | * so repeat the whole process until all the data can be handled. |
797 | */ |
798 | while (remaining) { |
799 | clone = crypt_alloc_buffer(io, remaining, &out_of_pages); |
800 | if (unlikely(!clone)) { |
801 | io->error = -ENOMEM; |
802 | break; |
803 | } |
804 | |
805 | io->ctx.bio_out = clone; |
806 | io->ctx.idx_out = 0; |
807 | |
808 | remaining -= clone->bi_size; |
809 | sector += bio_sectors(clone); |
810 | |
811 | crypt_inc_pending(io); |
812 | r = crypt_convert(cc, &io->ctx); |
813 | crypt_finished = atomic_dec_and_test(&io->ctx.pending); |
814 | |
815 | /* Encryption was already finished, submit io now */ |
816 | if (crypt_finished) { |
817 | kcryptd_crypt_write_io_submit(io, r, 0); |
818 | |
819 | /* |
820 | * If there was an error, do not try next fragments. |
821 | * For async, error is processed in async handler. |
822 | */ |
823 | if (unlikely(r < 0)) |
824 | break; |
825 | |
826 | io->sector = sector; |
827 | } |
828 | |
829 | /* |
830 | * Out of memory -> run queues |
831 | * But don't wait if split was due to the io size restriction |
832 | */ |
833 | if (unlikely(out_of_pages)) |
834 | congestion_wait(BLK_RW_ASYNC, HZ/100); |
835 | |
836 | /* |
837 | * With async crypto it is unsafe to share the crypto context |
838 | * between fragments, so switch to a new dm_crypt_io structure. |
839 | */ |
840 | if (unlikely(!crypt_finished && remaining)) { |
841 | new_io = crypt_io_alloc(io->target, io->base_bio, |
842 | sector); |
843 | crypt_inc_pending(new_io); |
844 | crypt_convert_init(cc, &new_io->ctx, NULL, |
845 | io->base_bio, sector); |
846 | new_io->ctx.idx_in = io->ctx.idx_in; |
847 | new_io->ctx.offset_in = io->ctx.offset_in; |
848 | |
849 | /* |
850 | * Fragments after the first use the base_io |
851 | * pending count. |
852 | */ |
853 | if (!io->base_io) |
854 | new_io->base_io = io; |
855 | else { |
856 | new_io->base_io = io->base_io; |
857 | crypt_inc_pending(io->base_io); |
858 | crypt_dec_pending(io); |
859 | } |
860 | |
861 | io = new_io; |
862 | } |
863 | } |
864 | |
865 | crypt_dec_pending(io); |
866 | } |
867 | |
868 | static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) |
869 | { |
870 | if (unlikely(error < 0)) |
871 | io->error = -EIO; |
872 | |
873 | crypt_dec_pending(io); |
874 | } |
875 | |
876 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) |
877 | { |
878 | struct crypt_config *cc = io->target->private; |
879 | int r = 0; |
880 | |
881 | crypt_inc_pending(io); |
882 | |
883 | crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, |
884 | io->sector); |
885 | |
886 | r = crypt_convert(cc, &io->ctx); |
887 | |
888 | if (atomic_dec_and_test(&io->ctx.pending)) |
889 | kcryptd_crypt_read_done(io, r); |
890 | |
891 | crypt_dec_pending(io); |
892 | } |
893 | |
894 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
895 | int error) |
896 | { |
897 | struct dm_crypt_request *dmreq = async_req->data; |
898 | struct convert_context *ctx = dmreq->ctx; |
899 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); |
900 | struct crypt_config *cc = io->target->private; |
901 | |
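/*
 * For a request that was backlogged (-EBUSY), the backend calls this
 * completion handler with -EINPROGRESS once processing starts; wake up
 * crypt_convert(), which is waiting on ctx->restart, and keep the request.
 */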
902 | if (error == -EINPROGRESS) { |
903 | complete(&ctx->restart); |
904 | return; |
905 | } |
906 | |
907 | mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); |
908 | |
909 | if (!atomic_dec_and_test(&ctx->pending)) |
910 | return; |
911 | |
912 | if (bio_data_dir(io->base_bio) == READ) |
913 | kcryptd_crypt_read_done(io, error); |
914 | else |
915 | kcryptd_crypt_write_io_submit(io, error, 1); |
916 | } |
917 | |
918 | static void kcryptd_crypt(struct work_struct *work) |
919 | { |
920 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
921 | |
922 | if (bio_data_dir(io->base_bio) == READ) |
923 | kcryptd_crypt_read_convert(io); |
924 | else |
925 | kcryptd_crypt_write_convert(io); |
926 | } |
927 | |
928 | static void kcryptd_queue_crypt(struct dm_crypt_io *io) |
929 | { |
930 | struct crypt_config *cc = io->target->private; |
931 | |
932 | INIT_WORK(&io->work, kcryptd_crypt); |
933 | queue_work(cc->crypt_queue, &io->work); |
934 | } |
935 | |
936 | /* |
937 | * Decode key from its hex representation |
938 | */ |
939 | static int crypt_decode_key(u8 *key, char *hex, unsigned int size) |
940 | { |
941 | char buffer[3]; |
942 | char *endp; |
943 | unsigned int i; |
944 | |
945 | buffer[2] = '\0'; |
946 | |
947 | for (i = 0; i < size; i++) { |
948 | buffer[0] = *hex++; |
949 | buffer[1] = *hex++; |
950 | |
951 | key[i] = (u8)simple_strtoul(buffer, &endp, 16); |
952 | |
953 | if (endp != &buffer[2]) |
954 | return -EINVAL; |
955 | } |
956 | |
957 | if (*hex != '\0') |
958 | return -EINVAL; |
959 | |
960 | return 0; |
961 | } |
962 | |
963 | /* |
964 | * Encode key into its hex representation |
965 | */ |
966 | static void crypt_encode_key(char *hex, u8 *key, unsigned int size) |
967 | { |
968 | unsigned int i; |
969 | |
970 | for (i = 0; i < size; i++) { |
971 | sprintf(hex, "%02x", *key); |
972 | hex += 2; |
973 | key++; |
974 | } |
975 | } |
976 | |
977 | static int crypt_set_key(struct crypt_config *cc, char *key) |
978 | { |
979 | unsigned key_size = strlen(key) >> 1; |
980 | |
981 | if (cc->key_size && cc->key_size != key_size) |
982 | return -EINVAL; |
983 | |
984 | cc->key_size = key_size; /* initial settings */ |
985 | |
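/* a key string of "-" denotes an empty (zero-length) key */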
986 | if ((!key_size && strcmp(key, "-")) || |
987 | (key_size && crypt_decode_key(cc->key, key, key_size) < 0)) |
988 | return -EINVAL; |
989 | |
990 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
991 | |
992 | return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); |
993 | } |
994 | |
995 | static int crypt_wipe_key(struct crypt_config *cc) |
996 | { |
997 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
998 | memset(&cc->key, 0, cc->key_size * sizeof(u8)); |
999 | return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); |
1000 | } |
1001 | |
1002 | /* |
1003 | * Construct an encryption mapping: |
1004 | * <cipher> <key> <iv_offset> <dev_path> <start> |
1005 | */ |
1006 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1007 | { |
1008 | struct crypt_config *cc; |
1009 | struct crypto_ablkcipher *tfm; |
1010 | char *tmp; |
1011 | char *cipher; |
1012 | char *chainmode; |
1013 | char *ivmode; |
1014 | char *ivopts; |
1015 | unsigned int key_size; |
1016 | unsigned long long tmpll; |
1017 | |
1018 | if (argc != 5) { |
1019 | ti->error = "Not enough arguments"; |
1020 | return -EINVAL; |
1021 | } |
1022 | |
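/*
 * Split the cipher specification; e.g. "aes-cbc-essiv:sha256" yields
 * cipher "aes", chainmode "cbc", ivmode "essiv" and ivopts "sha256".
 */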
1023 | tmp = argv[0]; |
1024 | cipher = strsep(&tmp, "-"); |
1025 | chainmode = strsep(&tmp, "-"); |
1026 | ivopts = strsep(&tmp, "-"); |
1027 | ivmode = strsep(&ivopts, ":"); |
1028 | |
1029 | if (tmp) |
1030 | DMWARN("Unexpected additional cipher options"); |
1031 | |
1032 | key_size = strlen(argv[1]) >> 1; |
1033 | |
1034 | cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); |
1035 | if (cc == NULL) { |
1036 | ti->error = |
1037 | "Cannot allocate transparent encryption context"; |
1038 | return -ENOMEM; |
1039 | } |
1040 | |
1041 | /* Compatibility mode for old dm-crypt cipher strings */ |
1042 | if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { |
1043 | chainmode = "cbc"; |
1044 | ivmode = "plain"; |
1045 | } |
1046 | |
1047 | if (strcmp(chainmode, "ecb") && !ivmode) { |
1048 | ti->error = "This chaining mode requires an IV mechanism"; |
1049 | goto bad_cipher; |
1050 | } |
1051 | |
1052 | if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", |
1053 | chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) { |
1054 | ti->error = "Chain mode + cipher name is too long"; |
1055 | goto bad_cipher; |
1056 | } |
1057 | |
1058 | tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0); |
1059 | if (IS_ERR(tfm)) { |
1060 | ti->error = "Error allocating crypto tfm"; |
1061 | goto bad_cipher; |
1062 | } |
1063 | |
1064 | strcpy(cc->cipher, cipher); |
1065 | strcpy(cc->chainmode, chainmode); |
1066 | cc->tfm = tfm; |
1067 | |
1068 | if (crypt_set_key(cc, argv[1]) < 0) { |
1069 | ti->error = "Error decoding and setting key"; |
1070 | goto bad_ivmode; |
1071 | } |
1072 | |
1073 | /* |
1074 | * Choose ivmode. Valid modes: "plain", "plain64", "essiv:<esshash>", |
1075 | * "benbi" and "null". See the comments at the IV generation code above. |
1076 | */ |
1077 | |
1078 | if (ivmode == NULL) |
1079 | cc->iv_gen_ops = NULL; |
1080 | else if (strcmp(ivmode, "plain") == 0) |
1081 | cc->iv_gen_ops = &crypt_iv_plain_ops; |
1082 | else if (strcmp(ivmode, "plain64") == 0) |
1083 | cc->iv_gen_ops = &crypt_iv_plain64_ops; |
1084 | else if (strcmp(ivmode, "essiv") == 0) |
1085 | cc->iv_gen_ops = &crypt_iv_essiv_ops; |
1086 | else if (strcmp(ivmode, "benbi") == 0) |
1087 | cc->iv_gen_ops = &crypt_iv_benbi_ops; |
1088 | else if (strcmp(ivmode, "null") == 0) |
1089 | cc->iv_gen_ops = &crypt_iv_null_ops; |
1090 | else { |
1091 | ti->error = "Invalid IV mode"; |
1092 | goto bad_ivmode; |
1093 | } |
1094 | |
1095 | if (cc->iv_gen_ops && cc->iv_gen_ops->ctr && |
1096 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) |
1097 | goto bad_ivmode; |
1098 | |
1099 | if (cc->iv_gen_ops && cc->iv_gen_ops->init && |
1100 | cc->iv_gen_ops->init(cc) < 0) { |
1101 | ti->error = "Error initialising IV"; |
1102 | goto bad_slab_pool; |
1103 | } |
1104 | |
1105 | cc->iv_size = crypto_ablkcipher_ivsize(tfm); |
1106 | if (cc->iv_size) |
1107 | /* at least a 64 bit sector number should fit in our buffer */ |
1108 | cc->iv_size = max(cc->iv_size, |
1109 | (unsigned int)(sizeof(u64) / sizeof(u8))); |
1110 | else { |
1111 | if (cc->iv_gen_ops) { |
1112 | DMWARN("Selected cipher does not support IVs"); |
1113 | if (cc->iv_gen_ops->dtr) |
1114 | cc->iv_gen_ops->dtr(cc); |
1115 | cc->iv_gen_ops = NULL; |
1116 | } |
1117 | } |
1118 | |
1119 | cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); |
1120 | if (!cc->io_pool) { |
1121 | ti->error = "Cannot allocate crypt io mempool"; |
1122 | goto bad_slab_pool; |
1123 | } |
1124 | |
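/*
 * dmreq_start is the offset of struct dm_crypt_request within each
 * allocated request: the ablkcipher request plus its transform context
 * come first, padded so that dm_crypt_request (and the IV following it)
 * satisfy the cipher's alignment requirements - see the layout comment
 * in struct crypt_config.
 */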
1125 | cc->dmreq_start = sizeof(struct ablkcipher_request); |
1126 | cc->dmreq_start += crypto_ablkcipher_reqsize(tfm); |
1127 | cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); |
1128 | cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) & |
1129 | ~(crypto_tfm_ctx_alignment() - 1); |
1130 | |
1131 | cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + |
1132 | sizeof(struct dm_crypt_request) + cc->iv_size); |
1133 | if (!cc->req_pool) { |
1134 | ti->error = "Cannot allocate crypt request mempool"; |
1135 | goto bad_req_pool; |
1136 | } |
1137 | cc->req = NULL; |
1138 | |
1139 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); |
1140 | if (!cc->page_pool) { |
1141 | ti->error = "Cannot allocate page mempool"; |
1142 | goto bad_page_pool; |
1143 | } |
1144 | |
1145 | cc->bs = bioset_create(MIN_IOS, 0); |
1146 | if (!cc->bs) { |
1147 | ti->error = "Cannot allocate crypt bioset"; |
1148 | goto bad_bs; |
1149 | } |
1150 | |
1151 | if (sscanf(argv[2], "%llu", &tmpll) != 1) { |
1152 | ti->error = "Invalid iv_offset sector"; |
1153 | goto bad_device; |
1154 | } |
1155 | cc->iv_offset = tmpll; |
1156 | |
1157 | if (sscanf(argv[4], "%llu", &tmpll) != 1) { |
1158 | ti->error = "Invalid device sector"; |
1159 | goto bad_device; |
1160 | } |
1161 | cc->start = tmpll; |
1162 | |
1163 | if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) { |
1164 | ti->error = "Device lookup failed"; |
1165 | goto bad_device; |
1166 | } |
1167 | |
1168 | if (ivmode && cc->iv_gen_ops) { |
1169 | if (ivopts) |
1170 | *(ivopts - 1) = ':'; |
1171 | cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL); |
1172 | if (!cc->iv_mode) { |
1173 | ti->error = "Error kmallocing iv_mode string"; |
1174 | goto bad_ivmode_string; |
1175 | } |
1176 | strcpy(cc->iv_mode, ivmode); |
1177 | } else |
1178 | cc->iv_mode = NULL; |
1179 | |
1180 | cc->io_queue = create_singlethread_workqueue("kcryptd_io"); |
1181 | if (!cc->io_queue) { |
1182 | ti->error = "Couldn't create kcryptd io queue"; |
1183 | goto bad_io_queue; |
1184 | } |
1185 | |
1186 | cc->crypt_queue = create_singlethread_workqueue("kcryptd"); |
1187 | if (!cc->crypt_queue) { |
1188 | ti->error = "Couldn't create kcryptd queue"; |
1189 | goto bad_crypt_queue; |
1190 | } |
1191 | |
1192 | ti->num_flush_requests = 1; |
1193 | ti->private = cc; |
1194 | return 0; |
1195 | |
1196 | bad_crypt_queue: |
1197 | destroy_workqueue(cc->io_queue); |
1198 | bad_io_queue: |
1199 | kfree(cc->iv_mode); |
1200 | bad_ivmode_string: |
1201 | dm_put_device(ti, cc->dev); |
1202 | bad_device: |
1203 | bioset_free(cc->bs); |
1204 | bad_bs: |
1205 | mempool_destroy(cc->page_pool); |
1206 | bad_page_pool: |
1207 | mempool_destroy(cc->req_pool); |
1208 | bad_req_pool: |
1209 | mempool_destroy(cc->io_pool); |
1210 | bad_slab_pool: |
1211 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
1212 | cc->iv_gen_ops->dtr(cc); |
1213 | bad_ivmode: |
1214 | crypto_free_ablkcipher(tfm); |
1215 | bad_cipher: |
1216 | /* Must zero key material before freeing */ |
1217 | kzfree(cc); |
1218 | return -EINVAL; |
1219 | } |
1220 | |
1221 | static void crypt_dtr(struct dm_target *ti) |
1222 | { |
1223 | struct crypt_config *cc = (struct crypt_config *) ti->private; |
1224 | |
1225 | destroy_workqueue(cc->io_queue); |
1226 | destroy_workqueue(cc->crypt_queue); |
1227 | |
1228 | if (cc->req) |
1229 | mempool_free(cc->req, cc->req_pool); |
1230 | |
1231 | bioset_free(cc->bs); |
1232 | mempool_destroy(cc->page_pool); |
1233 | mempool_destroy(cc->req_pool); |
1234 | mempool_destroy(cc->io_pool); |
1235 | |
1236 | kfree(cc->iv_mode); |
1237 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
1238 | cc->iv_gen_ops->dtr(cc); |
1239 | crypto_free_ablkcipher(cc->tfm); |
1240 | dm_put_device(ti, cc->dev); |
1241 | |
1242 | /* Must zero key material before freeing */ |
1243 | kzfree(cc); |
1244 | } |
1245 | |
1246 | static int crypt_map(struct dm_target *ti, struct bio *bio, |
1247 | union map_info *map_context) |
1248 | { |
1249 | struct dm_crypt_io *io; |
1250 | struct crypt_config *cc; |
1251 | |
1252 | if (unlikely(bio_empty_barrier(bio))) { |
1253 | cc = ti->private; |
1254 | bio->bi_bdev = cc->dev->bdev; |
1255 | return DM_MAPIO_REMAPPED; |
1256 | } |
1257 | |
1258 | io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); |
1259 | |
1260 | if (bio_data_dir(io->base_bio) == READ) |
1261 | kcryptd_queue_io(io); |
1262 | else |
1263 | kcryptd_queue_crypt(io); |
1264 | |
1265 | return DM_MAPIO_SUBMITTED; |
1266 | } |
1267 | |
1268 | static int crypt_status(struct dm_target *ti, status_type_t type, |
1269 | char *result, unsigned int maxlen) |
1270 | { |
1271 | struct crypt_config *cc = (struct crypt_config *) ti->private; |
1272 | unsigned int sz = 0; |
1273 | |
1274 | switch (type) { |
1275 | case STATUSTYPE_INFO: |
1276 | result[0] = '\0'; |
1277 | break; |
1278 | |
1279 | case STATUSTYPE_TABLE: |
1280 | if (cc->iv_mode) |
1281 | DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode, |
1282 | cc->iv_mode); |
1283 | else |
1284 | DMEMIT("%s-%s ", cc->cipher, cc->chainmode); |
1285 | |
1286 | if (cc->key_size > 0) { |
1287 | if ((maxlen - sz) < ((cc->key_size << 1) + 1)) |
1288 | return -ENOMEM; |
1289 | |
1290 | crypt_encode_key(result + sz, cc->key, cc->key_size); |
1291 | sz += cc->key_size << 1; |
1292 | } else { |
1293 | if (sz >= maxlen) |
1294 | return -ENOMEM; |
1295 | result[sz++] = '-'; |
1296 | } |
1297 | |
1298 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, |
1299 | cc->dev->name, (unsigned long long)cc->start); |
1300 | break; |
1301 | } |
1302 | return 0; |
1303 | } |
1304 | |
1305 | static void crypt_postsuspend(struct dm_target *ti) |
1306 | { |
1307 | struct crypt_config *cc = ti->private; |
1308 | |
1309 | set_bit(DM_CRYPT_SUSPENDED, &cc->flags); |
1310 | } |
1311 | |
1312 | static int crypt_preresume(struct dm_target *ti) |
1313 | { |
1314 | struct crypt_config *cc = ti->private; |
1315 | |
1316 | if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { |
1317 | DMERR("aborting resume - crypt key is not set."); |
1318 | return -EAGAIN; |
1319 | } |
1320 | |
1321 | return 0; |
1322 | } |
1323 | |
1324 | static void crypt_resume(struct dm_target *ti) |
1325 | { |
1326 | struct crypt_config *cc = ti->private; |
1327 | |
1328 | clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); |
1329 | } |
1330 | |
1331 | /* Message interface |
1332 | * key set <key> |
1333 | * key wipe |
1334 | */ |
1335 | static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) |
1336 | { |
1337 | struct crypt_config *cc = ti->private; |
1338 | int ret = -EINVAL; |
1339 | |
1340 | if (argc < 2) |
1341 | goto error; |
1342 | |
1343 | if (!strnicmp(argv[0], MESG_STR("key"))) { |
1344 | if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { |
1345 | DMWARN("not suspended during key manipulation."); |
1346 | return -EINVAL; |
1347 | } |
1348 | if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { |
1349 | ret = crypt_set_key(cc, argv[2]); |
1350 | if (ret) |
1351 | return ret; |
1352 | if (cc->iv_gen_ops && cc->iv_gen_ops->init) |
1353 | ret = cc->iv_gen_ops->init(cc); |
1354 | return ret; |
1355 | } |
1356 | if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { |
1357 | if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { |
1358 | ret = cc->iv_gen_ops->wipe(cc); |
1359 | if (ret) |
1360 | return ret; |
1361 | } |
1362 | return crypt_wipe_key(cc); |
1363 | } |
1364 | } |
1365 | |
1366 | error: |
1367 | DMWARN("unrecognised message received."); |
1368 | return -EINVAL; |
1369 | } |
1370 | |
1371 | static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, |
1372 | struct bio_vec *biovec, int max_size) |
1373 | { |
1374 | struct crypt_config *cc = ti->private; |
1375 | struct request_queue *q = bdev_get_queue(cc->dev->bdev); |
1376 | |
1377 | if (!q->merge_bvec_fn) |
1378 | return max_size; |
1379 | |
1380 | bvm->bi_bdev = cc->dev->bdev; |
1381 | bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin; |
1382 | |
1383 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); |
1384 | } |
1385 | |
1386 | static int crypt_iterate_devices(struct dm_target *ti, |
1387 | iterate_devices_callout_fn fn, void *data) |
1388 | { |
1389 | struct crypt_config *cc = ti->private; |
1390 | |
1391 | return fn(ti, cc->dev, cc->start, ti->len, data); |
1392 | } |
1393 | |
1394 | static struct target_type crypt_target = { |
1395 | .name = "crypt", |
1396 | .version = {1, 7, 0}, |
1397 | .module = THIS_MODULE, |
1398 | .ctr = crypt_ctr, |
1399 | .dtr = crypt_dtr, |
1400 | .map = crypt_map, |
1401 | .status = crypt_status, |
1402 | .postsuspend = crypt_postsuspend, |
1403 | .preresume = crypt_preresume, |
1404 | .resume = crypt_resume, |
1405 | .message = crypt_message, |
1406 | .merge = crypt_merge, |
1407 | .iterate_devices = crypt_iterate_devices, |
1408 | }; |
1409 | |
1410 | static int __init dm_crypt_init(void) |
1411 | { |
1412 | int r; |
1413 | |
1414 | _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0); |
1415 | if (!_crypt_io_pool) |
1416 | return -ENOMEM; |
1417 | |
1418 | r = dm_register_target(&crypt_target); |
1419 | if (r < 0) { |
1420 | DMERR("register failed %d", r); |
1421 | kmem_cache_destroy(_crypt_io_pool); |
1422 | } |
1423 | |
1424 | return r; |
1425 | } |
1426 | |
1427 | static void __exit dm_crypt_exit(void) |
1428 | { |
1429 | dm_unregister_target(&crypt_target); |
1430 | kmem_cache_destroy(_crypt_io_pool); |
1431 | } |
1432 | |
1433 | module_init(dm_crypt_init); |
1434 | module_exit(dm_crypt_exit); |
1435 | |
1436 | MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); |
1437 | MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); |
1438 | MODULE_LICENSE("GPL"); |
1439 |