/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
#define MIN_IOS		16
#define MIN_BIOS	16

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
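
/*
 * For example, on a 64-bit kernel BITS_PER_LONG is 64, so 'struct io'
 * is aligned to 64 bytes and the low six bits of its address are
 * always zero -- enough spare bits to hold a region number in the
 * range 0..63 next to the pointer in bi_private.
 */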

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(MIN_BIOS, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

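	/*
	 * DM_IO_MAX_REGIONS is a power of two, so its two's-complement
	 * negation is a mask that keeps the aligned pointer bits and
	 * drops the region bits stashed in the low end of the word.
	 */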
	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

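			/*
			 * Free 'io' before invoking the callback: once
			 * the callback runs it may reuse or re-issue
			 * from the same pool, so 'io' must not be
			 * touched afterwards.
			 */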
			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

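	/* Don't let a failed read hand stale buffer contents to the caller. */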
	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
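
/*
 * The iteration pattern (see do_region() below) is:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	... consume up to 'len' bytes of 'page' starting at 'offset' ...
 *	dp->next_page(dp);
 */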

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

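/*
 * The buffer need not be page-aligned: the sub-page offset of 'data'
 * is kept in context_u, so the first get_page() yields a partial page
 * and every following page starts at offset zero.
 */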
static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

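	/* Return the bio to the bio_set it was allocated from. */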
	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t discard_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably-sized bio.
		 */
		if (rw & REQ_DISCARD)
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

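		/*
		 * Discards carry no data pages: just set bi_size and cap
		 * each bio at the queue's max_discard_sectors limit.
		 */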
		if (rw & REQ_DISCARD) {
			discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_size = discard_sectors << SECTOR_SHIFT;
			remaining -= discard_sectors;
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

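	/*
	 * Multiple regions share one set of data pages, which only makes
	 * sense when the same data is written to every region; reads
	 * from several regions would clobber each other.
	 */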
	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

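	/* Sleep uninterruptibly until dec_count() drops io->count to zero. */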
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

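	/*
	 * vmalloc'd memory may be cached through aliased mappings on
	 * some architectures: flush CPU caches before the device reads
	 * the buffer, and arrange for invalidation (performed in
	 * dec_count()) before the CPU reads data the device has written.
	 */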
	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
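
/*
 * Illustrative caller sketch (documentation only, not part of this
 * interface; 'client', 'bdev' and 'buffer' stand in for values the
 * caller already owns):
 *
 *	struct dm_io_region region = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,			eight 512-byte sectors
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,
 *		.notify.fn    = NULL,		NULL selects sync_io()
 *		.client       = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits);
 */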

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}