/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->__data_len += bio->bi_size;
        }
        return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             struct rq_map_data *map_data, void __user *ubuf,
                             unsigned int len, gfp_t gfp_mask)
{
        unsigned long uaddr;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * If the alignment requirement is satisfied, map in the user pages
         * for direct DMA. Otherwise, set up kernel bounce buffers.
         */
        uaddr = (unsigned long) ubuf;
        if (blk_rq_aligned(q, uaddr, len) && !map_data)
                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
        else
                bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
                bio->bi_flags |= (1 << BIO_NULL_MAPPED);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end_io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @ubuf: the user buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 * Data will be mapped directly for zero copy I/O, if possible. Otherwise
 * a kernel bounce buffer is used.
 *
 * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 * still in process context.
 *
 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
 * before being submitted to the device, as pages mapped may be out of
 * reach. It is the caller's responsibility to make sure this happens. The
 * original bio must be passed back in to blk_rq_unmap_user() for proper
 * unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len)
                return -EINVAL;

        if (!ubuf && (!map_data || !map_data->null_mapped))
                return -EINVAL;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                        >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
                                        gfp_mask);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;

                if (map_data)
                        map_data->offset += ret;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        rq->buffer = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
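
/*
 * Illustrative usage sketch (caller-side code, not part of this file): map
 * a plain user buffer into a REQ_TYPE_BLOCK_PC request. "sdev_q", "user_buf"
 * and "user_len" are hypothetical placeholders for the target queue and the
 * userspace buffer. Passing a NULL rq_map_data selects direct mapping when
 * the buffer satisfies the queue's alignment, and bounce-copying otherwise.
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(sdev_q, WRITE, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_user(sdev_q, rq, NULL, user_buf, user_len,
 *			      GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 * The matching execute/unmap sequence is sketched after blk_rq_unmap_user()
 * below.
 */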

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 * @gfp_mask: memory allocation flags
 *
 * Description:
 * Data will be mapped directly for zero copy I/O, if possible. Otherwise
 * a kernel bounce buffer is used.
 *
 * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 * still in process context.
 *
 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
 * before being submitted to the device, as pages mapped may be out of
 * reach. It is the caller's responsibility to make sure this happens. The
 * original bio must be passed back in to blk_rq_unmap_user() for proper
 * unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data, struct sg_iovec *iov,
                        int iov_count, unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;
        int i, read = rq_data_dir(rq) == READ;
        int unaligned = 0;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;

                if (!iov[i].iov_len)
                        return -EINVAL;

                if (uaddr & queue_dma_alignment(q)) {
                        unaligned = 1;
                        break;
                }
        }

        if (unaligned || (q->dma_pad_mask & len) || map_data)
                bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
                                        gfp_mask);
        else
                bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
                 * normal IO completion path
                 */
                bio_get(bio);
                bio_endio(bio, 0);
                __blk_rq_unmap_user(bio);
                return -EINVAL;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        blk_queue_bounce(q, &bio);
        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
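
/*
 * Illustrative sketch (caller-side code, not part of this file): map a
 * two-segment user scatter list with blk_rq_map_user_iov(). "q" and "rq" are
 * assumed to be an already-allocated passthrough request and its queue, as
 * in the earlier sketch; the iovec bases and lengths are hypothetical. The
 * total length passed in must equal the sum of the segment lengths, since a
 * bio whose size differs from @len is torn down again and -EINVAL returned.
 *
 *	struct sg_iovec iov[2];
 *	unsigned int total = hdr_len + data_len;
 *	int ret;
 *
 *	iov[0].iov_base = hdr_buf;
 *	iov[0].iov_len  = hdr_len;
 *	iov[1].iov_base = data_buf;
 *	iov[1].iov_len  = data_len;
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, iov, 2, total, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */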

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 * supply the original rq->bio from the blk_rq_map_user() return, since
 * the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
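
/*
 * Illustrative sketch (caller-side code, not part of this file): the
 * execute/unmap half of the pairing sketched after blk_rq_map_user() above.
 * rq->bio is saved before the request is executed, because I/O completion
 * may have advanced or cleared rq->bio; the saved pointer is what
 * blk_rq_unmap_user() needs. "sdev_q" is a hypothetical placeholder.
 *
 *	struct bio *orig_bio = rq->bio;
 *	int err;
 *
 *	err = blk_execute_rq(sdev_q, NULL, rq, 0);
 *
 *	if (blk_rq_unmap_user(orig_bio) && !err)
 *		err = -EFAULT;
 *
 *	blk_put_request(rq);
 *	return err;
 */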

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 * Data will be mapped directly if possible. Otherwise a bounce
 * buffer is used. Can be called multiple times to append multiple
 * buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= REQ_WRITE;

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
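
/*
 * Illustrative sketch (caller-side code, not part of this file): attach a
 * kernel buffer to a passthrough request with blk_rq_map_kern(). "q",
 * "sense_buf" and "sense_len" are hypothetical placeholders. A heap buffer
 * that satisfies the queue's alignment is mapped directly; a stack or
 * misaligned buffer is transparently bounce-copied instead.
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_kern(q, rq, sense_buf, sense_len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */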