block/blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h> /* for struct sg_iovec */

#include "blk.h"

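/*
 * blk_rq_append_bio - append a bio to a request
 *
 * Starts the request from @bio if @rq is still empty, otherwise tries to
 * back-merge @bio behind the current tail bio and updates the request's
 * byte count.  Returns -EINVAL if the bio cannot be merged within the
 * queue's limits.
 */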
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
    if (!rq->bio)
        blk_rq_bio_prep(q, rq, bio);
    else if (!ll_back_merge_fn(q, rq, bio))
        return -EINVAL;
    else {
        rq->biotail->bi_next = bio;
        rq->biotail = bio;

        rq->__data_len += bio->bi_size;
    }
    return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
    int ret = 0;

    if (bio) {
        if (bio_flagged(bio, BIO_USER_MAPPED))
            bio_unmap_user(bio);
        else
            ret = bio_uncopy_user(bio);
    }

    return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             struct rq_map_data *map_data, void __user *ubuf,
                             unsigned int len, gfp_t gfp_mask)
{
    unsigned long uaddr;
    struct bio *bio, *orig_bio;
    int reading, ret;

    reading = rq_data_dir(rq) == READ;

    /*
     * if alignment requirement is satisfied, map in user pages for
     * direct dma. else, set up kernel bounce buffers
     */
    uaddr = (unsigned long) ubuf;
    if (blk_rq_aligned(q, uaddr, len) && !map_data)
        bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
    else
        bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

    if (IS_ERR(bio))
        return PTR_ERR(bio);

    if (map_data && map_data->null_mapped)
        bio->bi_flags |= (1 << BIO_NULL_MAPPED);

    orig_bio = bio;
    blk_queue_bounce(q, &bio);

    /*
     * We link the bounce buffer in and could have to traverse it
     * later so we have to get a ref to prevent it from being freed
     */
    bio_get(bio);

    ret = blk_rq_append_bio(q, rq, bio);
    if (!ret)
        return bio->bi_size;

    /* if it was bounced we must call the end io function */
    bio_endio(bio, 0);
    __blk_rq_unmap_user(orig_bio);
    bio_put(bio);
    return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @ubuf: the user buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 * Data will be mapped directly for zero copy I/O, if possible. Otherwise
 * a kernel bounce buffer is used.
 *
 * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 * still in process context.
 *
 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
 * before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 * original bio must be passed back in to blk_rq_unmap_user() for proper
 * unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
    unsigned long bytes_read = 0;
    struct bio *bio = NULL;
    int ret;

    if (len > (queue_max_hw_sectors(q) << 9))
        return -EINVAL;
    if (!len)
        return -EINVAL;

    if (!ubuf && (!map_data || !map_data->null_mapped))
        return -EINVAL;

    while (bytes_read != len) {
        unsigned long map_len, end, start;

        map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
        end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                >> PAGE_SHIFT;
        start = (unsigned long)ubuf >> PAGE_SHIFT;

        /*
         * A bad offset could cause us to require BIO_MAX_PAGES + 1
         * pages. If this happens we just lower the requested
         * mapping len by a page so that we can fit
         */
        if (end - start > BIO_MAX_PAGES)
            map_len -= PAGE_SIZE;

        ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
                                gfp_mask);
        if (ret < 0)
            goto unmap_rq;
        if (!bio)
            bio = rq->bio;
        bytes_read += ret;
        ubuf += ret;

        if (map_data)
            map_data->offset += ret;
    }

    if (!bio_flagged(bio, BIO_USER_MAPPED))
        rq->cmd_flags |= REQ_COPY_USER;

    rq->buffer = NULL;
    return 0;
unmap_rq:
    blk_rq_unmap_user(bio);
    rq->bio = NULL;
    return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
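
/*
 * Illustrative sketch (not part of this file's original code) of how a
 * REQ_TYPE_BLOCK_PC caller, in the spirit of the SG_IO path in
 * block/scsi_ioctl.c, typically pairs blk_rq_map_user() with
 * blk_rq_unmap_user().  "q", "ubuf" and "len" are placeholders supplied
 * by the caller, and the direction (READ/WRITE) depends on the command:
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *	bio = rq->bio;			(save the original bio)
 *	blk_execute_rq(q, NULL, rq, 0);
 *	ret = blk_rq_unmap_user(bio);	(pass the bio saved above, not rq->bio)
 *	blk_put_request(rq);
 */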

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 * @gfp_mask: memory allocation flags
 *
 * Description:
 * Data will be mapped directly for zero copy I/O, if possible. Otherwise
 * a kernel bounce buffer is used.
 *
 * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 * still in process context.
 *
 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
 * before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 * original bio must be passed back in to blk_rq_unmap_user() for proper
 * unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data, struct sg_iovec *iov,
                        int iov_count, unsigned int len, gfp_t gfp_mask)
{
    struct bio *bio;
    int i, read = rq_data_dir(rq) == READ;
    int unaligned = 0;

    if (!iov || iov_count <= 0)
        return -EINVAL;

    for (i = 0; i < iov_count; i++) {
        unsigned long uaddr = (unsigned long)iov[i].iov_base;

        if (!iov[i].iov_len)
            return -EINVAL;

        /*
         * Keep going so we check length of all segments
         */
        if (uaddr & queue_dma_alignment(q))
            unaligned = 1;
    }

    if (unaligned || (q->dma_pad_mask & len) || map_data)
        bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
                                gfp_mask);
    else
        bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

    if (IS_ERR(bio))
        return PTR_ERR(bio);

    if (bio->bi_size != len) {
        /*
         * Grab an extra reference to this bio, as bio_unmap_user()
         * expects to be able to drop it twice as it happens on the
         * normal IO completion path
         */
        bio_get(bio);
        bio_endio(bio, 0);
        __blk_rq_unmap_user(bio);
        return -EINVAL;
    }

    if (!bio_flagged(bio, BIO_USER_MAPPED))
        rq->cmd_flags |= REQ_COPY_USER;

    blk_queue_bounce(q, &bio);
    bio_get(bio);
    blk_rq_bio_prep(q, rq, bio);
    rq->buffer = NULL;
    return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
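
/*
 * Illustrative sketch (not from this file) of blk_rq_map_user_iov() use,
 * loosely following the SG_IO iovec path in block/scsi_ioctl.c.  "iov",
 * "iov_count" and "total_len" are placeholders for a scatter list already
 * copied in from userspace:
 *
 *	struct sg_iovec *iov;		(sg_iovec array built from the user iovec)
 *	int ret;
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, iov, iov_count,
 *				  total_len, GFP_KERNEL);
 *	if (ret)
 *		goto out_put_request;
 *
 * As with blk_rq_map_user(), the caller saves rq->bio after mapping,
 * submits the request (e.g. via blk_execute_rq()) and then passes the
 * saved bio to blk_rq_unmap_user().
 */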

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 * supply the original rq->bio from the blk_rq_map_user() return, since
 * the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
    struct bio *mapped_bio;
    int ret = 0, ret2;

    while (bio) {
        mapped_bio = bio;
        if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
            mapped_bio = bio->bi_private;

        ret2 = __blk_rq_unmap_user(mapped_bio);
        if (ret2 && !ret)
            ret = ret2;

        mapped_bio = bio;
        bio = bio->bi_next;
        bio_put(mapped_bio);
    }

    return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 * Data will be mapped directly if possible. Otherwise a bounce
 * buffer is used. Can be called multiple times to append multiple
 * buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
    int reading = rq_data_dir(rq) == READ;
    unsigned long addr = (unsigned long) kbuf;
    int do_copy = 0;
    struct bio *bio;
    int ret;

    if (len > (queue_max_hw_sectors(q) << 9))
        return -EINVAL;
    if (!len || !kbuf)
        return -EINVAL;

    do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
    if (do_copy)
        bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
    else
        bio = bio_map_kern(q, kbuf, len, gfp_mask);

    if (IS_ERR(bio))
        return PTR_ERR(bio);

    if (!reading)
        bio->bi_rw |= REQ_WRITE;

    if (do_copy)
        rq->cmd_flags |= REQ_COPY_USER;

    ret = blk_rq_append_bio(q, rq, bio);
    if (unlikely(ret)) {
        /* request is too big */
        bio_put(bio);
        return ret;
    }

    blk_queue_bounce(q, &rq->bio);
    rq->buffer = NULL;
    return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
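
/*
 * Illustrative sketch (not from this file) of blk_rq_map_kern() use, along
 * the lines of scsi_execute() in drivers/scsi/scsi_lib.c: a driver maps a
 * kernel buffer into a REQ_TYPE_BLOCK_PC request before issuing it.  The
 * "sdev", "write", "buffer" and "bufflen" names are placeholders:
 *
 *	struct request *req;
 *
 *	req = blk_get_request(sdev->request_queue, write, GFP_NOIO);
 *	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
 *				       buffer, bufflen, GFP_NOIO))
 *		goto out_put;
 *	req->cmd_type = REQ_TYPE_BLOCK_PC;
 *	blk_execute_rq(req->q, NULL, req, 1);
 * out_put:
 *	blk_put_request(req);
 */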