mm/bounce.c

/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE 64
#define ISA_POOL_SIZE 16

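/*
 * Emergency reserves: page_pool supplies bounce pages for highmem I/O,
 * while isa_page_pool supplies pages from the DMA zone (the first 16MB
 * on x86) for devices limited to ISA addressing.
 */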
static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
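    /*
     * Without memory hotplug, highmem cannot appear after boot, so the
     * pool can be skipped when no page lies above the direct mapping.
     */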
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
    if (max_pfn <= max_low_pfn)
        return 0;
#endif

    page_pool = mempool_create_page_pool(POOL_SIZE, 0);
    BUG_ON(!page_pool);
    printk(KERN_INFO "bounce pool size: %d pages\n", POOL_SIZE);

    return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version: map the destination bio_vec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
    unsigned long flags;
    unsigned char *vto;

    local_irq_save(flags);
    vto = kmap_atomic(to->bv_page);
    memcpy(vto + to->bv_offset, vfrom, to->bv_len);
    kunmap_atomic(vto);
    local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom) \
    memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
    return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Called (typically via blk_queue_bounce_limit()) every time a queue is
 * initialized with BLK_BOUNCE_ISA as the max address, so check whether
 * the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
    if (isa_page_pool)
        return 0;

    isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
                       mempool_free_pages, (void *) 0);
    BUG_ON(!isa_page_pool);

    printk(KERN_INFO "isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
    return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. Always
 * kmap it; it will do the Right Thing either way.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
    unsigned char *vfrom;
    struct bio_vec *tovec, *fromvec;
    int i;

    __bio_for_each_segment(tovec, to, i, 0) {
        fromvec = from->bi_io_vec + i;

        /*
         * not bounced
         */
        if (tovec->bv_page == fromvec->bv_page)
            continue;

        /*
         * fromvec->bv_offset and fromvec->bv_len might have been
         * modified by the block layer, so use the original copy;
         * bounce_copy_vec already uses tovec->bv_len.
         */
        vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

        bounce_copy_vec(tovec, vfrom);
        flush_dcache_page(tovec->bv_page);
    }
}

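/*
 * Common completion path: return every bounced page to its pool, then
 * complete the original bio that was stashed in bi_private.
 */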
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
    struct bio *bio_orig = bio->bi_private;
    struct bio_vec *bvec, *org_vec;
    int i;

    if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
        set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

    /*
     * free up bounce indirect pages used
     */
    __bio_for_each_segment(bvec, bio, i, 0) {
        org_vec = bio_orig->bi_io_vec + i;
        if (bvec->bv_page == org_vec->bv_page)
            continue;

        dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
        mempool_free(bvec->bv_page, pool);
    }

    bio_endio(bio_orig, err);
    bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio, int err)
{
    bounce_end_io(bio, page_pool, err);
}

static void bounce_end_io_write_isa(struct bio *bio, int err)
{
    bounce_end_io(bio, isa_page_pool, err);
}

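/*
 * Read completion: if the transfer succeeded, copy the bounced data
 * back into the original (possibly highmem) pages before completing.
 */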
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
    struct bio *bio_orig = bio->bi_private;

    if (test_bit(BIO_UPTODATE, &bio->bi_flags))
        copy_to_high_bio_irq(bio_orig, bio);

    bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
    __bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
    __bounce_end_io_read(bio, isa_page_pool, err);
}

#ifdef CONFIG_NEED_BOUNCE_POOL
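/*
 * A device that requires stable pages must not see page contents change
 * while it is reading them; filesystems that set MS_SNAP_STABLE opt to
 * have such writes snapshotted into bounce pages rather than blocking
 * the writer.
 */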
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
    struct page *page;
    struct backing_dev_info *bdi;
    struct address_space *mapping;
    struct bio_vec *from;
    int i;

    if (bio_data_dir(bio) != WRITE)
        return 0;

    if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
        return 0;

    /*
     * Based on the first page that has a valid mapping, decide whether or
     * not we have to employ bounce buffering to guarantee stable pages.
     */
    bio_for_each_segment(from, bio, i) {
        page = from->bv_page;
        mapping = page_mapping(page);
        if (!mapping)
            continue;
        bdi = mapping->backing_dev_info;
        return mapping->host->i_sb->s_flags & MS_SNAP_STABLE;
    }

    return 0;
}
#else
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
    return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */

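/*
 * Walk the bio: every segment whose page sits above the queue's bounce
 * pfn (or every segment, if @force) gets a page from @pool; writes are
 * copied into it now, reads are copied back at completion.  The bounce
 * bio is built lazily on the first offending segment and substituted
 * for the original, which is parked in bi_private until completion.
 */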
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
                   mempool_t *pool, int force)
{
    struct page *page;
    struct bio *bio = NULL;
    int i, rw = bio_data_dir(*bio_orig);
    struct bio_vec *to, *from;

    bio_for_each_segment(from, *bio_orig, i) {
        page = from->bv_page;

        /*
         * is destination page below bounce pfn?
         */
        if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
            continue;

        /*
         * irk, bounce it
         */
        if (!bio) {
            unsigned int cnt = (*bio_orig)->bi_vcnt;

            bio = bio_alloc(GFP_NOIO, cnt);
            memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
        }

        to = bio->bi_io_vec + i;

        to->bv_page = mempool_alloc(pool, q->bounce_gfp);
        to->bv_len = from->bv_len;
        to->bv_offset = from->bv_offset;
        inc_zone_page_state(to->bv_page, NR_BOUNCE);

        if (rw == WRITE) {
            char *vto, *vfrom;

            flush_dcache_page(from->bv_page);
            vto = page_address(to->bv_page) + to->bv_offset;
            vfrom = kmap(from->bv_page) + from->bv_offset;
            memcpy(vto, vfrom, to->bv_len);
            kunmap(from->bv_page);
        }
    }

    /*
     * no pages bounced
     */
    if (!bio)
        return;

    trace_block_bio_bounce(q, *bio_orig);

    /*
     * at least one page was bounced, fill in possible non-highmem
     * pages
     */
    __bio_for_each_segment(from, *bio_orig, i, 0) {
        to = bio_iovec_idx(bio, i);
        if (!to->bv_page) {
            to->bv_page = from->bv_page;
            to->bv_len = from->bv_len;
            to->bv_offset = from->bv_offset;
        }
    }

    bio->bi_bdev = (*bio_orig)->bi_bdev;
    bio->bi_flags |= (1 << BIO_BOUNCED);
    bio->bi_sector = (*bio_orig)->bi_sector;
    bio->bi_rw = (*bio_orig)->bi_rw;

    bio->bi_vcnt = (*bio_orig)->bi_vcnt;
    bio->bi_idx = (*bio_orig)->bi_idx;
    bio->bi_size = (*bio_orig)->bi_size;

    if (pool == page_pool) {
        bio->bi_end_io = bounce_end_io_write;
        if (rw == READ)
            bio->bi_end_io = bounce_end_io_read;
    } else {
        bio->bi_end_io = bounce_end_io_write_isa;
        if (rw == READ)
            bio->bi_end_io = bounce_end_io_read_isa;
    }

    bio->bi_private = *bio_orig;
    *bio_orig = bio;
}

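/*
 * blk_queue_bounce - bounce unsuitable pages in *@bio_orig for queue @q
 *
 * Called before a bio is handed to the driver, e.g. from blk_queue_bio().
 * A minimal sketch of a caller (my_make_request is hypothetical, shown
 * for illustration only):
 *
 *    static void my_make_request(struct request_queue *q, struct bio *bio)
 *    {
 *        blk_queue_bounce(q, &bio);    // may replace bio with a clone
 *        ... hand bio to the hardware ...
 *    }
 *
 * On return *bio_orig may point at the bounce clone; completing the
 * clone also completes the original bio.
 */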
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
    int must_bounce;
    mempool_t *pool;

    /*
     * Data-less bio, nothing to bounce
     */
    if (!bio_has_data(*bio_orig))
        return;

    must_bounce = must_snapshot_stable_pages(q, *bio_orig);

    /*
     * For the non-ISA bounce case, just check whether the bounce pfn is
     * equal to or bigger than the highest pfn in the system -- in that
     * case, don't waste time iterating over bio segments.
     */
    if (!(q->bounce_gfp & GFP_DMA)) {
        if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
            return;
        pool = page_pool;
    } else {
        BUG_ON(!isa_page_pool);
        pool = isa_page_pool;
    }

    /*
     * slow path
     */
    __blk_queue_bounce(q, bio_orig, pool, must_bounce);
}
EXPORT_SYMBOL(blk_queue_bounce);
