/*
 * Functions related to generic block layer helpers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

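/*
 * A bio_batch tracks a group of bios submitted as one logical request:
 * @done starts at 1 so the batch cannot complete before submission
 * finishes, and the submitter drops that initial reference once all
 * bios are issued.  The last completion signals @wait.  @flags holds
 * BIO_UPTODATE, which is cleared if any bio in the batch fails.
 */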
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

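	/*
	 * -EOPNOTSUPP is deliberately not treated as a batch failure:
	 * BIO_UPTODATE is only cleared for real I/O errors.
	 */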
	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	sector_t max_discard_sectors;
	sector_t granularity, alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = bdev_discard_alignment(bdev) >> 9;
	alignment = sector_div(alignment, granularity);

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	sector_div(max_discard_sectors, granularity);
	max_discard_sectors *= granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
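		/*
		 * Illustrative numbers for the adjustment above: with
		 * granularity 8 and alignment 2, a chunk ending at
		 * sector 27 has 27 % 8 == 3 != 2, so end_sect is pulled
		 * back to ((27 - 2) / 8) * 8 + 2 == 26; the next chunk
		 * then starts on a properly aligned sector again.
		 */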

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
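
/*
 * Example (hypothetical caller, not part of this file): a filesystem
 * freeing an unused extent might issue
 *
 *	err = blkdev_issue_discard(sb->s_bdev, start, len, GFP_NOFS, 0);
 *
 * and treat -EOPNOTSUPP as "device cannot discard" rather than as a
 * hard failure.
 */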

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EOPNOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
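
/*
 * Example (hypothetical caller, not part of this file): zeroing a
 * range by writing the shared zero page with WRITE SAME:
 *
 *	err = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
 *				      ZERO_PAGE(0));
 *
 * This is the fast path blkdev_issue_zeroout() below tries before
 * falling back to plain writes.
 */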

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Zero-fill a block range, using WRITE SAME when the device
 *    supports it and falling back to explicitly writing zero-filled
 *    pages otherwise.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
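
/*
 * Example (hypothetical caller, not part of this file): zeroing a
 * region before exposing uninitialized blocks to userspace:
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS);
 *	if (err)
 *		return err;
 */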