/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

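/*
 * Walk every bio_vec in the bio chain and count the physical segments
 * that remain after clustering physically contiguous vecs. The size of
 * the first and last resulting segment is recorded in
 * bi_seg_front_size/bi_seg_back_size for later merge decisions.
 */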
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

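/*
 * Recount the physical segments of a single bio: bi_next is cleared
 * temporarily so the walk stops at this bio, then restored.
 */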
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

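/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * can be folded into one physical segment without exceeding the queue's
 * segment size and boundary limits.
 */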
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

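/*
 * Map one bio_vec into the scatterlist: either grow the current sg
 * entry when clustering permits, or start a new entry.
 */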
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			(*sg)->page_link &= ~0x02;
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
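
/*
 * Illustrative sketch (not part of this file): a driver typically maps
 * a request to its scatter/gather table right before issuing it to the
 * hardware. MY_MAX_SEGS and my_hw_issue() below are hypothetical names.
 *
 *	struct scatterlist sgl[MY_MAX_SEGS];
 *	int count;
 *
 *	sg_init_table(sgl, MY_MAX_SEGS);
 *	count = blk_rq_map_sg(q, rq, sgl);   // never more than rq->nr_phys_segments
 *	my_hw_issue(sgl, count);             // hand 'count' entries to the DMA engine
 */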

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries set up
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct scatterlist *sg;
	int nsegs, cluster;
	unsigned long i;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	bvprv = NULL;
	sg = NULL;
	bio_for_each_segment(bvec, bio, i) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in bio */

	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

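/*
 * Account for the physical segments @bio would add to @req; refuse the
 * merge if the queue's segment limit (or integrity limits) would be
 * exceeded.
 */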
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

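/*
 * ll_back_merge_fn/ll_front_merge_fn check whether @bio may be appended
 * to the tail of @req or prepended to its head without exceeding the
 * request's maximum sector count.
 */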
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

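/*
 * Check whether two whole requests may be merged, folding the two
 * boundary segments into one when they are physically contiguous.
 */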
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request. We can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

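/*
 * Decide where @bio could join @rq from sector arithmetic alone: e.g. a
 * request covering sectors 0..7 back-merges a bio starting at sector 8,
 * and front-merges an 8-sector bio ending right at its first sector.
 */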
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}