/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q: queue
 * @ufn: unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);
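
/*
 * Usage sketch (illustrative only): a request-based driver would normally
 * register these two hooks as a pair, so that whatever the prepare step
 * allocates is released when the request completes.  The names
 * example_prep_rq(), example_unprep_rq() and example_init_queue() are
 * hypothetical.
 *
 *	static int example_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		rq->special = kzalloc(64, GFP_ATOMIC);	// e.g. build a cdb here
 *		if (!rq->special)
 *			return BLKPREP_DEFER;		// out of memory, retry later
 *		return BLKPREP_OK;
 *	}
 *
 *	static void example_unprep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		kfree(rq->special);			// undo the prepare step
 *		rq->special = NULL;
 *	}
 *
 *	static void example_init_queue(struct request_queue *q)
 *	{
 *		blk_queue_prep_rq(q, example_prep_rq);
 *		blk_queue_unprep_rq(q, example_unprep_rq);
 *	}
 */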

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
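
/*
 * A minimal sketch of such a callback, assuming a hypothetical device that
 * must not let a single bio cross a power-of-two chunk boundary of
 * example_chunk_sectors sectors (a made-up name).  The callback returns how
 * many bytes of the new bio_vec may be added; note the empty-bio case, where
 * at least one page has to be accepted.
 *
 *	static int example_merge_bvec(struct request_queue *q,
 *				      struct bvec_merge_data *bvm,
 *				      struct bio_vec *bvec)
 *	{
 *		unsigned int bio_sectors = bvm->bi_size >> 9;
 *		int max;
 *
 *		max = (example_chunk_sectors -
 *		       ((bvm->bi_sector & (example_chunk_sectors - 1)) +
 *			bio_sectors)) << 9;
 *		if (max < 0)
 *			max = 0;		// bio already fills the chunk
 *		if (max <= bvec->bv_len && bio_sectors == 0)
 *			return bvec->bv_len;	// empty bio: take one page anyway
 *		return max;
 *	}
 *
 *	blk_queue_merge_bvec(q, example_merge_bvec);
 */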

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 * Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_write_same_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Description:
 * Returns a queue_limit struct to its default state. Should be used
 * by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->discard_zeroes_data = 1;
	lim->max_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 * The normal way for &struct bios to be passed to a device
 * driver is for them to be collected into requests on a request
 * queue, and then to allow the device driver to select requests
 * off that queue when it is ready. This works well for many block
 * devices. However some block devices (typically virtual devices
 * such as md or lvm) do not benefit from the processing on the
 * request queue, and are served best by having the requests passed
 * directly to them. This can be achieved by providing a function
 * to blk_queue_make_request().
 *
 * Caveat:
 * The driver that does this *must* be able to deal appropriately
 * with buffers in "highmemory". This can be accomplished by either calling
 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 * blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	blk_set_default_limits(&q->limits);

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
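
/*
 * A minimal sketch of how a bio-based driver might use this, with
 * hypothetical names (example_make_request, example_handle_bio); since no
 * request_fn is involved, the queue would typically come from
 * blk_alloc_queue() rather than blk_init_queue():
 *
 *	static void example_make_request(struct request_queue *q,
 *					 struct bio *bio)
 *	{
 *		example_handle_bio(bio);	// serve the bio directly
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, example_make_request);
 */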

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 * Different hardware can have different requirements as to what pages
 * it can do I/O directly to. A low level driver can call
 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
 * buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU. Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
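
/*
 * For instance, a driver for hardware that can only address the low 4GB
 * would do (sketch, "q" being its request queue):
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *
 * while a device without addressing restrictions could pass BLK_BOUNCE_ANY
 * to avoid bouncing altogether.
 */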

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 * Enables a low level driver to set a hard upper limit,
 * max_hw_sectors, on the size of requests. max_hw_sectors is set by
 * the device driver based upon the combined capabilities of I/O
 * controller and storage device.
 *
 * max_sectors is a soft limit imposed by the block layer for
 * filesystem type requests. This value can be overridden on a
 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 * The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
				    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 * See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
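
/*
 * For example, a controller limited to 512KiB per request would set, in the
 * usual 512-byte units (sketch, "q" being its request queue):
 *
 *	blk_queue_max_hw_sectors(q, 1024);	// 1024 * 512b = 512KiB
 */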

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q: the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 * Enables a low level driver to set an upper limit on the number of
 * hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 * Enables a low level driver to set an upper limit on the size of a
 * coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 * This should be set to the lowest possible block size that the
 * storage device can address. The default of 512 covers most
 * hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
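
/*
 * For example, a 4K-native device (hypothetical queue "q") would report:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 */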

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 * This should be set to the lowest possible sector size that the
 * hardware can operate on without reverting to read-modify-write
 * operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 * Some devices are naturally misaligned to compensate for things like
 * the legacy DOS partition table 63-sector offset. Low-level drivers
 * should call this function for devices whose first sector is not
 * naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
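
/*
 * E.g. a 512-byte-emulation drive with 4KiB physical sectors laid out for
 * the legacy 63-sector partition offset might be set up as follows (sketch,
 * hypothetical "q"; the 3584-byte value is illustrative):
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 */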

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 * Some devices have an internal block size bigger than the reported
 * hardware sector size. This function can be used to signal the
 * smallest I/O the device can perform without incurring a performance
 * penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 * Storage devices may report a granularity or preferred minimum I/O
 * size which is the smallest request the device can perform without
 * incurring a performance penalty. For disk drives this is often the
 * physical block size. For RAID arrays it is often the stripe chunk
 * size. A properly aligned multiple of minimum_io_size is the
 * preferred request size for workloads where a high number of I/O
 * operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal request size in bytes
 *
 * Description:
 * Storage devices may report an optimal I/O size, which is the
 * device's preferred unit for sustained I/O. This is rarely reported
 * for disk drives. For RAID arrays it is usually the stripe width or
 * the internal track size. A properly aligned multiple of
 * optimal_io_size is the preferred request size for workloads where
 * sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 * Storage devices may report an optimal I/O size, which is the
 * device's preferred unit for sustained I/O. This is rarely reported
 * for disk drives. For RAID arrays it is usually the stripe width or
 * the internal track size. A properly aligned multiple of
 * optimal_io_size is the preferred request size for workloads where
 * sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
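
/*
 * For example, a RAID array with a 64KiB chunk and four data drives might
 * advertise the chunk as its minimum and the full stripe as its optimal
 * I/O size (sketch, hypothetical "q"):
 *
 *	blk_queue_io_min(q, 64 * 1024);		// one chunk
 *	blk_queue_io_opt(q, 4 * 64 * 1024);	// one full stripe
 */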

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t: the stacking driver (top)
 * @b: the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 *
 * Returns 0 if the top and bottom queue_limits are compatible. The
 * top device's block sizes and alignment offsets may be adjusted to
 * ensure alignment with the bottom device. If no compatible sizes
 * and alignments exist, -1 is returned and the resulting top
 * queue_limits will have the misaligned flag set to indicate that
 * the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment. Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t: the stacking driver limits (top device)
 * @bdev: the component block_device (bottom)
 * @start: first data sector within component device
 *
 * Description:
 * Merges queue limits for a top device and a block_device. Returns
 * 0 if alignment didn't change. Returns -1 if adding the bottom
 * device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk: MD/DM gendisk (top)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 *
 * Description:
 * Merges the limits for a top level gendisk and a bottom level
 * block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
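
/*
 * A sketch of the intended calling pattern for a stacking driver, assuming
 * a hypothetical array object with a gendisk and a set of component block
 * devices ("example", member_bdev[] and data_offset_sectors[] are made-up
 * names; note that the offset passed here is in bytes):
 *
 *	blk_set_stacking_limits(&example->disk->queue->limits);
 *	for (i = 0; i < example->nr_members; i++)
 *		disk_stack_limits(example->disk, example->member_bdev[i],
 *				  example->data_offset_sectors[i] << 9);
 */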

/**
 * blk_queue_dma_pad - set pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q: the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf: physically contiguous buffer
 * @size: size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer. They have to have a
 * real area of memory to transfer it into. The use case for this is
 * ATAPI devices in DMA mode. If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer. What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer. If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
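
/*
 * A hedged sketch of the ATAPI-style use case described above; the buffer
 * size, the drain predicate and its policy are illustrative only:
 *
 *	#define EXAMPLE_DRAIN_SIZE	(16 * 1024)
 *
 *	static int example_drain_needed(struct request *rq)
 *	{
 *		// e.g. only packet commands with unknown residual need a drain
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
 *	}
 *
 *	buf = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, example_drain_needed, buf,
 *				    EXAMPLE_DRAIN_SIZE);
 */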

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * description:
 * set required memory and length alignment for direct dma transactions.
 * this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * description:
 * update required memory and length alignment for direct dma transactions.
 * If the requested alignment is larger than the current alignment, then
 * the current queue alignment is updated to the new value, otherwise it
 * is left alone. The design of this is to allow multiple objects
 * (driver, device, transport etc) to set their respective
 * alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q: the request queue for the device
 * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q. If it supports
 * flushing, REQ_FLUSH should be set. If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
		flush &= ~REQ_FUA;

	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
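
/*
 * For example, a driver for a device with a volatile write-back cache that
 * also honours FUA writes would advertise both flags (sketch, hypothetical
 * "q"), while a device with no cache, or a battery-backed one, would pass 0:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */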

void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
	q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);