/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * CDB from the request data, for instance.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

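/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * striped driver with a 64 KiB chunk size could register a merge_bvec_fn
 * that only accepts bytes up to the end of the current chunk, while still
 * taking at least one page when the bio is empty, as required above:
 *
 *	static int stripe_merge_bvec(struct request_queue *q,
 *				     struct bvec_merge_data *bvm,
 *				     struct bio_vec *biovec)
 *	{
 *		const int chunk = 64 * 1024;
 *		int max = chunk - (((bvm->bi_sector << 9) & (chunk - 1))
 *				   + bvm->bi_size);
 *
 *		if (max < 0)
 *			max = 0;
 *		if (max <= biovec->bv_len && bvm->bi_size == 0)
 *			return biovec->bv_len;
 *		return max;
 *	}
 *
 *	blk_queue_merge_bvec(q, stripe_merge_bvec);
 */
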
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 *    Returns a queue_limits structure to its default state. Can be used
 *    by stacking drivers like DM that stage table swaps and reuse an
 *    existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "high memory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);

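/*
 * Illustrative sketch (all "mydrv" names are hypothetical): a bio-based
 * driver such as a ramdisk bypasses the request queue by servicing each
 * bio directly in its make_request function:
 *
 *	static int mydrv_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		mydrv_transfer_bio(q->queuedata, bio);
 *		bio_endio(bio, 0);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, mydrv_make_request);
 */
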
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU. Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

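/*
 * Example sketch (pdev and q are assumed to be the driver's PCI device
 * and queue): a PCI driver whose device can only address the low 4 GB
 * would typically pair its DMA mask with a matching bounce limit:
 *
 *	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
 *		blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */
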
/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	q->limits.max_hw_sectors = max_hw_sectors;
	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
				      BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

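/*
 * Example (sketch): a controller limited to 64 KiB per transfer would
 * advertise 128 sectors; since that is below BLK_DEF_MAX_SECTORS, the
 * soft limit max_sectors is capped to the same value:
 *
 *	blk_queue_max_hw_sectors(q, 128);
 */
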
/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 *    This should be set to the lowest possible block size that the
 *    storage device can address.  The default of 512 covers most
 *    hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 *    This should be set to the lowest possible sector size that the
 *    hardware can operate on without reverting to read-modify-write
 *    operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

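/*
 * Example (sketch): a 4 KiB-native drive that emulates 512-byte logical
 * sectors would report:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *
 * which, via the helpers above, also raises io_min to 4096.
 */
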
/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *    Some devices are naturally misaligned to compensate for things like
 *    the legacy DOS partition table 63-sector offset.  Low-level drivers
 *    should call this function for devices whose first sector is not
 *    naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

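/*
 * Example (sketch): a 4 KiB drive that shifts its sectors so the legacy
 * 63-sector DOS partition start comes out aligned has its own LBA 0
 * sitting 3584 bytes off a physical-block boundary, and reports that as:
 *
 *	blk_queue_alignment_offset(q, 3584);
 */
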
/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 *    Some devices have an internal block size bigger than the reported
 *    hardware sector size.  This function can be used to signal the
 *    smallest I/O the device can perform without incurring a performance
 *    penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 *    Storage devices may report a granularity or preferred minimum I/O
 *    size which is the smallest request the device can perform without
 *    incurring a performance penalty.  For disk drives this is often the
 *    physical block size.  For RAID arrays it is often the stripe chunk
 *    size.  A properly aligned multiple of minimum_io_size is the
 *    preferred request size for workloads where a high number of I/O
 *    operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal request size in bytes
 *
 * Description:
 *    Storage devices may report an optimal I/O size, which is the
 *    device's preferred unit for sustained I/O.  This is rarely reported
 *    for disk drives.  For RAID arrays it is usually the stripe width or
 *    the internal track size.  A properly aligned multiple of
 *    optimal_io_size is the preferred request size for workloads where
 *    sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 *    Storage devices may report an optimal I/O size, which is the
 *    device's preferred unit for sustained I/O.  This is rarely reported
 *    for disk drives.  For RAID arrays it is usually the stripe width or
 *    the internal track size.  A properly aligned multiple of
 *    optimal_io_size is the preferred request size for workloads where
 *    sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);

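/*
 * Example (sketch): a RAID5 array with a 64 KiB chunk size and four data
 * drives would report the chunk as io_min and the full stripe as io_opt:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */
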
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t: the stacking driver (top)
 * @b: the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment;
	int ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->no_cluster |= b->no_cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t: the stacking driver limits (top device)
 * @bdev: the component block_device (bottom)
 * @start: first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk: MD/DM gendisk (top)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device, in bytes
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);

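/*
 * Illustrative sketch (the component list and its fields are
 * hypothetical): an MD/DM-style driver resets its limits and then folds
 * in each component device, here with data starting dev->offset bytes
 * into the component:
 *
 *	blk_set_default_limits(&disk->queue->limits);
 *	list_for_each_entry(dev, &conf->devices, list)
 *		disk_stack_limits(disk, dev->bdev, dev->offset);
 */
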
/**
 * blk_queue_dma_pad - set pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q: the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf: physically contiguous buffer
 * @size: size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);

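/*
 * Example sketch (modelled on the libata ATAPI case; ATAPI_MAX_DRAIN and
 * atapi_drain_needed are the names used there): allocate one fixed drain
 * buffer and register it along with the predicate that spots commands
 * needing it:
 *
 *	buf = kmalloc(ATAPI_MAX_DRAIN, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, atapi_drain_needed, buf,
 *				    ATAPI_MAX_DRAIN);
 */
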
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

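/*
 * Example (sketch): hardware whose DMA engine cannot cross a 64 KiB
 * boundary within one segment would set:
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 */
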
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc.) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

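/*
 * Example (sketch): a driver needing 4-byte alignment and a transport
 * needing 512-byte alignment can both update the queue; the larger mask
 * wins:
 *
 *	blk_queue_update_dma_alignment(q, 3);
 *	blk_queue_update_dma_alignment(q, 511);
 */
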
static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);