#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ   4
#define BLKDEV_MAX_RQ   128     /* Default maximum */

typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
        /*
         * count[], starved[], and wait[] are indexed by
         * BLK_RW_SYNC/BLK_RW_ASYNC
         */
        int count[2];
        int starved[2];
        int elvpriv;
        mempool_t *rq_pool;
        wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
        REQ_TYPE_FS = 1,        /* fs request */
        REQ_TYPE_BLOCK_PC,      /* scsi command */
        REQ_TYPE_SENSE,         /* sense request */
        REQ_TYPE_PM_SUSPEND,    /* suspend request */
        REQ_TYPE_PM_RESUME,     /* resume request */
        REQ_TYPE_PM_SHUTDOWN,   /* shutdown request */
        REQ_TYPE_SPECIAL,       /* driver defined type */
        /*
         * for ATA/ATAPI devices. this really doesn't belong here, ide should
         * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
         * private REQ_LB opcodes to differentiate what type of request this is
         */
        REQ_TYPE_ATA_TASKFILE,
        REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB     16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
        struct list_head queuelist;
        struct call_single_data csd;

        struct request_queue *q;

        unsigned int cmd_flags;
        enum rq_cmd_type_bits cmd_type;
        unsigned long atomic_flags;

        int cpu;

        /* the following two fields are internal, NEVER access directly */
        unsigned int __data_len;        /* total data len */
        sector_t __sector;              /* sector cursor */

        struct bio *bio;
        struct bio *biotail;

        struct hlist_node hash;         /* merge hash */
        /*
         * The rb_node is only used inside the io scheduler, requests
         * are pruned when moved to the dispatch queue. So let the
         * completion_data share space with the rb_node.
         */
        union {
                struct rb_node rb_node; /* sort/lookup */
                void *completion_data;
        };

        /*
         * Three pointers are available for the IO schedulers, if they need
         * more they have to dynamically allocate it.
         */
        void *elevator_private;
        void *elevator_private2;
        void *elevator_private3;

        struct gendisk *rq_disk;
        unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
        unsigned long long start_time_ns;
        unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
        /* Number of scatter-gather DMA addr+len pairs after
         * physical address coalescing is performed.
         */
        unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        unsigned short nr_integrity_segments;
#endif

        unsigned short ioprio;

        int ref_count;

        void *special;          /* opaque pointer available for LLD use */
        char *buffer;           /* kaddr of the current segment if available */

        int tag;
        int errors;

        /*
         * when request is used as a packet command carrier
         */
        unsigned char __cmd[BLK_MAX_CDB];
        unsigned char *cmd;
        unsigned short cmd_len;

        unsigned int extra_len; /* length of alignment and padding */
        unsigned int sense_len;
        unsigned int resid_len; /* residual count */
        void *sense;

        unsigned long deadline;
        struct list_head timeout_list;
        unsigned int timeout;
        int retries;

        /*
         * completion callback.
         */
        rq_end_io_fn *end_io;
        void *end_io_data;

        /* for bidi */
        struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
        return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
        /* PM state machine step value, currently driver specific */
        int pm_step;
        /* requested PM state value (S1, S2, S3, S4, ...) */
        u32 pm_state;
        void *data;             /* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
struct bvec_merge_data {
        struct block_device *bi_bdev;
        sector_t bi_sector;
        unsigned bi_size;
        unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
                             struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
        BLK_EH_NOT_HANDLED,
        BLK_EH_HANDLED,
        BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
        Queue_down,
        Queue_up,
};

struct blk_queue_tag {
        struct request **tag_index;     /* map of busy tags */
        unsigned long *tag_map;         /* bit map of free/busy tags */
        int busy;                       /* current depth */
        int max_depth;                  /* what we will send to device */
        int real_max_depth;             /* what the array can hold */
        atomic_t refcnt;                /* map can be shared */
};

#define BLK_SCSI_MAX_CMDS       (256)
#define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
        unsigned long bounce_pfn;
        unsigned long seg_boundary_mask;

        unsigned int max_hw_sectors;
        unsigned int max_sectors;
        unsigned int max_segment_size;
        unsigned int physical_block_size;
        unsigned int alignment_offset;
        unsigned int io_min;
        unsigned int io_opt;
        unsigned int max_discard_sectors;
        unsigned int discard_granularity;
        unsigned int discard_alignment;

        unsigned short logical_block_size;
        unsigned short max_segments;
        unsigned short max_integrity_segments;

        unsigned char misaligned;
        unsigned char discard_misaligned;
        unsigned char cluster;
        signed char discard_zeroes_data;
};

struct request_queue
{
        /*
         * Together with queue_head for cacheline sharing
         */
        struct list_head queue_head;
        struct request *last_merge;
        struct elevator_queue *elevator;

        /*
         * the queue request freelist, one for reads and one for writes
         */
        struct request_list rq;

        request_fn_proc *request_fn;
        make_request_fn *make_request_fn;
        prep_rq_fn *prep_rq_fn;
        unprep_rq_fn *unprep_rq_fn;
        unplug_fn *unplug_fn;
        merge_bvec_fn *merge_bvec_fn;
        softirq_done_fn *softirq_done_fn;
        rq_timed_out_fn *rq_timed_out_fn;
        dma_drain_needed_fn *dma_drain_needed;
        lld_busy_fn *lld_busy_fn;

        /*
         * Dispatch queue sorting
         */
        sector_t end_sector;
        struct request *boundary_rq;

        /*
         * Auto-unplugging state
         */
        struct timer_list unplug_timer;
        int unplug_thresh;              /* After this many requests */
        unsigned long unplug_delay;     /* After this many jiffies */
        struct work_struct unplug_work;

        struct backing_dev_info backing_dev_info;

        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
         */
        void *queuedata;

        /*
         * queue needs bounce pages for pages above this limit
         */
        gfp_t bounce_gfp;

        /*
         * various queue flags, see QUEUE_* below
         */
        unsigned long queue_flags;

        /*
         * protects queue structures from reentrancy. ->__queue_lock should
         * _never_ be used directly, it is queue private. always use
         * ->queue_lock.
         */
        spinlock_t __queue_lock;
        spinlock_t *queue_lock;

        /*
         * queue kobject
         */
        struct kobject kobj;

        /*
         * queue settings
         */
        unsigned long nr_requests;      /* Max # of requests */
        unsigned int nr_congestion_on;
        unsigned int nr_congestion_off;
        unsigned int nr_batching;

        void *dma_drain_buffer;
        unsigned int dma_drain_size;
        unsigned int dma_pad_mask;
        unsigned int dma_alignment;

        struct blk_queue_tag *queue_tags;
        struct list_head tag_busy_list;

        unsigned int nr_sorted;
        unsigned int in_flight[2];

        unsigned int rq_timeout;
        struct timer_list timeout;
        struct list_head timeout_list;

        struct queue_limits limits;

        /*
         * sg stuff
         */
        unsigned int sg_timeout;
        unsigned int sg_reserved_size;
        int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace *blk_trace;
#endif
        /*
         * for flush operations
         */
        unsigned int flush_flags;
        unsigned int flush_seq;
        int flush_err;
        struct request flush_rq;
        struct request *orig_flush_rq;
        struct list_head pending_flushes;

        struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
        struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
        /* Throttle data */
        struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_QUEUED       1       /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED      2       /* queue is stopped */
#define QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL    4       /* write queue has been filled */
#define QUEUE_FLAG_DEAD         5       /* queue being torn down */
#define QUEUE_FLAG_REENTER      6       /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED      7       /* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH    8       /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI         9       /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES     10      /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP    11      /* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO      12      /* fake timeout */
#define QUEUE_FLAG_STACKABLE    13      /* supports request stacking */
#define QUEUE_FLAG_NONROT       14      /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT         QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT      15      /* do IO stats */
#define QUEUE_FLAG_DISCARD      16      /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES    17      /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM   18      /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD   19      /* supports SECDISCARD */

#define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |    \
                                 (1 << QUEUE_FLAG_STACKABLE) |  \
                                 (1 << QUEUE_FLAG_SAME_COMP) |  \
                                 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
        spinlock_t *lock = q->queue_lock;
        return lock && spin_is_locked(lock);
#else
        return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
                                           struct request_queue *q)
{
        __set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
                                            struct request_queue *q)
{
        WARN_ON_ONCE(!queue_is_locked(q));

        if (test_bit(flag, &q->queue_flags)) {
                __clear_bit(flag, &q->queue_flags);
                return 1;
        }

        return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
                                          struct request_queue *q)
{
        WARN_ON_ONCE(!queue_is_locked(q));

        if (!test_bit(flag, &q->queue_flags)) {
                __set_bit(flag, &q->queue_flags);
                return 0;
        }

        return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
        WARN_ON_ONCE(!queue_is_locked(q));
        __set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
                                             struct request_queue *q)
{
        __clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
        return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
        WARN_ON_ONCE(!queue_is_locked(q));
        __clear_bit(flag, &q->queue_flags);
}
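
/*
 * Illustrative sketch (assumed caller code, not part of this header): the
 * locked helpers above expect ->queue_lock to be held, so a hypothetical
 * driver stopping its own queue by hand would do:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(q->queue_lock, flags);
 *      queue_flag_set(QUEUE_FLAG_STOPPED, q);
 *      spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * The *_unlocked variants skip the queue_is_locked() check and are meant
 * for queues that are not yet visible to anybody else.
 */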

#define blk_queue_plugged(q)    test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)     test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)  \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)  \
        test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
        test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
                            REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
        (((rq)->cmd_flags & REQ_STARTED) && \
         ((rq)->cmd_type == REQ_TYPE_FS || \
          ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq) \
        ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
         (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)    ((rq)->cpu != -1)
#define blk_bidi_rq(rq)         ((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)       (!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)      list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)         ((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
        return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
        return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
        return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
        if (sync)
                return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
        return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
        if (sync)
                queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
        else
                queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
        if (sync)
                queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
        else
                queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}


/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS        \
        (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)        \
        (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
         (((rq)->cmd_flags & REQ_DISCARD) || \
          (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK              0       /* serve it */
#define BLKPREP_KILL            1       /* fatal error, kill */
#define BLKPREP_DEFER           2       /* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH      : bounce all highmem pages
 * BLK_BOUNCE_ANY       : don't bounce anything
 * BLK_BOUNCE_ISA       : bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH         ((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH         -1ULL
#endif
#define BLK_BOUNCE_ANY          (-1ULL)
#define BLK_BOUNCE_ISA          (DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT  (60 * HZ)
#define BLK_MIN_SG_TIMEOUT      (7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
        return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
        struct page **pages;
        int page_order;
        int nr_entries;
        unsigned long offset;
        int null_mapped;
        int from_user;
};

struct req_iterator {
        int i;
        struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)              \
        for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)     \
        if ((rq->bio))                  \
                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)                    \
        __rq_for_each_bio(_iter.bio, _rq)                       \
                bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)                                 \
        (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
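
/*
 * Illustrative sketch (assumed driver code, not part of this header): a
 * simple memory-backed driver copying a request's data out one segment at
 * a time; dest is a hypothetical destination pointer:
 *
 *      struct req_iterator iter;
 *      struct bio_vec *bvec;
 *
 *      rq_for_each_segment(bvec, rq, iter) {
 *              void *kaddr = kmap_atomic(bvec->bv_page, KM_USER0);
 *
 *              memcpy(dest, kaddr + bvec->bv_offset, bvec->bv_len);
 *              kunmap_atomic(kaddr, KM_USER0);
 *              dest += bvec->bv_len;
 *      }
 */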

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
                                        gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
                                    unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                             struct bio_set *bs, gfp_t gfp_mask,
                             int (*bio_ctr)(struct bio *, struct bio *, void *),
                             void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                          unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
        clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
        set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
                               struct rq_map_data *, struct sg_iovec *, int,
                               unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                          struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
        return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()         : the current sector
 * blk_rq_bytes()       : bytes left in the entire request
 * blk_rq_cur_bytes()   : bytes left in the current segment
 * blk_rq_err_bytes()   : bytes left till the next error boundary
 * blk_rq_sectors()     : sectors left in the entire request
 * blk_rq_cur_sectors() : sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
        return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
        return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
        return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
        return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
        return blk_rq_cur_bytes(rq) >> 9;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
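
/*
 * Illustrative sketch (assumed driver code): the canonical ->request_fn
 * loop, entered with ->queue_lock held. blk_fetch_request() is simply
 * blk_peek_request() followed by blk_start_request():
 *
 *      static void hypothetical_request_fn(struct request_queue *q)
 *      {
 *              struct request *rq;
 *
 *              while ((rq = blk_fetch_request(q)) != NULL) {
 *                      if (rq->cmd_type != REQ_TYPE_FS) {
 *                              __blk_end_request_all(rq, -EIO);
 *                              continue;
 *                      }
 *                      hypothetical_transfer(rq);
 *                      __blk_end_request_all(rq, 0);
 *              }
 *      }
 */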

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends. __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
                               unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
                            unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
                              unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);
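
/*
 * Illustrative sketch (assumed driver code): completing a request in
 * chunks, e.g. from a completion interrupt. blk_end_request() returns
 * true while bytes remain and false once the request has fully finished,
 * after which rq must not be touched again:
 *
 *      bool still_pending = blk_end_request(rq, error, bytes_done);
 *
 *      if (!still_pending)
 *              dev->current_rq = NULL;
 *
 * where dev->current_rq is hypothetical per-device bookkeeping.
 */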

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                                 spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
                                                           request_fn_proc *,
                                                           spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
                                                      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
                                          unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                             sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                              sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
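
/*
 * Illustrative sketch (assumed driver code): a hypothetical probe routine
 * describing its hardware with the property setters above:
 *
 *      q = blk_init_queue(hypothetical_request_fn, &dev->lock);
 *      if (!q)
 *              return -ENOMEM;
 *      blk_queue_logical_block_size(q, 512);
 *      blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 *      blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 *      blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */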

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)       ((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
                                                     int tag)
{
        if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
                return NULL;
        return bqt->tag_index[tag];
}
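
/*
 * Illustrative sketch (assumed driver code): the usual tagged-command
 * cycle, with ->queue_lock held. On success blk_queue_start_tag() assigns
 * rq->tag and dequeues the request; a non-zero return means the tag map
 * is exhausted and the request should be retried later:
 *
 *      while ((rq = blk_peek_request(q)) != NULL) {
 *              if (blk_queue_start_tag(q, rq))
 *                      break;
 *              hypothetical_issue(rq);
 *      }
 *
 * and on completion:
 *
 *      blk_queue_end_tag(q, rq);
 */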

#define BLKDEV_DISCARD_SECURE   0x01    /* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
        return blkdev_issue_discard(sb->s_bdev,
                                    block << (sb->s_blocksize_bits - 9),
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
{
        return blkdev_issue_zeroout(sb->s_bdev,
                                    block << (sb->s_blocksize_bits - 9),
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask);
}
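
/*
 * Worked example for the two wrappers above: filesystem blocks are turned
 * into 512-byte sectors by shifting with (s_blocksize_bits - 9), so on a
 * 4K filesystem (s_blocksize_bits == 12) block 10 becomes sector
 * 10 << 3 == 80, and nr_blocks scales the same way.
 */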

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
        BLK_MAX_SEGMENTS        = 128,
        BLK_SAFE_MAX_SECTORS    = 255,
        BLK_DEF_MAX_SECTORS     = 1024,
        BLK_MAX_SEGMENT_SIZE    = 65536,
        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
        return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
        return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
        return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
        return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
        return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
        return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
        int retval = 512;

        if (q && q->limits.logical_block_size)
                retval = q->limits.logical_block_size;

        return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
        return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
        return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
        return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
        return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
        return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
        return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
        return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
        if (q->limits.misaligned)
                return -1;

        return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = (sector << 9) & (granularity - 1);

        return (granularity + lim->alignment_offset - alignment)
                & (granularity - 1);
}
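
/*
 * Worked example for queue_limit_alignment_offset(): with io_min == 4096,
 * physical_block_size <= 4096 and alignment_offset == 0, sector 7 sits at
 * byte 7 << 9 == 3584 within its 4096-byte granule, so the helper returns
 * (4096 + 0 - 3584) & 4095 == 512 bytes to the next aligned boundary.
 */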

static inline int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.misaligned)
                return -1;

        if (bdev != bdev->bd_contains)
                return bdev->bd_part->alignment_offset;

        return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
        if (q->limits.discard_misaligned)
                return -1;

        return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

        return (lim->discard_granularity + lim->discard_alignment - alignment)
                & (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
        if (q->limits.discard_zeroes_data == 1)
                return 1;

        return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
        return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
        return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                 unsigned int len)
{
        unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
        unsigned int bits = 8;
        do {
                bits++;
                size >>= 1;
        } while (size > 256);
        return bits;
}
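
/*
 * e.g. blksize_bits(512) == 9 and blksize_bits(4096) == 12; the loop
 * assumes size is a power of two and, per the comment above, > 256.
 */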

static inline unsigned int block_size(struct block_device *bdev)
{
        return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
        page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
        preempt_disable();
        req->start_time_ns = sched_clock();
        preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
        preempt_disable();
        req->io_start_time_ns = sched_clock();
        preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
        return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
        return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return 0;
}
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
extern void throtl_shutdown_timer_wq(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
{
        return 0;
}

static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) {}
static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
#endif /* CONFIG_BLK_DEV_THROTTLING */

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
        MODULE_ALIAS("block-major-" __stringify(major) "-*")
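
/*
 * Illustrative use (assumed driver code): a driver owning every minor of
 * a hypothetical major number 240 would declare
 *
 *      MODULE_ALIAS_BLOCKDEV_MAJOR(240);
 *
 * so that opening an unknown block-major-240-* node can trigger module
 * auto-loading.
 */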

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ     2       /* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE    4       /* generate data integrity on write */

struct blk_integrity_exchg {
        void *prot_buf;
        void *data_buf;
        sector_t sector;
        unsigned int data_size;
        unsigned short sector_size;
        const char *disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
        integrity_gen_fn *generate_fn;
        integrity_vrfy_fn *verify_fn;
        integrity_set_tag_fn *set_tag_fn;
        integrity_get_tag_fn *get_tag_fn;

        unsigned short flags;
        unsigned short tuple_size;
        unsigned short sector_size;
        unsigned short tag_size;

        const char *name;

        struct kobject kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
                                   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
                                  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
                                   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
        return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
        if (rq->bio == NULL)
                return 0;

        return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
        q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
        return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)                    (0)
#define blk_rq_count_integrity_sg(a, b)         (0)
#define blk_rq_map_integrity_sg(a, b, c)        (0)
#define bdev_get_integrity(a)                   (0)
#define blk_get_integrity(a)                    (0)
#define blk_integrity_compare(a, b)             (0)
#define blk_integrity_register(a, b)            (0)
#define blk_integrity_unregister(a)             do { } while (0)
#define blk_queue_max_integrity_segments(a, b)  do { } while (0)
#define queue_max_integrity_segments(a)         (0)
#define blk_integrity_merge_rq(a, b, c)         (0)
#define blk_integrity_merge_bio(a, b, c)        (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
        int (*open) (struct block_device *, fmode_t);
        int (*release) (struct gendisk *, fmode_t);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*direct_access) (struct block_device *, sector_t,
                              void **, unsigned long *);
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        /* this callback is with swap_lock and sometimes page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        struct module *owner;
};
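
/*
 * Illustrative sketch (assumed driver code): a minimal operations table;
 * apart from ->owner, every method is optional and may be left NULL:
 *
 *      static const struct block_device_operations hypothetical_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = hypothetical_open,
 *              .release        = hypothetical_release,
 *              .getgeo         = hypothetical_getgeo,
 *      };
 *
 * installed via disk->fops = &hypothetical_fops before add_disk().
 */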

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
                                 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
        return 0;
}

#endif /* CONFIG_BLOCK */

#endif