block/blk-core.c

1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 * - July 2000
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9 */
10
11/*
12 * This handles all read/write requests to block devices
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/backing-dev.h>
17#include <linux/bio.h>
18#include <linux/blkdev.h>
19#include <linux/highmem.h>
20#include <linux/mm.h>
21#include <linux/kernel_stat.h>
22#include <linux/string.h>
23#include <linux/init.h>
24#include <linux/completion.h>
25#include <linux/slab.h>
26#include <linux/swap.h>
27#include <linux/writeback.h>
28#include <linux/task_io_accounting_ops.h>
29#include <linux/fault-inject.h>
30#include <linux/list_sort.h>
31#include <linux/delay.h>
32#include <linux/ratelimit.h>
33#include <linux/pm_runtime.h>
34
35#define CREATE_TRACE_POINTS
36#include <trace/events/block.h>
37
38#include "blk.h"
39#include "blk-cgroup.h"
40
41EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
42EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
43EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
44EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
45
46DEFINE_IDA(blk_queue_ida);
47
48/*
49 * For the allocated request tables
50 */
51static struct kmem_cache *request_cachep;
52
53/*
54 * For queue allocation
55 */
56struct kmem_cache *blk_requestq_cachep;
57
58/*
59 * Controlling structure to kblockd
60 */
61static struct workqueue_struct *kblockd_workqueue;
62
63static void drive_stat_acct(struct request *rq, int new_io)
64{
65    struct hd_struct *part;
66    int rw = rq_data_dir(rq);
67    int cpu;
68
69    if (!blk_do_io_stat(rq))
70        return;
71
72    cpu = part_stat_lock();
73
74    if (!new_io) {
75        part = rq->part;
76        part_stat_inc(cpu, part, merges[rw]);
77    } else {
78        part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
79        if (!hd_struct_try_get(part)) {
80            /*
81             * The partition is already being removed,
82             * the request will be accounted on the disk only
83             *
84             * We take a reference on disk->part0 although that
85             * partition will never be deleted, so we can treat
86             * it as any other partition.
87             */
88            part = &rq->rq_disk->part0;
89            hd_struct_get(part);
90        }
91        part_round_stats(cpu, part);
92        part_inc_in_flight(part, rw);
93        rq->part = part;
94    }
95
96    part_stat_unlock();
97}
98
99void blk_queue_congestion_threshold(struct request_queue *q)
100{
101    int nr;
102
103    nr = q->nr_requests - (q->nr_requests / 8) + 1;
104    if (nr > q->nr_requests)
105        nr = q->nr_requests;
106    q->nr_congestion_on = nr;
107
108    nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
109    if (nr < 1)
110        nr = 1;
111    q->nr_congestion_off = nr;
112}
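
/*
 * Worked example (editorial note): with the default nr_requests of 128 the
 * thresholds above come out as nr_congestion_on = 128 - 16 + 1 = 113 and
 * nr_congestion_off = 128 - 16 - 8 - 1 = 103, so the queue is flagged
 * congested shortly before it fills and is only cleared again once it has
 * drained a little below that point.
 */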
113
114/**
115 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
116 * @bdev: device
117 *
118 * Locates the passed device's request queue and returns the address of its
119 * backing_dev_info
120 *
121 * Will return NULL if the request queue cannot be located.
122 */
123struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
124{
125    struct backing_dev_info *ret = NULL;
126    struct request_queue *q = bdev_get_queue(bdev);
127
128    if (q)
129        ret = &q->backing_dev_info;
130    return ret;
131}
132EXPORT_SYMBOL(blk_get_backing_dev_info);
133
134void blk_rq_init(struct request_queue *q, struct request *rq)
135{
136    memset(rq, 0, sizeof(*rq));
137
138    INIT_LIST_HEAD(&rq->queuelist);
139    INIT_LIST_HEAD(&rq->timeout_list);
140    rq->cpu = -1;
141    rq->q = q;
142    rq->__sector = (sector_t) -1;
143    INIT_HLIST_NODE(&rq->hash);
144    RB_CLEAR_NODE(&rq->rb_node);
145    rq->cmd = rq->__cmd;
146    rq->cmd_len = BLK_MAX_CDB;
147    rq->tag = -1;
148    rq->ref_count = 1;
149    rq->start_time = jiffies;
150    set_start_time_ns(rq);
151    rq->part = NULL;
152}
153EXPORT_SYMBOL(blk_rq_init);
154
155static void req_bio_endio(struct request *rq, struct bio *bio,
156              unsigned int nbytes, int error)
157{
158    if (error)
159        clear_bit(BIO_UPTODATE, &bio->bi_flags);
160    else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
161        error = -EIO;
162
163    if (unlikely(rq->cmd_flags & REQ_QUIET))
164        set_bit(BIO_QUIET, &bio->bi_flags);
165
166    bio_advance(bio, nbytes);
167
168    /* don't actually finish bio if it's part of flush sequence */
169    if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
170        bio_endio(bio, error);
171}
172
173void blk_dump_rq_flags(struct request *rq, char *msg)
174{
175    int bit;
176
177    printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
178        rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
179        rq->cmd_flags);
180
181    printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
182           (unsigned long long)blk_rq_pos(rq),
183           blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
184    printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
185           rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
186
187    if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
188        printk(KERN_INFO " cdb: ");
189        for (bit = 0; bit < BLK_MAX_CDB; bit++)
190            printk("%02x ", rq->cmd[bit]);
191        printk("\n");
192    }
193}
194EXPORT_SYMBOL(blk_dump_rq_flags);
195
196static void blk_delay_work(struct work_struct *work)
197{
198    struct request_queue *q;
199
200    q = container_of(work, struct request_queue, delay_work.work);
201    spin_lock_irq(q->queue_lock);
202    __blk_run_queue(q);
203    spin_unlock_irq(q->queue_lock);
204}
205
206/**
207 * blk_delay_queue - restart queueing after defined interval
208 * @q: The &struct request_queue in question
209 * @msecs: Delay in msecs
210 *
211 * Description:
212 * Sometimes queueing needs to be postponed for a little while, to allow
213 * resources to come back. This function will make sure that queueing is
214 * restarted around the specified time. Queue lock must be held.
215 */
216void blk_delay_queue(struct request_queue *q, unsigned long msecs)
217{
218    if (likely(!blk_queue_dead(q)))
219        queue_delayed_work(kblockd_workqueue, &q->delay_work,
220                   msecs_to_jiffies(msecs));
221}
222EXPORT_SYMBOL(blk_delay_queue);
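
/*
 * Example (editorial sketch, not part of this file): a request_fn backing
 * off with blk_delay_queue() when a per-device resource is temporarily
 * exhausted.  The mydrv_* helpers are hypothetical; the block layer calls
 * shown are the real ones defined here and in blkdev.h.
 */
#if 0	/* illustration only */
static void mydrv_request_fn(struct request_queue *q)
{
    struct request *rq;

    while ((rq = blk_fetch_request(q)) != NULL) {
        if (!mydrv_reserve_dma_buffer(q->queuedata)) {
            /* out of buffers: put the request back, retry in 3 msecs */
            blk_requeue_request(q, rq);
            blk_delay_queue(q, 3);
            return;
        }
        mydrv_issue(rq);    /* completed later via blk_end_request_all() */
    }
}
#endif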
223
224/**
225 * blk_start_queue - restart a previously stopped queue
226 * @q: The &struct request_queue in question
227 *
228 * Description:
229 * blk_start_queue() will clear the stop flag on the queue, and call
230 * the request_fn for the queue if it was in a stopped state when
231 * entered. Also see blk_stop_queue(). Queue lock must be held.
232 **/
233void blk_start_queue(struct request_queue *q)
234{
235    WARN_ON(!irqs_disabled());
236
237    queue_flag_clear(QUEUE_FLAG_STOPPED, q);
238    __blk_run_queue(q);
239}
240EXPORT_SYMBOL(blk_start_queue);
241
242/**
243 * blk_stop_queue - stop a queue
244 * @q: The &struct request_queue in question
245 *
246 * Description:
247 * The Linux block layer assumes that a block driver will consume all
248 * entries on the request queue when the request_fn strategy is called.
249 * Often this will not happen, because of hardware limitations (queue
250 * depth settings). If a device driver gets a 'queue full' response,
251 * or if it simply chooses not to queue more I/O at one point, it can
252 * call this function to prevent the request_fn from being called until
253 * the driver has signalled it's ready to go again. This happens by calling
254 * blk_start_queue() to restart queue operations. Queue lock must be held.
255 **/
256void blk_stop_queue(struct request_queue *q)
257{
258    cancel_delayed_work(&q->delay_work);
259    queue_flag_set(QUEUE_FLAG_STOPPED, q);
260}
261EXPORT_SYMBOL(blk_stop_queue);
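
/*
 * Example (editorial sketch): the usual blk_stop_queue()/blk_start_queue()
 * pairing.  The request_fn stops the queue when the hardware queue is full
 * and the completion interrupt restarts it; both run under q->queue_lock
 * with interrupts disabled, as the API requires.  mydrv_* names are
 * hypothetical.
 */
#if 0	/* illustration only */
static void mydrv_request_fn(struct request_queue *q)
{
    struct request *rq;

    while ((rq = blk_fetch_request(q)) != NULL) {
        if (mydrv_hw_queue_full(q->queuedata)) {
            blk_requeue_request(q, rq);
            blk_stop_queue(q);
            return;
        }
        mydrv_issue(rq);
    }
}

static void mydrv_irq_slot_freed(struct request_queue *q)
{
    unsigned long flags;

    spin_lock_irqsave(q->queue_lock, flags);
    blk_start_queue(q);    /* clears QUEUE_FLAG_STOPPED and reruns request_fn */
    spin_unlock_irqrestore(q->queue_lock, flags);
}
#endif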
262
263/**
264 * blk_sync_queue - cancel any pending callbacks on a queue
265 * @q: the queue
266 *
267 * Description:
268 * The block layer may perform asynchronous callback activity
269 * on a queue, such as calling the unplug function after a timeout.
270 * A block device may call blk_sync_queue to ensure that any
271 * such activity is cancelled, thus allowing it to release resources
272 * that the callbacks might use. The caller must already have made sure
273 * that its ->make_request_fn will not re-add plugging prior to calling
274 * this function.
275 *
276 * This function does not cancel any asynchronous activity arising
277 * out of elevator or throttling code. That would require elevator_exit()
278 * and blkcg_exit_queue() to be called with queue lock initialized.
279 *
280 */
281void blk_sync_queue(struct request_queue *q)
282{
283    del_timer_sync(&q->timeout);
284    cancel_delayed_work_sync(&q->delay_work);
285}
286EXPORT_SYMBOL(blk_sync_queue);
287
288/**
289 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
290 * @q: The queue to run
291 *
292 * Description:
293 * Invoke request handling on a queue if there are any pending requests.
294 * May be used to restart request handling after a request has completed.
295 * This variant runs the queue whether or not the queue has been
296 * stopped. Must be called with the queue lock held and interrupts
297 * disabled. See also @blk_run_queue.
298 */
299inline void __blk_run_queue_uncond(struct request_queue *q)
300{
301    if (unlikely(blk_queue_dead(q)))
302        return;
303
304    /*
305     * Some request_fn implementations, e.g. scsi_request_fn(), unlock
306     * the queue lock internally. As a result multiple threads may be
307     * running such a request function concurrently. Keep track of the
308     * number of active request_fn invocations such that blk_drain_queue()
309     * can wait until all these request_fn calls have finished.
310     */
311    q->request_fn_active++;
312    q->request_fn(q);
313    q->request_fn_active--;
314}
315
316/**
317 * __blk_run_queue - run a single device queue
318 * @q: The queue to run
319 *
320 * Description:
321 * See @blk_run_queue. This variant must be called with the queue lock
322 * held and interrupts disabled.
323 */
324void __blk_run_queue(struct request_queue *q)
325{
326    if (unlikely(blk_queue_stopped(q)))
327        return;
328
329    __blk_run_queue_uncond(q);
330}
331EXPORT_SYMBOL(__blk_run_queue);
332
333/**
334 * blk_run_queue_async - run a single device queue in workqueue context
335 * @q: The queue to run
336 *
337 * Description:
338 * Tells kblockd to perform the equivalent of @blk_run_queue on our
339 * behalf. The caller must hold the queue lock.
340 */
341void blk_run_queue_async(struct request_queue *q)
342{
343    if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
344        mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
345}
346EXPORT_SYMBOL(blk_run_queue_async);
347
348/**
349 * blk_run_queue - run a single device queue
350 * @q: The queue to run
351 *
352 * Description:
353 * Invoke request handling on this queue, if it has pending work to do.
354 * May be used to restart queueing when a request has completed.
355 */
356void blk_run_queue(struct request_queue *q)
357{
358    unsigned long flags;
359
360    spin_lock_irqsave(q->queue_lock, flags);
361    __blk_run_queue(q);
362    spin_unlock_irqrestore(q->queue_lock, flags);
363}
364EXPORT_SYMBOL(blk_run_queue);
365
366void blk_put_queue(struct request_queue *q)
367{
368    kobject_put(&q->kobj);
369}
370EXPORT_SYMBOL(blk_put_queue);
371
372/**
373 * __blk_drain_queue - drain requests from request_queue
374 * @q: queue to drain
375 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
376 *
377 * Drain requests from @q. If @drain_all is set, all requests are drained.
378 * If not, only ELVPRIV requests are drained. The caller is responsible
379 * for ensuring that no new requests which need to be drained are queued.
380 */
381static void __blk_drain_queue(struct request_queue *q, bool drain_all)
382    __releases(q->queue_lock)
383    __acquires(q->queue_lock)
384{
385    int i;
386
387    lockdep_assert_held(q->queue_lock);
388
389    while (true) {
390        bool drain = false;
391
392        /*
393         * The caller might be trying to drain @q before its
394         * elevator is initialized.
395         */
396        if (q->elevator)
397            elv_drain_elevator(q);
398
399        blkcg_drain_queue(q);
400
401        /*
402         * This function might be called on a queue which failed
403         * driver init after queue creation, or on one that is not yet
404         * fully active. Some drivers (e.g. fd and loop) get unhappy
405         * in such cases. Kick queue iff dispatch queue has
406         * something on it and @q has request_fn set.
407         */
408        if (!list_empty(&q->queue_head) && q->request_fn)
409            __blk_run_queue(q);
410
411        drain |= q->nr_rqs_elvpriv;
412        drain |= q->request_fn_active;
413
414        /*
415         * Unfortunately, requests are queued at and tracked from
416         * multiple places and there's no single counter which can
417         * be drained. Check all the queues and counters.
418         */
419        if (drain_all) {
420            drain |= !list_empty(&q->queue_head);
421            for (i = 0; i < 2; i++) {
422                drain |= q->nr_rqs[i];
423                drain |= q->in_flight[i];
424                drain |= !list_empty(&q->flush_queue[i]);
425            }
426        }
427
428        if (!drain)
429            break;
430
431        spin_unlock_irq(q->queue_lock);
432
433        msleep(10);
434
435        spin_lock_irq(q->queue_lock);
436    }
437
438    /*
439     * With queue marked dead, any woken up waiter will fail the
440     * allocation path, so the wakeup chaining is lost and we're
441     * left with hung waiters. We need to wake up those waiters.
442     */
443    if (q->request_fn) {
444        struct request_list *rl;
445
446        blk_queue_for_each_rl(rl, q)
447            for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
448                wake_up_all(&rl->wait[i]);
449    }
450}
451
452/**
453 * blk_queue_bypass_start - enter queue bypass mode
454 * @q: queue of interest
455 *
456 * In bypass mode, only the dispatch FIFO queue of @q is used. This
457 * function makes @q enter bypass mode and drains all requests which were
458 * throttled or issued before. On return, it's guaranteed that no request
459 * is being throttled or has ELVPRIV set, and blk_queue_bypass() returns
460 * %true inside queue or RCU read lock.
461 */
462void blk_queue_bypass_start(struct request_queue *q)
463{
464    bool drain;
465
466    spin_lock_irq(q->queue_lock);
467    drain = !q->bypass_depth++;
468    queue_flag_set(QUEUE_FLAG_BYPASS, q);
469    spin_unlock_irq(q->queue_lock);
470
471    if (drain) {
472        spin_lock_irq(q->queue_lock);
473        __blk_drain_queue(q, false);
474        spin_unlock_irq(q->queue_lock);
475
476        /* ensure blk_queue_bypass() is %true inside RCU read lock */
477        synchronize_rcu();
478    }
479}
480EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
481
482/**
483 * blk_queue_bypass_end - leave queue bypass mode
484 * @q: queue of interest
485 *
486 * Leave bypass mode and restore the normal queueing behavior.
487 */
488void blk_queue_bypass_end(struct request_queue *q)
489{
490    spin_lock_irq(q->queue_lock);
491    if (!--q->bypass_depth)
492        queue_flag_clear(QUEUE_FLAG_BYPASS, q);
493    WARN_ON_ONCE(q->bypass_depth < 0);
494    spin_unlock_irq(q->queue_lock);
495}
496EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
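
/*
 * Example (editorial sketch): a caller that needs the queue quiesced while
 * it swaps elevator-private state, in the style of elevator_switch().  The
 * mydrv_swap_state() helper is hypothetical.
 */
#if 0	/* illustration only */
static int mydrv_swap_elv_state(struct request_queue *q)
{
    int ret;

    blk_queue_bypass_start(q);    /* drains throttled/ELVPRIV requests */
    ret = mydrv_swap_state(q);
    blk_queue_bypass_end(q);

    return ret;
}
#endif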
497
498/**
499 * blk_cleanup_queue - shutdown a request queue
500 * @q: request queue to shutdown
501 *
502 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
503 * put it. All future requests will be failed immediately with -ENODEV.
504 */
505void blk_cleanup_queue(struct request_queue *q)
506{
507    spinlock_t *lock = q->queue_lock;
508
509    /* mark @q DYING, no new request or merges will be allowed afterwards */
510    mutex_lock(&q->sysfs_lock);
511    queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
512    spin_lock_irq(lock);
513
514    /*
515     * A dying queue is permanently in bypass mode till released. Note
516     * that, unlike blk_queue_bypass_start(), we aren't performing
517     * synchronize_rcu() after entering bypass mode to avoid the delay
518     * as some drivers create and destroy a lot of queues while
519     * probing. This is still safe because blk_release_queue() will be
520     * called only after the queue refcnt drops to zero and nothing,
521     * RCU or not, would be traversing the queue by then.
522     */
523    q->bypass_depth++;
524    queue_flag_set(QUEUE_FLAG_BYPASS, q);
525
526    queue_flag_set(QUEUE_FLAG_NOMERGES, q);
527    queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
528    queue_flag_set(QUEUE_FLAG_DYING, q);
529    spin_unlock_irq(lock);
530    mutex_unlock(&q->sysfs_lock);
531
532    /*
533     * Drain all requests queued before DYING marking. Set DEAD flag to
534     * prevent that q->request_fn() gets invoked after draining finished.
535     */
536    spin_lock_irq(lock);
537    __blk_drain_queue(q, true);
538    queue_flag_set(QUEUE_FLAG_DEAD, q);
539    spin_unlock_irq(lock);
540
541    /* @q won't process any more request, flush async actions */
542    del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
543    blk_sync_queue(q);
544
545    spin_lock_irq(lock);
546    if (q->queue_lock != &q->__queue_lock)
547        q->queue_lock = &q->__queue_lock;
548    spin_unlock_irq(lock);
549
550    /* @q is and will stay empty, shutdown and put */
551    blk_put_queue(q);
552}
553EXPORT_SYMBOL(blk_cleanup_queue);
554
555int blk_init_rl(struct request_list *rl, struct request_queue *q,
556        gfp_t gfp_mask)
557{
558    if (unlikely(rl->rq_pool))
559        return 0;
560
561    rl->q = q;
562    rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
563    rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
564    init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
565    init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
566
567    rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
568                      mempool_free_slab, request_cachep,
569                      gfp_mask, q->node);
570    if (!rl->rq_pool)
571        return -ENOMEM;
572
573    return 0;
574}
575
576void blk_exit_rl(struct request_list *rl)
577{
578    if (rl->rq_pool)
579        mempool_destroy(rl->rq_pool);
580}
581
582struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
583{
584    return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
585}
586EXPORT_SYMBOL(blk_alloc_queue);
587
588struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
589{
590    struct request_queue *q;
591    int err;
592
593    q = kmem_cache_alloc_node(blk_requestq_cachep,
594                gfp_mask | __GFP_ZERO, node_id);
595    if (!q)
596        return NULL;
597
598    q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
599    if (q->id < 0)
600        goto fail_q;
601
602    q->backing_dev_info.ra_pages =
603            (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
604    q->backing_dev_info.state = 0;
605    q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
606    q->backing_dev_info.name = "block";
607    q->node = node_id;
608
609    err = bdi_init(&q->backing_dev_info);
610    if (err)
611        goto fail_id;
612
613    setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
614            laptop_mode_timer_fn, (unsigned long) q);
615    setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
616    INIT_LIST_HEAD(&q->queue_head);
617    INIT_LIST_HEAD(&q->timeout_list);
618    INIT_LIST_HEAD(&q->icq_list);
619#ifdef CONFIG_BLK_CGROUP
620    INIT_LIST_HEAD(&q->blkg_list);
621#endif
622    INIT_LIST_HEAD(&q->flush_queue[0]);
623    INIT_LIST_HEAD(&q->flush_queue[1]);
624    INIT_LIST_HEAD(&q->flush_data_in_flight);
625    INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
626
627    kobject_init(&q->kobj, &blk_queue_ktype);
628
629    mutex_init(&q->sysfs_lock);
630    spin_lock_init(&q->__queue_lock);
631
632    /*
633     * By default initialize queue_lock to internal lock and driver can
634     * override it later if need be.
635     */
636    q->queue_lock = &q->__queue_lock;
637
638    /*
639     * A queue starts its life with bypass turned on to avoid
640     * unnecessary bypass on/off overhead and nasty surprises during
641     * init. The initial bypass will be finished when the queue is
642     * registered by blk_register_queue().
643     */
644    q->bypass_depth = 1;
645    __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
646
647    if (blkcg_init_queue(q))
648        goto fail_id;
649
650    return q;
651
652fail_id:
653    ida_simple_remove(&blk_queue_ida, q->id);
654fail_q:
655    kmem_cache_free(blk_requestq_cachep, q);
656    return NULL;
657}
658EXPORT_SYMBOL(blk_alloc_queue_node);
659
660/**
661 * blk_init_queue - prepare a request queue for use with a block device
662 * @rfn: The function to be called to process requests that have been
663 * placed on the queue.
664 * @lock: Request queue spin lock
665 *
666 * Description:
667 * If a block device wishes to use the standard request handling procedures,
668 * which sorts requests and coalesces adjacent requests, then it must
669 * call blk_init_queue(). The function @rfn will be called when there
670 * are requests on the queue that need to be processed. If the device
671 * supports plugging, then @rfn may not be called immediately when requests
672 * are available on the queue, but may be called at some time later instead.
673 * Plugged queues are generally unplugged when a buffer belonging to one
674 * of the requests on the queue is needed, or due to memory pressure.
675 *
676 * @rfn is not required, or even expected, to remove all requests off the
677 * queue, but only as many as it can handle at a time. If it does leave
678 * requests on the queue, it is responsible for arranging that the requests
679 * get dealt with eventually.
680 *
681 * The queue spin lock must be held while manipulating the requests on the
682 * request queue; this lock will be taken also from interrupt context, so irq
683 * disabling is needed for it.
684 *
685 * Function returns a pointer to the initialized request queue, or %NULL if
686 * it didn't succeed.
687 *
688 * Note:
689 * blk_init_queue() must be paired with a blk_cleanup_queue() call
690 * when the block device is deactivated (such as at module unload).
691 **/
692
693struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
694{
695    return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
696}
697EXPORT_SYMBOL(blk_init_queue);
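
/*
 * Example (editorial sketch): minimal setup and teardown around
 * blk_init_queue() for a request_fn style driver.  mydrv_* names are
 * hypothetical and error handling is reduced to the essentials.
 */
#if 0	/* illustration only */
static DEFINE_SPINLOCK(mydrv_lock);

static void mydrv_request_fn(struct request_queue *q)
{
    struct request *rq;

    while ((rq = blk_fetch_request(q)) != NULL)
        mydrv_issue(rq);    /* completed later via blk_end_request_all() */
}

static struct request_queue *mydrv_create_queue(void)
{
    struct request_queue *q;

    q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
    if (!q)
        return NULL;

    blk_queue_max_hw_sectors(q, 256);    /* device-specific limits */
    return q;
}

static void mydrv_destroy_queue(struct request_queue *q)
{
    blk_cleanup_queue(q);    /* must pair with blk_init_queue() */
}
#endif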
698
699struct request_queue *
700blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
701{
702    struct request_queue *uninit_q, *q;
703
704    uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
705    if (!uninit_q)
706        return NULL;
707
708    q = blk_init_allocated_queue(uninit_q, rfn, lock);
709    if (!q)
710        blk_cleanup_queue(uninit_q);
711
712    return q;
713}
714EXPORT_SYMBOL(blk_init_queue_node);
715
716struct request_queue *
717blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
718             spinlock_t *lock)
719{
720    if (!q)
721        return NULL;
722
723    if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
724        return NULL;
725
726    q->request_fn = rfn;
727    q->prep_rq_fn = NULL;
728    q->unprep_rq_fn = NULL;
729    q->queue_flags |= QUEUE_FLAG_DEFAULT;
730
731    /* Override internal queue lock with supplied lock pointer */
732    if (lock)
733        q->queue_lock = lock;
734
735    /*
736     * This also sets hw/phys segments, boundary and size
737     */
738    blk_queue_make_request(q, blk_queue_bio);
739
740    q->sg_reserved_size = INT_MAX;
741
742    /* init elevator */
743    if (elevator_init(q, NULL))
744        return NULL;
745    return q;
746}
747EXPORT_SYMBOL(blk_init_allocated_queue);
748
749bool blk_get_queue(struct request_queue *q)
750{
751    if (likely(!blk_queue_dying(q))) {
752        __blk_get_queue(q);
753        return true;
754    }
755
756    return false;
757}
758EXPORT_SYMBOL(blk_get_queue);
759
760static inline void blk_free_request(struct request_list *rl, struct request *rq)
761{
762    if (rq->cmd_flags & REQ_ELVPRIV) {
763        elv_put_request(rl->q, rq);
764        if (rq->elv.icq)
765            put_io_context(rq->elv.icq->ioc);
766    }
767
768    mempool_free(rq, rl->rq_pool);
769}
770
771/*
772 * ioc_batching returns true if the ioc is a valid batching request and
773 * should be given priority access to a request.
774 */
775static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
776{
777    if (!ioc)
778        return 0;
779
780    /*
781     * Make sure the process is able to allocate at least 1 request
782     * even if the batch times out, otherwise we could theoretically
783     * lose wakeups.
784     */
785    return ioc->nr_batch_requests == q->nr_batching ||
786        (ioc->nr_batch_requests > 0
787        && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
788}
789
790/*
791 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
792 * will cause the process to be a "batcher" on all queues in the system. This
793 * is the behaviour we want though - once it gets a wakeup it should be given
794 * a nice run.
795 */
796static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
797{
798    if (!ioc || ioc_batching(q, ioc))
799        return;
800
801    ioc->nr_batch_requests = q->nr_batching;
802    ioc->last_waited = jiffies;
803}
804
805static void __freed_request(struct request_list *rl, int sync)
806{
807    struct request_queue *q = rl->q;
808
809    /*
810     * bdi isn't aware of blkcg yet. As all async IOs end up root
811     * blkcg anyway, just use root blkcg state.
812     */
813    if (rl == &q->root_rl &&
814        rl->count[sync] < queue_congestion_off_threshold(q))
815        blk_clear_queue_congested(q, sync);
816
817    if (rl->count[sync] + 1 <= q->nr_requests) {
818        if (waitqueue_active(&rl->wait[sync]))
819            wake_up(&rl->wait[sync]);
820
821        blk_clear_rl_full(rl, sync);
822    }
823}
824
825/*
826 * A request has just been released. Account for it, update the full and
827 * congestion status, wake up any waiters. Called under q->queue_lock.
828 */
829static void freed_request(struct request_list *rl, unsigned int flags)
830{
831    struct request_queue *q = rl->q;
832    int sync = rw_is_sync(flags);
833
834    q->nr_rqs[sync]--;
835    rl->count[sync]--;
836    if (flags & REQ_ELVPRIV)
837        q->nr_rqs_elvpriv--;
838
839    __freed_request(rl, sync);
840
841    if (unlikely(rl->starved[sync ^ 1]))
842        __freed_request(rl, sync ^ 1);
843}
844
845/*
846 * Determine if elevator data should be initialized when allocating the
847 * request associated with @bio.
848 */
849static bool blk_rq_should_init_elevator(struct bio *bio)
850{
851    if (!bio)
852        return true;
853
854    /*
855     * Flush requests do not use the elevator so skip initialization.
856     * This allows a request to share the flush and elevator data.
857     */
858    if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
859        return false;
860
861    return true;
862}
863
864/**
865 * rq_ioc - determine io_context for request allocation
866 * @bio: request being allocated is for this bio (can be %NULL)
867 *
868 * Determine io_context to use for request allocation for @bio. May return
869 * %NULL if %current->io_context doesn't exist.
870 */
871static struct io_context *rq_ioc(struct bio *bio)
872{
873#ifdef CONFIG_BLK_CGROUP
874    if (bio && bio->bi_ioc)
875        return bio->bi_ioc;
876#endif
877    return current->io_context;
878}
879
880/**
881 * __get_request - get a free request
882 * @rl: request list to allocate from
883 * @rw_flags: RW and SYNC flags
884 * @bio: bio to allocate request for (can be %NULL)
885 * @gfp_mask: allocation mask
886 *
887 * Get a free request from @q. This function may fail under memory
888 * pressure or if @q is dead.
889 *
890 * Must be called with @q->queue_lock held and,
891 * Returns %NULL on failure, with @q->queue_lock held.
892 * Returns !%NULL on success, with @q->queue_lock *not held*.
893 */
894static struct request *__get_request(struct request_list *rl, int rw_flags,
895                     struct bio *bio, gfp_t gfp_mask)
896{
897    struct request_queue *q = rl->q;
898    struct request *rq;
899    struct elevator_type *et = q->elevator->type;
900    struct io_context *ioc = rq_ioc(bio);
901    struct io_cq *icq = NULL;
902    const bool is_sync = rw_is_sync(rw_flags) != 0;
903    int may_queue;
904
905    if (unlikely(blk_queue_dying(q)))
906        return NULL;
907
908    may_queue = elv_may_queue(q, rw_flags);
909    if (may_queue == ELV_MQUEUE_NO)
910        goto rq_starved;
911
912    if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
913        if (rl->count[is_sync]+1 >= q->nr_requests) {
914            /*
915             * The queue will fill after this allocation, so set
916             * it as full, and mark this process as "batching".
917             * This process will be allowed to complete a batch of
918             * requests, others will be blocked.
919             */
920            if (!blk_rl_full(rl, is_sync)) {
921                ioc_set_batching(q, ioc);
922                blk_set_rl_full(rl, is_sync);
923            } else {
924                if (may_queue != ELV_MQUEUE_MUST
925                        && !ioc_batching(q, ioc)) {
926                    /*
927                     * The queue is full and the allocating
928                     * process is not a "batcher", and not
929                     * exempted by the IO scheduler
930                     */
931                    return NULL;
932                }
933            }
934        }
935        /*
936         * bdi isn't aware of blkcg yet. As all async IOs end up
937         * root blkcg anyway, just use root blkcg state.
938         */
939        if (rl == &q->root_rl)
940            blk_set_queue_congested(q, is_sync);
941    }
942
943    /*
944     * Only allow batching queuers to allocate up to 50% over the defined
945     * limit of requests, otherwise we could have thousands of requests
946     * allocated with any setting of ->nr_requests
947     */
948    if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
949        return NULL;
950
951    q->nr_rqs[is_sync]++;
952    rl->count[is_sync]++;
953    rl->starved[is_sync] = 0;
954
955    /*
956     * Decide whether the new request will be managed by elevator. If
957     * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
958     * prevent the current elevator from being destroyed until the new
959     * request is freed. This guarantees icq's won't be destroyed and
960     * makes creating new ones safe.
961     *
962     * Also, lookup icq while holding queue_lock. If it doesn't exist,
963     * it will be created after releasing queue_lock.
964     */
965    if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
966        rw_flags |= REQ_ELVPRIV;
967        q->nr_rqs_elvpriv++;
968        if (et->icq_cache && ioc)
969            icq = ioc_lookup_icq(ioc, q);
970    }
971
972    if (blk_queue_io_stat(q))
973        rw_flags |= REQ_IO_STAT;
974    spin_unlock_irq(q->queue_lock);
975
976    /* allocate and init request */
977    rq = mempool_alloc(rl->rq_pool, gfp_mask);
978    if (!rq)
979        goto fail_alloc;
980
981    blk_rq_init(q, rq);
982    blk_rq_set_rl(rq, rl);
983    rq->cmd_flags = rw_flags | REQ_ALLOCED;
984
985    /* init elvpriv */
986    if (rw_flags & REQ_ELVPRIV) {
987        if (unlikely(et->icq_cache && !icq)) {
988            if (ioc)
989                icq = ioc_create_icq(ioc, q, gfp_mask);
990            if (!icq)
991                goto fail_elvpriv;
992        }
993
994        rq->elv.icq = icq;
995        if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
996            goto fail_elvpriv;
997
998        /* @rq->elv.icq holds io_context until @rq is freed */
999        if (icq)
1000            get_io_context(icq->ioc);
1001    }
1002out:
1003    /*
1004     * ioc may be NULL here, and ioc_batching will be false. That's
1005     * OK, if the queue is under the request limit then requests need
1006     * not count toward the nr_batch_requests limit. There will always
1007     * be some limit enforced by BLK_BATCH_TIME.
1008     */
1009    if (ioc_batching(q, ioc))
1010        ioc->nr_batch_requests--;
1011
1012    trace_block_getrq(q, bio, rw_flags & 1);
1013    return rq;
1014
1015fail_elvpriv:
1016    /*
1017     * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
1018     * and may fail indefinitely under memory pressure and thus
1019     * shouldn't stall IO. Treat this request as !elvpriv. This will
1020     * disturb iosched and blkcg but weird is better than dead.
1021     */
1022    printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
1023               dev_name(q->backing_dev_info.dev));
1024
1025    rq->cmd_flags &= ~REQ_ELVPRIV;
1026    rq->elv.icq = NULL;
1027
1028    spin_lock_irq(q->queue_lock);
1029    q->nr_rqs_elvpriv--;
1030    spin_unlock_irq(q->queue_lock);
1031    goto out;
1032
1033fail_alloc:
1034    /*
1035     * Allocation failed presumably due to memory. Undo anything we
1036     * might have messed up.
1037     *
1038     * Allocating task should really be put onto the front of the wait
1039     * queue, but this is pretty rare.
1040     */
1041    spin_lock_irq(q->queue_lock);
1042    freed_request(rl, rw_flags);
1043
1044    /*
1045     * In the very unlikely event that allocation failed and no
1046     * requests for this direction were pending, mark us starved so that
1047     * freeing of a request in the other direction will notice
1048     * us. Another possible fix would be to split the rq mempool into
1049     * READ and WRITE
1050     */
1051rq_starved:
1052    if (unlikely(rl->count[is_sync] == 0))
1053        rl->starved[is_sync] = 1;
1054    return NULL;
1055}
1056
1057/**
1058 * get_request - get a free request
1059 * @q: request_queue to allocate request from
1060 * @rw_flags: RW and SYNC flags
1061 * @bio: bio to allocate request for (can be %NULL)
1062 * @gfp_mask: allocation mask
1063 *
1064 * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
1065 * function keeps retrying under memory pressure and fails iff @q is dead.
1066 *
1067 * Must be called with @q->queue_lock held and,
1068 * Returns %NULL on failure, with @q->queue_lock held.
1069 * Returns !%NULL on success, with @q->queue_lock *not held*.
1070 */
1071static struct request *get_request(struct request_queue *q, int rw_flags,
1072                   struct bio *bio, gfp_t gfp_mask)
1073{
1074    const bool is_sync = rw_is_sync(rw_flags) != 0;
1075    DEFINE_WAIT(wait);
1076    struct request_list *rl;
1077    struct request *rq;
1078
1079    rl = blk_get_rl(q, bio); /* transferred to @rq on success */
1080retry:
1081    rq = __get_request(rl, rw_flags, bio, gfp_mask);
1082    if (rq)
1083        return rq;
1084
1085    if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
1086        blk_put_rl(rl);
1087        return NULL;
1088    }
1089
1090    /* wait on @rl and retry */
1091    prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1092                  TASK_UNINTERRUPTIBLE);
1093
1094    trace_block_sleeprq(q, bio, rw_flags & 1);
1095
1096    spin_unlock_irq(q->queue_lock);
1097    io_schedule();
1098
1099    /*
1100     * After sleeping, we become a "batching" process and will be able
1101     * to allocate at least one request, and up to a big batch of them
1102     * for a small period time. See ioc_batching, ioc_set_batching
1103     */
1104    ioc_set_batching(q, current->io_context);
1105
1106    spin_lock_irq(q->queue_lock);
1107    finish_wait(&rl->wait[is_sync], &wait);
1108
1109    goto retry;
1110}
1111
1112struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1113{
1114    struct request *rq;
1115
1116    BUG_ON(rw != READ && rw != WRITE);
1117
1118    /* create ioc upfront */
1119    create_io_context(gfp_mask, q->node);
1120
1121    spin_lock_irq(q->queue_lock);
1122    rq = get_request(q, rw, NULL, gfp_mask);
1123    if (!rq)
1124        spin_unlock_irq(q->queue_lock);
1125    /* q->queue_lock is unlocked at this point */
1126
1127    return rq;
1128}
1129EXPORT_SYMBOL(blk_get_request);
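
/*
 * Example (editorial sketch): allocating a request for a driver-internal
 * command, executing it synchronously and releasing it.  The command setup
 * is only hinted at; blk_execute_rq() and blk_put_request() are the real
 * block layer calls.
 */
#if 0	/* illustration only */
static int mydrv_send_internal_cmd(struct request_queue *q,
                   struct gendisk *disk)
{
    struct request *rq;
    int err;

    rq = blk_get_request(q, READ, GFP_KERNEL);
    if (!rq)
        return -ENOMEM;

    rq->cmd_type = REQ_TYPE_BLOCK_PC;
    /* ... fill rq->cmd[], rq->cmd_len, rq->timeout here ... */

    err = blk_execute_rq(q, disk, rq, 0);
    blk_put_request(rq);
    return err;
}
#endif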
1130
1131/**
1132 * blk_make_request - given a bio, allocate a corresponding struct request.
1133 * @q: target request queue
1134 * @bio: The bio describing the memory mappings that will be submitted for IO.
1135 * It may be a chained-bio properly constructed by block/bio layer.
1136 * @gfp_mask: gfp flags to be used for memory allocation
1137 *
1138 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
1139 * type commands, where the struct request needs to be further initialized by
1140 * the caller. It is passed a &struct bio, which describes the memory info of
1141 * the I/O transfer.
1142 *
1143 * The caller of blk_make_request must make sure that bi_io_vec
1144 * are set to describe the memory buffers. That bio_data_dir() will return
1145 * the needed direction of the request. (And all bio's in the passed bio-chain
1146 * are properly set accordingly)
1147 *
1148 * If called under non-sleepable conditions, mapped bio buffers must not
1149 * need bouncing, by calling the appropriate masked or flagged allocator,
1150 * suitable for the target device. Otherwise the call to blk_queue_bounce will
1151 * BUG.
1152 *
1153 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
1154 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
1155 * anything but the first bio in the chain. Otherwise you risk waiting for IO
1156 * completion of a bio that hasn't been submitted yet, thus resulting in a
1157 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
1158 * of bio_alloc(), as that avoids the mempool deadlock.
1159 * If possible a big IO should be split into smaller parts when allocation
1160 * fails. Partial allocation should not be an error, or you risk a live-lock.
1161 */
1162struct request *blk_make_request(struct request_queue *q, struct bio *bio,
1163                 gfp_t gfp_mask)
1164{
1165    struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
1166
1167    if (unlikely(!rq))
1168        return ERR_PTR(-ENOMEM);
1169
1170    for_each_bio(bio) {
1171        struct bio *bounce_bio = bio;
1172        int ret;
1173
1174        blk_queue_bounce(q, &bounce_bio);
1175        ret = blk_rq_append_bio(q, rq, bounce_bio);
1176        if (unlikely(ret)) {
1177            blk_put_request(rq);
1178            return ERR_PTR(ret);
1179        }
1180    }
1181
1182    return rq;
1183}
1184EXPORT_SYMBOL(blk_make_request);
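
/*
 * Example (editorial sketch): building a BLOCK_PC request from a kernel
 * buffer with bio_map_kern() and blk_make_request().  The caller is still
 * expected to fill in the command bytes before submitting; error handling
 * is kept minimal.
 */
#if 0	/* illustration only */
static struct request *mydrv_pc_request(struct request_queue *q,
                    void *buf, unsigned int len)
{
    struct bio *bio;
    struct request *rq;

    bio = bio_map_kern(q, buf, len, GFP_KERNEL);
    if (IS_ERR(bio))
        return ERR_CAST(bio);

    rq = blk_make_request(q, bio, GFP_KERNEL);
    if (IS_ERR(rq)) {
        bio_put(bio);
        return rq;
    }

    rq->cmd_type = REQ_TYPE_BLOCK_PC;
    return rq;
}
#endif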
1185
1186/**
1187 * blk_requeue_request - put a request back on queue
1188 * @q: request queue where request should be inserted
1189 * @rq: request to be inserted
1190 *
1191 * Description:
1192 * Drivers often keep queueing requests until the hardware cannot accept
1193 * more, when that condition happens we need to put the request back
1194 * on the queue. Must be called with queue lock held.
1195 */
1196void blk_requeue_request(struct request_queue *q, struct request *rq)
1197{
1198    blk_delete_timer(rq);
1199    blk_clear_rq_complete(rq);
1200    trace_block_rq_requeue(q, rq);
1201
1202    if (blk_rq_tagged(rq))
1203        blk_queue_end_tag(q, rq);
1204
1205    BUG_ON(blk_queued_rq(rq));
1206
1207    elv_requeue_request(q, rq);
1208}
1209EXPORT_SYMBOL(blk_requeue_request);
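
/*
 * Example (editorial sketch): requeueing from a completion path when the
 * device asks for a retry.  MYDRV_BUSY and the mydrv_* name are
 * hypothetical; note that blk_requeue_request() runs under q->queue_lock
 * as required.
 */
#if 0	/* illustration only */
static void mydrv_complete_rq(struct request_queue *q, struct request *rq,
                  int hw_status)
{
    unsigned long flags;

    if (hw_status == MYDRV_BUSY) {
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_delay_queue(q, 10);    /* give the device time to recover */
        spin_unlock_irqrestore(q->queue_lock, flags);
        return;
    }

    blk_end_request_all(rq, hw_status ? -EIO : 0);
}
#endif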
1210
1211static void add_acct_request(struct request_queue *q, struct request *rq,
1212                 int where)
1213{
1214    drive_stat_acct(rq, 1);
1215    __elv_add_request(q, rq, where);
1216}
1217
1218static void part_round_stats_single(int cpu, struct hd_struct *part,
1219                    unsigned long now)
1220{
1221    if (now == part->stamp)
1222        return;
1223
1224    if (part_in_flight(part)) {
1225        __part_stat_add(cpu, part, time_in_queue,
1226                part_in_flight(part) * (now - part->stamp));
1227        __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1228    }
1229    part->stamp = now;
1230}
1231
1232/**
1233 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1234 * @cpu: cpu number for stats access
1235 * @part: target partition
1236 *
1237 * The average IO queue length and utilisation statistics are maintained
1238 * by observing the current state of the queue length and the amount of
1239 * time it has been in this state for.
1240 *
1241 * Normally, that accounting is done on IO completion, but that can result
1242 * in more than a second's worth of IO being accounted for within any one
1243 * second, leading to >100% utilisation. To deal with that, we call this
1244 * function to do a round-off before returning the results when reading
1245 * /proc/diskstats. This accounts immediately for all queue usage up to
1246 * the current jiffies and restarts the counters again.
1247 */
1248void part_round_stats(int cpu, struct hd_struct *part)
1249{
1250    unsigned long now = jiffies;
1251
1252    if (part->partno)
1253        part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1254    part_round_stats_single(cpu, part, now);
1255}
1256EXPORT_SYMBOL_GPL(part_round_stats);
1257
1258#ifdef CONFIG_PM_RUNTIME
1259static void blk_pm_put_request(struct request *rq)
1260{
1261    if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
1262        pm_runtime_mark_last_busy(rq->q->dev);
1263}
1264#else
1265static inline void blk_pm_put_request(struct request *rq) {}
1266#endif
1267
1268/*
1269 * queue lock must be held
1270 */
1271void __blk_put_request(struct request_queue *q, struct request *req)
1272{
1273    if (unlikely(!q))
1274        return;
1275    if (unlikely(--req->ref_count))
1276        return;
1277
1278    blk_pm_put_request(req);
1279
1280    elv_completed_request(q, req);
1281
1282    /* this is a bio leak */
1283    WARN_ON(req->bio != NULL);
1284
1285    /*
1286     * Request may not have originated from ll_rw_blk. if not,
1287     * it didn't come out of our reserved rq pools
1288     */
1289    if (req->cmd_flags & REQ_ALLOCED) {
1290        unsigned int flags = req->cmd_flags;
1291        struct request_list *rl = blk_rq_rl(req);
1292
1293        BUG_ON(!list_empty(&req->queuelist));
1294        BUG_ON(!hlist_unhashed(&req->hash));
1295
1296        blk_free_request(rl, req);
1297        freed_request(rl, flags);
1298        blk_put_rl(rl);
1299    }
1300}
1301EXPORT_SYMBOL_GPL(__blk_put_request);
1302
1303void blk_put_request(struct request *req)
1304{
1305    unsigned long flags;
1306    struct request_queue *q = req->q;
1307
1308    spin_lock_irqsave(q->queue_lock, flags);
1309    __blk_put_request(q, req);
1310    spin_unlock_irqrestore(q->queue_lock, flags);
1311}
1312EXPORT_SYMBOL(blk_put_request);
1313
1314/**
1315 * blk_add_request_payload - add a payload to a request
1316 * @rq: request to update
1317 * @page: page backing the payload
1318 * @len: length of the payload.
1319 *
1320 * This allows to later add a payload to an already submitted request by
1321 * a block driver. The driver needs to take care of freeing the payload
1322 * itself.
1323 *
1324 * Note that this is a quite horrible hack and nothing but handling of
1325 * discard requests should ever use it.
1326 */
1327void blk_add_request_payload(struct request *rq, struct page *page,
1328        unsigned int len)
1329{
1330    struct bio *bio = rq->bio;
1331
1332    bio->bi_io_vec->bv_page = page;
1333    bio->bi_io_vec->bv_offset = 0;
1334    bio->bi_io_vec->bv_len = len;
1335
1336    bio->bi_size = len;
1337    bio->bi_vcnt = 1;
1338    bio->bi_phys_segments = 1;
1339
1340    rq->__data_len = rq->resid_len = len;
1341    rq->nr_phys_segments = 1;
1342    rq->buffer = bio_data(bio);
1343}
1344EXPORT_SYMBOL_GPL(blk_add_request_payload);
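
/*
 * Example (editorial sketch): a prep_rq_fn attaching a zeroed page as the
 * payload of a discard request, the kind of use this helper exists for.
 * The 512-byte length is a stand-in for the device's logical block size;
 * freeing the page again on completion is the driver's job.
 */
#if 0	/* illustration only */
static int mydrv_prep_discard(struct request_queue *q, struct request *rq)
{
    struct page *page;

    page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
    if (!page)
        return BLKPREP_DEFER;

    blk_add_request_payload(rq, page, 512);
    return BLKPREP_OK;
}
#endif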
1345
1346static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1347                   struct bio *bio)
1348{
1349    const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1350
1351    if (!ll_back_merge_fn(q, req, bio))
1352        return false;
1353
1354    trace_block_bio_backmerge(q, req, bio);
1355
1356    if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1357        blk_rq_set_mixed_merge(req);
1358
1359    req->biotail->bi_next = bio;
1360    req->biotail = bio;
1361    req->__data_len += bio->bi_size;
1362    req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1363
1364    drive_stat_acct(req, 0);
1365    return true;
1366}
1367
1368static bool bio_attempt_front_merge(struct request_queue *q,
1369                    struct request *req, struct bio *bio)
1370{
1371    const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1372
1373    if (!ll_front_merge_fn(q, req, bio))
1374        return false;
1375
1376    trace_block_bio_frontmerge(q, req, bio);
1377
1378    if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1379        blk_rq_set_mixed_merge(req);
1380
1381    bio->bi_next = req->bio;
1382    req->bio = bio;
1383
1384    /*
1385     * may not be valid. if the low level driver said
1386     * it didn't need a bounce buffer then it better
1387     * not touch req->buffer either...
1388     */
1389    req->buffer = bio_data(bio);
1390    req->__sector = bio->bi_sector;
1391    req->__data_len += bio->bi_size;
1392    req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1393
1394    drive_stat_acct(req, 0);
1395    return true;
1396}
1397
1398/**
1399 * attempt_plug_merge - try to merge with %current's plugged list
1400 * @q: request_queue new bio is being queued at
1401 * @bio: new bio being queued
1402 * @request_count: out parameter for number of traversed plugged requests
1403 *
1404 * Determine whether @bio being queued on @q can be merged with a request
1405 * on %current's plugged list. Returns %true if merge was successful,
1406 * otherwise %false.
1407 *
1408 * Plugging coalesces IOs from the same issuer for the same purpose without
1409 * going through @q->queue_lock. As such it's more of an issuing mechanism
1410 * than scheduling, and the request, while it may have elvpriv data, is not
1411 * added on the elevator at this point. In addition, we don't have
1412 * reliable access to the elevator outside queue lock. Only check basic
1413 * merging parameters without querying the elevator.
1414 */
1415static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1416                   unsigned int *request_count)
1417{
1418    struct blk_plug *plug;
1419    struct request *rq;
1420    bool ret = false;
1421
1422    plug = current->plug;
1423    if (!plug)
1424        goto out;
1425    *request_count = 0;
1426
1427    list_for_each_entry_reverse(rq, &plug->list, queuelist) {
1428        int el_ret;
1429
1430        if (rq->q == q)
1431            (*request_count)++;
1432
1433        if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1434            continue;
1435
1436        el_ret = blk_try_merge(rq, bio);
1437        if (el_ret == ELEVATOR_BACK_MERGE) {
1438            ret = bio_attempt_back_merge(q, rq, bio);
1439            if (ret)
1440                break;
1441        } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1442            ret = bio_attempt_front_merge(q, rq, bio);
1443            if (ret)
1444                break;
1445        }
1446    }
1447out:
1448    return ret;
1449}
1450
1451void init_request_from_bio(struct request *req, struct bio *bio)
1452{
1453    req->cmd_type = REQ_TYPE_FS;
1454
1455    req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1456    if (bio->bi_rw & REQ_RAHEAD)
1457        req->cmd_flags |= REQ_FAILFAST_MASK;
1458
1459    req->errors = 0;
1460    req->__sector = bio->bi_sector;
1461    req->ioprio = bio_prio(bio);
1462    blk_rq_bio_prep(req->q, req, bio);
1463}
1464
1465void blk_queue_bio(struct request_queue *q, struct bio *bio)
1466{
1467    const bool sync = !!(bio->bi_rw & REQ_SYNC);
1468    struct blk_plug *plug;
1469    int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1470    struct request *req;
1471    unsigned int request_count = 0;
1472
1473    /*
1474     * low level driver can indicate that it wants pages above a
1475     * certain limit bounced to low memory (ie for highmem, or even
1476     * ISA dma in theory)
1477     */
1478    blk_queue_bounce(q, &bio);
1479
1480    if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1481        bio_endio(bio, -EIO);
1482        return;
1483    }
1484
1485    if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1486        spin_lock_irq(q->queue_lock);
1487        where = ELEVATOR_INSERT_FLUSH;
1488        goto get_rq;
1489    }
1490
1491    /*
1492     * Check if we can merge with the plugged list before grabbing
1493     * any locks.
1494     */
1495    if (attempt_plug_merge(q, bio, &request_count))
1496        return;
1497
1498    spin_lock_irq(q->queue_lock);
1499
1500    el_ret = elv_merge(q, &req, bio);
1501    if (el_ret == ELEVATOR_BACK_MERGE) {
1502        if (bio_attempt_back_merge(q, req, bio)) {
1503            elv_bio_merged(q, req, bio);
1504            if (!attempt_back_merge(q, req))
1505                elv_merged_request(q, req, el_ret);
1506            goto out_unlock;
1507        }
1508    } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1509        if (bio_attempt_front_merge(q, req, bio)) {
1510            elv_bio_merged(q, req, bio);
1511            if (!attempt_front_merge(q, req))
1512                elv_merged_request(q, req, el_ret);
1513            goto out_unlock;
1514        }
1515    }
1516
1517get_rq:
1518    /*
1519     * This sync check and mask will be re-done in init_request_from_bio(),
1520     * but we need to set it earlier to expose the sync flag to the
1521     * rq allocator and io schedulers.
1522     */
1523    rw_flags = bio_data_dir(bio);
1524    if (sync)
1525        rw_flags |= REQ_SYNC;
1526
1527    /*
1528     * Grab a free request. This might sleep but cannot fail.
1529     * Returns with the queue unlocked.
1530     */
1531    req = get_request(q, rw_flags, bio, GFP_NOIO);
1532    if (unlikely(!req)) {
1533        bio_endio(bio, -ENODEV); /* @q is dead */
1534        goto out_unlock;
1535    }
1536
1537    /*
1538     * After dropping the lock and possibly sleeping here, our request
1539     * may now be mergeable after it had proven unmergeable (above).
1540     * We don't worry about that case for efficiency. It won't happen
1541     * often, and the elevators are able to handle it.
1542     */
1543    init_request_from_bio(req, bio);
1544
1545    if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1546        req->cpu = raw_smp_processor_id();
1547
1548    plug = current->plug;
1549    if (plug) {
1550        /*
1551         * If this is the first request added after a plug, fire
1552         * off a plug trace.
1553         */
1554        if (!request_count)
1555            trace_block_plug(q);
1556        else {
1557            if (request_count >= BLK_MAX_REQUEST_COUNT) {
1558                blk_flush_plug_list(plug, false);
1559                trace_block_plug(q);
1560            }
1561        }
1562        list_add_tail(&req->queuelist, &plug->list);
1563        drive_stat_acct(req, 1);
1564    } else {
1565        spin_lock_irq(q->queue_lock);
1566        add_acct_request(q, req, where);
1567        __blk_run_queue(q);
1568out_unlock:
1569        spin_unlock_irq(q->queue_lock);
1570    }
1571}
1572EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
1573
1574/*
1575 * If bio->bi_dev is a partition, remap the location
1576 */
1577static inline void blk_partition_remap(struct bio *bio)
1578{
1579    struct block_device *bdev = bio->bi_bdev;
1580
1581    if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1582        struct hd_struct *p = bdev->bd_part;
1583
1584        bio->bi_sector += p->start_sect;
1585        bio->bi_bdev = bdev->bd_contains;
1586
1587        trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1588                      bdev->bd_dev,
1589                      bio->bi_sector - p->start_sect);
1590    }
1591}
1592
1593static void handle_bad_sector(struct bio *bio)
1594{
1595    char b[BDEVNAME_SIZE];
1596
1597    printk(KERN_INFO "attempt to access beyond end of device\n");
1598    printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1599            bdevname(bio->bi_bdev, b),
1600            bio->bi_rw,
1601            (unsigned long long)bio_end_sector(bio),
1602            (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1603
1604    set_bit(BIO_EOF, &bio->bi_flags);
1605}
1606
1607#ifdef CONFIG_FAIL_MAKE_REQUEST
1608
1609static DECLARE_FAULT_ATTR(fail_make_request);
1610
1611static int __init setup_fail_make_request(char *str)
1612{
1613    return setup_fault_attr(&fail_make_request, str);
1614}
1615__setup("fail_make_request=", setup_fail_make_request);
1616
1617static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1618{
1619    return part->make_it_fail && should_fail(&fail_make_request, bytes);
1620}
1621
1622static int __init fail_make_request_debugfs(void)
1623{
1624    struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1625                        NULL, &fail_make_request);
1626
1627    return IS_ERR(dir) ? PTR_ERR(dir) : 0;
1628}
1629
1630late_initcall(fail_make_request_debugfs);
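
/*
 * Usage note (editorial): the fault injection above is driven either by the
 * boot parameter parsed by setup_fail_make_request(), i.e.
 *
 *     fail_make_request=<interval>,<probability>,<space>,<times>
 *
 * or at runtime via the debugfs attributes created here, combined with the
 * per-device switch, e.g.
 *
 *     echo 1 > /sys/block/sda/sda1/make-it-fail
 *
 * See Documentation/fault-injection/fault-injection.txt for details.
 */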
1631
1632#else /* CONFIG_FAIL_MAKE_REQUEST */
1633
1634static inline bool should_fail_request(struct hd_struct *part,
1635                    unsigned int bytes)
1636{
1637    return false;
1638}
1639
1640#endif /* CONFIG_FAIL_MAKE_REQUEST */
1641
1642/*
1643 * Check whether this bio extends beyond the end of the device.
1644 */
1645static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1646{
1647    sector_t maxsector;
1648
1649    if (!nr_sectors)
1650        return 0;
1651
1652    /* Test device or partition size, when known. */
1653    maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1654    if (maxsector) {
1655        sector_t sector = bio->bi_sector;
1656
1657        if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1658            /*
1659             * This may well happen - the kernel calls bread()
1660             * without checking the size of the device, e.g., when
1661             * mounting a device.
1662             */
1663            handle_bad_sector(bio);
1664            return 1;
1665        }
1666    }
1667
1668    return 0;
1669}
1670
1671static noinline_for_stack bool
1672generic_make_request_checks(struct bio *bio)
1673{
1674    struct request_queue *q;
1675    int nr_sectors = bio_sectors(bio);
1676    int err = -EIO;
1677    char b[BDEVNAME_SIZE];
1678    struct hd_struct *part;
1679
1680    might_sleep();
1681
1682    if (bio_check_eod(bio, nr_sectors))
1683        goto end_io;
1684
1685    q = bdev_get_queue(bio->bi_bdev);
1686    if (unlikely(!q)) {
1687        printk(KERN_ERR
1688               "generic_make_request: Trying to access "
1689            "nonexistent block-device %s (%Lu)\n",
1690            bdevname(bio->bi_bdev, b),
1691            (long long) bio->bi_sector);
1692        goto end_io;
1693    }
1694
1695    if (likely(bio_is_rw(bio) &&
1696           nr_sectors > queue_max_hw_sectors(q))) {
1697        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1698               bdevname(bio->bi_bdev, b),
1699               bio_sectors(bio),
1700               queue_max_hw_sectors(q));
1701        goto end_io;
1702    }
1703
1704    part = bio->bi_bdev->bd_part;
1705    if (should_fail_request(part, bio->bi_size) ||
1706        should_fail_request(&part_to_disk(part)->part0,
1707                bio->bi_size))
1708        goto end_io;
1709
1710    /*
1711     * If this device has partitions, remap block n
1712     * of partition p to block n+start(p) of the disk.
1713     */
1714    blk_partition_remap(bio);
1715
1716    if (bio_check_eod(bio, nr_sectors))
1717        goto end_io;
1718
1719    /*
1720     * Filter flush bio's early so that make_request based
1721     * drivers without flush support don't have to worry
1722     * about them.
1723     */
1724    if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1725        bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1726        if (!nr_sectors) {
1727            err = 0;
1728            goto end_io;
1729        }
1730    }
1731
1732    if ((bio->bi_rw & REQ_DISCARD) &&
1733        (!blk_queue_discard(q) ||
1734         ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
1735        err = -EOPNOTSUPP;
1736        goto end_io;
1737    }
1738
1739    if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
1740        err = -EOPNOTSUPP;
1741        goto end_io;
1742    }
1743
1744    /*
1745     * Various block parts want %current->io_context and lazy ioc
1746     * allocation ends up trading a lot of pain for a small amount of
1747     * memory. Just allocate it upfront. This may fail and block
1748     * layer knows how to live with it.
1749     */
1750    create_io_context(GFP_ATOMIC, q->node);
1751
1752    if (blk_throtl_bio(q, bio))
1753        return false; /* throttled, will be resubmitted later */
1754
1755    trace_block_bio_queue(q, bio);
1756    return true;
1757
1758end_io:
1759    bio_endio(bio, err);
1760    return false;
1761}
1762
1763/**
1764 * generic_make_request - hand a buffer to its device driver for I/O
1765 * @bio: The bio describing the location in memory and on the device.
1766 *
1767 * generic_make_request() is used to make I/O requests of block
1768 * devices. It is passed a &struct bio, which describes the I/O that needs
1769 * to be done.
1770 *
1771 * generic_make_request() does not return any status. The
1772 * success/failure status of the request, along with notification of
1773 * completion, is delivered asynchronously through the bio->bi_end_io
1774 * function described (one day) elsewhere.
1775 *
1776 * The caller of generic_make_request must make sure that bi_io_vec
1777 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
1778 * set to describe the device address, and the
1779 * bi_end_io and optionally bi_private are set to describe how
1780 * completion notification should be signaled.
1781 *
1782 * generic_make_request and the drivers it calls may use bi_next if this
1783 * bio happens to be merged with someone else, and may resubmit the bio to
1784 * a lower device by calling into generic_make_request recursively, which
1785 * means the bio should NOT be touched after the call to ->make_request_fn.
1786 */
1787void generic_make_request(struct bio *bio)
1788{
1789    struct bio_list bio_list_on_stack;
1790
1791    if (!generic_make_request_checks(bio))
1792        return;
1793
1794    /*
1795     * We only want one ->make_request_fn to be active at a time, else
1796     * stack usage with stacked devices could be a problem. So use
1797     * current->bio_list to keep a list of requests submitted by a
1798     * make_request_fn function. current->bio_list is also used as a
1799     * flag to say if generic_make_request is currently active in this
1800     * task or not. If it is NULL, then no make_request is active. If
1801     * it is non-NULL, then a make_request is active, and new requests
1802     * should be added at the tail
1803     */
1804    if (current->bio_list) {
1805        bio_list_add(current->bio_list, bio);
1806        return;
1807    }
1808
1809    /* The following loop may be a bit non-obvious, and so deserves some
1810     * explanation.
1811     * Before entering the loop, bio->bi_next is NULL (as all callers
1812     * ensure that) so we have a list with a single bio.
1813     * We pretend that we have just taken it off a longer list, so
1814     * we assign bio_list to a pointer to the bio_list_on_stack,
1815     * thus initialising the bio_list of new bios to be
1816     * added. ->make_request() may indeed add some more bios
1817     * through a recursive call to generic_make_request. If it
1818     * did, we find a non-NULL value in bio_list and re-enter the loop
1819     * from the top. In this case we really did just take the bio
1820     * off the top of the list (no pretending) and so remove it from
1821     * bio_list, and call into ->make_request() again.
1822     */
1823    BUG_ON(bio->bi_next);
1824    bio_list_init(&bio_list_on_stack);
1825    current->bio_list = &bio_list_on_stack;
1826    do {
1827        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1828
1829        q->make_request_fn(q, bio);
1830
1831        bio = bio_list_pop(current->bio_list);
1832    } while (bio);
1833    current->bio_list = NULL; /* deactivate */
1834}
1835EXPORT_SYMBOL(generic_make_request);
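/*
 * Example (illustrative sketch, not part of this file's API): a minimal
 * caller fills in the fields listed in the comment above and then hands
 * the bio off.  example_read_sector() and example_end_io() are
 * hypothetical names used only for this sketch.
 */
static void example_end_io(struct bio *bio, int error)
{
    /* runs asynchronously, possibly from interrupt context */
    complete(bio->bi_private);
}

static void __maybe_unused example_read_sector(struct block_device *bdev,
                           struct page *page,
                           sector_t sector)
{
    DECLARE_COMPLETION_ONSTACK(done);
    struct bio *bio = bio_alloc(GFP_NOIO, 1);

    if (!bio)
        return;

    bio->bi_bdev = bdev;            /* device address ... */
    bio->bi_sector = sector;        /* ... and starting sector */
    bio_add_page(bio, page, PAGE_SIZE, 0); /* memory buffer (bi_io_vec) */
    bio->bi_end_io = example_end_io;    /* completion notification */
    bio->bi_private = &done;
    bio->bi_rw = READ;          /* WRITE would be set here instead */

    generic_make_request(bio);      /* returns before the I/O is done */
    wait_for_completion(&done);
    bio_put(bio);
}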
1836
1837/**
1838 * submit_bio - submit a bio to the block device layer for I/O
1839 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1840 * @bio: The &struct bio which describes the I/O
1841 *
1842 * submit_bio() is very similar in purpose to generic_make_request(), and
1843 * uses that function to do most of the work. Both are fairly rough
1844 * interfaces; @bio must be presetup and ready for I/O.
1845 *
1846 */
1847void submit_bio(int rw, struct bio *bio)
1848{
1849    bio->bi_rw |= rw;
1850
1851    /*
1852     * If it's a regular read/write or a barrier with data attached,
1853     * go through the normal accounting stuff before submission.
1854     */
1855    if (bio_has_data(bio)) {
1856        unsigned int count;
1857
1858        if (unlikely(rw & REQ_WRITE_SAME))
1859            count = bdev_logical_block_size(bio->bi_bdev) >> 9;
1860        else
1861            count = bio_sectors(bio);
1862
1863        if (rw & WRITE) {
1864            count_vm_events(PGPGOUT, count);
1865        } else {
1866            task_io_account_read(bio->bi_size);
1867            count_vm_events(PGPGIN, count);
1868        }
1869
1870        if (unlikely(block_dump)) {
1871            char b[BDEVNAME_SIZE];
1872            printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1873            current->comm, task_pid_nr(current),
1874                (rw & WRITE) ? "WRITE" : "READ",
1875                (unsigned long long)bio->bi_sector,
1876                bdevname(bio->bi_bdev, b),
1877                count);
1878        }
1879    }
1880
1881    generic_make_request(bio);
1882}
1883EXPORT_SYMBOL(submit_bio);
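/*
 * Example (illustrative sketch): submit_bio() is the normal entry point
 * for filesystems; beyond generic_make_request() it only ORs @rw into
 * bi_rw and performs the accounting shown above.  The helper below and
 * its arguments are hypothetical.
 */
static void __maybe_unused example_write_page(struct block_device *bdev,
                          struct page *page,
                          sector_t sector,
                          bio_end_io_t *end_io)
{
    struct bio *bio = bio_alloc(GFP_NOIO, 1);

    if (!bio)
        return;

    bio->bi_bdev = bdev;
    bio->bi_sector = sector;
    bio->bi_end_io = end_io;        /* caller-supplied completion */
    bio_add_page(bio, page, PAGE_SIZE, 0);

    /* accounts PGPGOUT and, with block_dump set, logs the submission */
    submit_bio(WRITE, bio);
}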
1884
1885/**
1886 * blk_rq_check_limits - Helper function to check a request for the queue limit
1887 * @q: the queue
1888 * @rq: the request being checked
1889 *
1890 * Description:
1891 * @rq may have been made based on weaker limitations of upper-level queues
1892 * in request stacking drivers, and it may violate the limitation of @q.
1893 * Since the block layer and the underlying device driver trust @rq
1894 * after it is inserted to @q, it should be checked against @q before
1895 * the insertion using this generic function.
1896 *
1897 * This function should also be useful for request stacking drivers
1898 * in some cases below, so export this function.
1899 * Request stacking drivers like request-based dm may change the queue
1900 * limits while requests are in the queue (e.g. dm's table swapping).
1901 * Such request stacking drivers should check those requests against
1902 * the new queue limits again when they dispatch those requests,
1903 * although such checks are also done against the old queue limits
1904 * when submitting requests.
1905 */
1906int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1907{
1908    if (!rq_mergeable(rq))
1909        return 0;
1910
1911    if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
1912        printk(KERN_ERR "%s: over max size limit.\n", __func__);
1913        return -EIO;
1914    }
1915
1916    /*
1917     * queue's settings related to segment counting like q->bounce_pfn
1918     * may differ from that of other stacking queues.
1919     * Recalculate it to check the request correctly on this queue's
1920     * limitation.
1921     */
1922    blk_recalc_rq_segments(rq);
1923    if (rq->nr_phys_segments > queue_max_segments(q)) {
1924        printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1925        return -EIO;
1926    }
1927
1928    return 0;
1929}
1930EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1931
1932/**
1933 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1934 * @q: the queue to submit the request
1935 * @rq: the request being queued
1936 */
1937int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1938{
1939    unsigned long flags;
1940    int where = ELEVATOR_INSERT_BACK;
1941
1942    if (blk_rq_check_limits(q, rq))
1943        return -EIO;
1944
1945    if (rq->rq_disk &&
1946        should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1947        return -EIO;
1948
1949    spin_lock_irqsave(q->queue_lock, flags);
1950    if (unlikely(blk_queue_dying(q))) {
1951        spin_unlock_irqrestore(q->queue_lock, flags);
1952        return -ENODEV;
1953    }
1954
1955    /*
1956     * Submitting request must be dequeued before calling this function
1957     * because it will be linked to another request_queue
1958     */
1959    BUG_ON(blk_queued_rq(rq));
1960
1961    if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
1962        where = ELEVATOR_INSERT_FLUSH;
1963
1964    add_acct_request(q, rq, where);
1965    if (where == ELEVATOR_INSERT_FLUSH)
1966        __blk_run_queue(q);
1967    spin_unlock_irqrestore(q->queue_lock, flags);
1968
1969    return 0;
1970}
1971EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1972
1973/**
1974 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1975 * @rq: request to examine
1976 *
1977 * Description:
1978 * A request could be a merge of IOs which require different failure
1979 * handling. This function determines the number of bytes which
1980 * can be failed from the beginning of the request without
1981 * crossing into an area which needs to be retried further.
1982 *
1983 * Return:
1984 * The number of bytes to fail.
1985 *
1986 * Context:
1987 * queue_lock must be held.
1988 */
1989unsigned int blk_rq_err_bytes(const struct request *rq)
1990{
1991    unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1992    unsigned int bytes = 0;
1993    struct bio *bio;
1994
1995    if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1996        return blk_rq_bytes(rq);
1997
1998    /*
1999     * Currently the only 'mixing' which can happen is between
2000     * different failfast types. We can safely fail portions
2001     * which have all the failfast bits that the first one has -
2002     * the ones which are at least as eager to fail as the first
2003     * one.
2004     */
2005    for (bio = rq->bio; bio; bio = bio->bi_next) {
2006        if ((bio->bi_rw & ff) != ff)
2007            break;
2008        bytes += bio->bi_size;
2009    }
2010
2011    /* this could lead to infinite loop */
2012    BUG_ON(blk_rq_bytes(rq) && !bytes);
2013    return bytes;
2014}
2015EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2016
2017static void blk_account_io_completion(struct request *req, unsigned int bytes)
2018{
2019    if (blk_do_io_stat(req)) {
2020        const int rw = rq_data_dir(req);
2021        struct hd_struct *part;
2022        int cpu;
2023
2024        cpu = part_stat_lock();
2025        part = req->part;
2026        part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2027        part_stat_unlock();
2028    }
2029}
2030
2031static void blk_account_io_done(struct request *req)
2032{
2033    /*
2034     * Account IO completion. flush_rq isn't accounted as
2035     * normal IO either on queueing or on completion. Accounting the
2036     * containing request is enough.
2037     */
2038    if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
2039        unsigned long duration = jiffies - req->start_time;
2040        const int rw = rq_data_dir(req);
2041        struct hd_struct *part;
2042        int cpu;
2043
2044        cpu = part_stat_lock();
2045        part = req->part;
2046
2047        part_stat_inc(cpu, part, ios[rw]);
2048        part_stat_add(cpu, part, ticks[rw], duration);
2049        part_round_stats(cpu, part);
2050        part_dec_in_flight(part, rw);
2051
2052        hd_struct_put(part);
2053        part_stat_unlock();
2054    }
2055}
2056
2057#ifdef CONFIG_PM_RUNTIME
2058/*
2059 * Don't process normal requests when queue is suspended
2060 * or in the process of suspending/resuming
2061 */
2062static struct request *blk_pm_peek_request(struct request_queue *q,
2063                       struct request *rq)
2064{
2065    if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
2066        (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
2067        return NULL;
2068    else
2069        return rq;
2070}
2071#else
2072static inline struct request *blk_pm_peek_request(struct request_queue *q,
2073                          struct request *rq)
2074{
2075    return rq;
2076}
2077#endif
2078
2079/**
2080 * blk_peek_request - peek at the top of a request queue
2081 * @q: request queue to peek at
2082 *
2083 * Description:
2084 * Return the request at the top of @q. The returned request
2085 * should be started using blk_start_request() before LLD starts
2086 * processing it.
2087 *
2088 * Return:
2089 * Pointer to the request at the top of @q if available. Null
2090 * otherwise.
2091 *
2092 * Context:
2093 * queue_lock must be held.
2094 */
2095struct request *blk_peek_request(struct request_queue *q)
2096{
2097    struct request *rq;
2098    int ret;
2099
2100    while ((rq = __elv_next_request(q)) != NULL) {
2101
2102        rq = blk_pm_peek_request(q, rq);
2103        if (!rq)
2104            break;
2105
2106        if (!(rq->cmd_flags & REQ_STARTED)) {
2107            /*
2108             * This is the first time the device driver
2109             * sees this request (possibly after
2110             * requeueing). Notify IO scheduler.
2111             */
2112            if (rq->cmd_flags & REQ_SORTED)
2113                elv_activate_rq(q, rq);
2114
2115            /*
2116             * just mark it as started even if we don't start
2117             * it; a request that has been delayed should
2118             * not be passed by new incoming requests
2119             */
2120            rq->cmd_flags |= REQ_STARTED;
2121            trace_block_rq_issue(q, rq);
2122        }
2123
2124        if (!q->boundary_rq || q->boundary_rq == rq) {
2125            q->end_sector = rq_end_sector(rq);
2126            q->boundary_rq = NULL;
2127        }
2128
2129        if (rq->cmd_flags & REQ_DONTPREP)
2130            break;
2131
2132        if (q->dma_drain_size && blk_rq_bytes(rq)) {
2133            /*
2134             * make sure space for the drain appears. We
2135             * know we can do this because max_hw_segments
2136             * has been adjusted to be one fewer than the
2137             * device can handle.
2138             */
2139            rq->nr_phys_segments++;
2140        }
2141
2142        if (!q->prep_rq_fn)
2143            break;
2144
2145        ret = q->prep_rq_fn(q, rq);
2146        if (ret == BLKPREP_OK) {
2147            break;
2148        } else if (ret == BLKPREP_DEFER) {
2149            /*
2150             * the request may have been (partially) prepped.
2151             * we need to keep this request in the front to
2152             * avoid resource deadlock. REQ_STARTED will
2153             * prevent other fs requests from passing this one.
2154             */
2155            if (q->dma_drain_size && blk_rq_bytes(rq) &&
2156                !(rq->cmd_flags & REQ_DONTPREP)) {
2157                /*
2158                 * remove the space for the drain we added
2159                 * so that we don't add it again
2160                 */
2161                --rq->nr_phys_segments;
2162            }
2163
2164            rq = NULL;
2165            break;
2166        } else if (ret == BLKPREP_KILL) {
2167            rq->cmd_flags |= REQ_QUIET;
2168            /*
2169             * Mark this request as started so we don't trigger
2170             * any debug logic in the end I/O path.
2171             */
2172            blk_start_request(rq);
2173            __blk_end_request_all(rq, -EIO);
2174        } else {
2175            printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2176            break;
2177        }
2178    }
2179
2180    return rq;
2181}
2182EXPORT_SYMBOL(blk_peek_request);
2183
2184void blk_dequeue_request(struct request *rq)
2185{
2186    struct request_queue *q = rq->q;
2187
2188    BUG_ON(list_empty(&rq->queuelist));
2189    BUG_ON(ELV_ON_HASH(rq));
2190
2191    list_del_init(&rq->queuelist);
2192
2193    /*
2194     * the time frame between a request being removed from the lists
2195     * and when it is freed is accounted as IO that is in progress at
2196     * the driver side.
2197     */
2198    if (blk_account_rq(rq)) {
2199        q->in_flight[rq_is_sync(rq)]++;
2200        set_io_start_time_ns(rq);
2201    }
2202}
2203
2204/**
2205 * blk_start_request - start request processing on the driver
2206 * @req: request to dequeue
2207 *
2208 * Description:
2209 * Dequeue @req and start timeout timer on it. This hands off the
2210 * request to the driver.
2211 *
2212 * Block internal functions which don't want to start timer should
2213 * call blk_dequeue_request().
2214 *
2215 * Context:
2216 * queue_lock must be held.
2217 */
2218void blk_start_request(struct request *req)
2219{
2220    blk_dequeue_request(req);
2221
2222    /*
2223     * We are now handing the request to the hardware, initialize
2224     * resid_len to full count and add the timeout handler.
2225     */
2226    req->resid_len = blk_rq_bytes(req);
2227    if (unlikely(blk_bidi_rq(req)))
2228        req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2229
2230    blk_add_timer(req);
2231}
2232EXPORT_SYMBOL(blk_start_request);
2233
2234/**
2235 * blk_fetch_request - fetch a request from a request queue
2236 * @q: request queue to fetch a request from
2237 *
2238 * Description:
2239 * Return the request at the top of @q. The request is started on
2240 * return and LLD can start processing it immediately.
2241 *
2242 * Return:
2243 * Pointer to the request at the top of @q if available. Null
2244 * otherwise.
2245 *
2246 * Context:
2247 * queue_lock must be held.
2248 */
2249struct request *blk_fetch_request(struct request_queue *q)
2250{
2251    struct request *rq;
2252
2253    rq = blk_peek_request(q);
2254    if (rq)
2255        blk_start_request(rq);
2256    return rq;
2257}
2258EXPORT_SYMBOL(blk_fetch_request);
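/*
 * Example (illustrative sketch): a simple single-queue driver's
 * ->request_fn, registered via blk_init_queue(), typically drains the
 * queue with blk_fetch_request().  It is called with the queue lock
 * held, which is exactly what the __blk_end_* helpers expect.  The data
 * transfer itself is elided; example_request_fn() is hypothetical.
 */
static void __maybe_unused example_request_fn(struct request_queue *q)
{
    struct request *rq;

    while ((rq = blk_fetch_request(q)) != NULL) {
        int error = 0;

        if (rq->cmd_type != REQ_TYPE_FS) {
            error = -EIO;   /* this sketch only handles fs requests */
        } else {
            /*
             * Transfer blk_rq_cur_bytes(rq) bytes starting at
             * sector blk_rq_pos(rq), e.g. by copying to or from
             * rq->buffer for a simple memory-backed device.
             */
        }

        /* queue lock is held, so the __ variant must be used */
        __blk_end_request_all(rq, error);
    }
}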
2259
2260/**
2261 * blk_update_request - Special helper function for request stacking drivers
2262 * @req: the request being processed
2263 * @error: %0 for success, < %0 for error
2264 * @nr_bytes: number of bytes to complete @req
2265 *
2266 * Description:
2267 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2268 * the request structure even if @req doesn't have leftover.
2269 * If @req has leftover, sets it up for the next range of segments.
2270 *
2271 * This special helper function is only for request stacking drivers
2272 * (e.g. request-based dm) so that they can handle partial completion.
2273 * Actual device drivers should use blk_end_request instead.
2274 *
2275 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2276 * %false return from this function.
2277 *
2278 * Return:
2279 * %false - this request doesn't have any more data
2280 * %true - this request has more data
2281 **/
2282bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2283{
2284    int total_bytes;
2285
2286    if (!req->bio)
2287        return false;
2288
2289    trace_block_rq_complete(req->q, req);
2290
2291    /*
2292     * For fs requests, rq is just a carrier of independent bios
2293     * and each partial completion should be handled separately.
2294     * Reset per-request error on each partial completion.
2295     *
2296     * TODO: tj: This is too subtle. It would be better to let
2297     * low level drivers do what they see fit.
2298     */
2299    if (req->cmd_type == REQ_TYPE_FS)
2300        req->errors = 0;
2301
2302    if (error && req->cmd_type == REQ_TYPE_FS &&
2303        !(req->cmd_flags & REQ_QUIET)) {
2304        char *error_type;
2305
2306        switch (error) {
2307        case -ENOLINK:
2308            error_type = "recoverable transport";
2309            break;
2310        case -EREMOTEIO:
2311            error_type = "critical target";
2312            break;
2313        case -EBADE:
2314            error_type = "critical nexus";
2315            break;
2316        case -ETIMEDOUT:
2317            error_type = "timeout";
2318            break;
2319        case -ENOSPC:
2320            error_type = "critical space allocation";
2321            break;
2322        case -ENODATA:
2323            error_type = "critical medium";
2324            break;
2325        case -EIO:
2326        default:
2327            error_type = "I/O";
2328            break;
2329        }
2330        printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
2331                   error_type, req->rq_disk ?
2332                   req->rq_disk->disk_name : "?",
2333                   (unsigned long long)blk_rq_pos(req));
2334
2335    }
2336
2337    blk_account_io_completion(req, nr_bytes);
2338
2339    total_bytes = 0;
2340    while (req->bio) {
2341        struct bio *bio = req->bio;
2342        unsigned bio_bytes = min(bio->bi_size, nr_bytes);
2343
2344        if (bio_bytes == bio->bi_size)
2345            req->bio = bio->bi_next;
2346
2347        req_bio_endio(req, bio, bio_bytes, error);
2348
2349        total_bytes += bio_bytes;
2350        nr_bytes -= bio_bytes;
2351
2352        if (!nr_bytes)
2353            break;
2354    }
2355
2356    /*
2357     * completely done
2358     */
2359    if (!req->bio) {
2360        /*
2361         * Reset counters so that the request stacking driver
2362         * can find how many bytes remain in the request
2363         * later.
2364         */
2365        req->__data_len = 0;
2366        return false;
2367    }
2368
2369    req->__data_len -= total_bytes;
2370    req->buffer = bio_data(req->bio);
2371
2372    /* update sector only for requests with a clear definition of sector */
2373    if (req->cmd_type == REQ_TYPE_FS)
2374        req->__sector += total_bytes >> 9;
2375
2376    /* mixed attributes always follow the first bio */
2377    if (req->cmd_flags & REQ_MIXED_MERGE) {
2378        req->cmd_flags &= ~REQ_FAILFAST_MASK;
2379        req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2380    }
2381
2382    /*
2383     * If the total number of sectors is less than the first segment
2384     * size, something has gone terribly wrong.
2385     */
2386    if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2387        blk_dump_rq_flags(req, "request botched");
2388        req->__data_len = blk_rq_cur_bytes(req);
2389    }
2390
2391    /* recalculate the number of segments */
2392    blk_recalc_rq_segments(req);
2393
2394    return true;
2395}
2396EXPORT_SYMBOL_GPL(blk_update_request);
2397
2398static bool blk_update_bidi_request(struct request *rq, int error,
2399                    unsigned int nr_bytes,
2400                    unsigned int bidi_bytes)
2401{
2402    if (blk_update_request(rq, error, nr_bytes))
2403        return true;
2404
2405    /* Bidi request must be completed as a whole */
2406    if (unlikely(blk_bidi_rq(rq)) &&
2407        blk_update_request(rq->next_rq, error, bidi_bytes))
2408        return true;
2409
2410    if (blk_queue_add_random(rq->q))
2411        add_disk_randomness(rq->rq_disk);
2412
2413    return false;
2414}
2415
2416/**
2417 * blk_unprep_request - unprepare a request
2418 * @req: the request
2419 *
2420 * This function makes a request ready for complete resubmission (or
2421 * completion). It happens only after all error handling is complete,
2422 * so represents the appropriate moment to deallocate any resources
2423 * that were allocated to the request in the prep_rq_fn. The queue
2424 * lock is held when calling this.
2425 */
2426void blk_unprep_request(struct request *req)
2427{
2428    struct request_queue *q = req->q;
2429
2430    req->cmd_flags &= ~REQ_DONTPREP;
2431    if (q->unprep_rq_fn)
2432        q->unprep_rq_fn(q, req);
2433}
2434EXPORT_SYMBOL_GPL(blk_unprep_request);
2435
2436/*
2437 * queue lock must be held
2438 */
2439static void blk_finish_request(struct request *req, int error)
2440{
2441    if (blk_rq_tagged(req))
2442        blk_queue_end_tag(req->q, req);
2443
2444    BUG_ON(blk_queued_rq(req));
2445
2446    if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2447        laptop_io_completion(&req->q->backing_dev_info);
2448
2449    blk_delete_timer(req);
2450
2451    if (req->cmd_flags & REQ_DONTPREP)
2452        blk_unprep_request(req);
2453
2454
2455    blk_account_io_done(req);
2456
2457    if (req->end_io)
2458        req->end_io(req, error);
2459    else {
2460        if (blk_bidi_rq(req))
2461            __blk_put_request(req->next_rq->q, req->next_rq);
2462
2463        __blk_put_request(req->q, req);
2464    }
2465}
2466
2467/**
2468 * blk_end_bidi_request - Complete a bidi request
2469 * @rq: the request to complete
2470 * @error: %0 for success, < %0 for error
2471 * @nr_bytes: number of bytes to complete @rq
2472 * @bidi_bytes: number of bytes to complete @rq->next_rq
2473 *
2474 * Description:
2475 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2476 * Drivers that support bidi can safely call this member for any
2477 * type of request, bidi or uni. In the latter case @bidi_bytes is
2478 * just ignored.
2479 *
2480 * Return:
2481 * %false - we are done with this request
2482 * %true - still buffers pending for this request
2483 **/
2484static bool blk_end_bidi_request(struct request *rq, int error,
2485                 unsigned int nr_bytes, unsigned int bidi_bytes)
2486{
2487    struct request_queue *q = rq->q;
2488    unsigned long flags;
2489
2490    if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2491        return true;
2492
2493    spin_lock_irqsave(q->queue_lock, flags);
2494    blk_finish_request(rq, error);
2495    spin_unlock_irqrestore(q->queue_lock, flags);
2496
2497    return false;
2498}
2499
2500/**
2501 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2502 * @rq: the request to complete
2503 * @error: %0 for success, < %0 for error
2504 * @nr_bytes: number of bytes to complete @rq
2505 * @bidi_bytes: number of bytes to complete @rq->next_rq
2506 *
2507 * Description:
2508 * Identical to blk_end_bidi_request() except that queue lock is
2509 * assumed to be locked on entry and remains so on return.
2510 *
2511 * Return:
2512 * %false - we are done with this request
2513 * %true - still buffers pending for this request
2514 **/
2515bool __blk_end_bidi_request(struct request *rq, int error,
2516                   unsigned int nr_bytes, unsigned int bidi_bytes)
2517{
2518    if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2519        return true;
2520
2521    blk_finish_request(rq, error);
2522
2523    return false;
2524}
2525
2526/**
2527 * blk_end_request - Helper function for drivers to complete the request.
2528 * @rq: the request being processed
2529 * @error: %0 for success, < %0 for error
2530 * @nr_bytes: number of bytes to complete
2531 *
2532 * Description:
2533 * Ends I/O on a number of bytes attached to @rq.
2534 * If @rq has leftover, sets it up for the next range of segments.
2535 *
2536 * Return:
2537 * %false - we are done with this request
2538 * %true - still buffers pending for this request
2539 **/
2540bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2541{
2542    return blk_end_bidi_request(rq, error, nr_bytes, 0);
2543}
2544EXPORT_SYMBOL(blk_end_request);
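/*
 * Example (illustrative sketch): a driver that knows how many bytes the
 * hardware actually transferred completes just that much; a %true
 * return means the request still has bytes left and remains owned by
 * the driver.  example_complete_chunk() is hypothetical.
 */
static void __maybe_unused example_complete_chunk(struct request *rq,
                          unsigned int done_bytes,
                          int error)
{
    if (blk_end_request(rq, error, done_bytes)) {
        /*
         * Partial completion: rq has been advanced past the
         * completed bytes; reprogram the hardware for the rest.
         */
        return;
    }
    /* fully completed: rq has been finished and put */
}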
2545
2546/**
2547 * blk_end_request_all - Helper function for drivers to finish the request.
2548 * @rq: the request to finish
2549 * @error: %0 for success, < %0 for error
2550 *
2551 * Description:
2552 * Completely finish @rq.
2553 */
2554void blk_end_request_all(struct request *rq, int error)
2555{
2556    bool pending;
2557    unsigned int bidi_bytes = 0;
2558
2559    if (unlikely(blk_bidi_rq(rq)))
2560        bidi_bytes = blk_rq_bytes(rq->next_rq);
2561
2562    pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2563    BUG_ON(pending);
2564}
2565EXPORT_SYMBOL(blk_end_request_all);
2566
2567/**
2568 * blk_end_request_cur - Helper function to finish the current request chunk.
2569 * @rq: the request to finish the current chunk for
2570 * @error: %0 for success, < %0 for error
2571 *
2572 * Description:
2573 * Complete the current consecutively mapped chunk from @rq.
2574 *
2575 * Return:
2576 * %false - we are done with this request
2577 * %true - still buffers pending for this request
2578 */
2579bool blk_end_request_cur(struct request *rq, int error)
2580{
2581    return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2582}
2583EXPORT_SYMBOL(blk_end_request_cur);
2584
2585/**
2586 * blk_end_request_err - Finish a request till the next failure boundary.
2587 * @rq: the request to finish till the next failure boundary for
2588 * @error: must be negative errno
2589 *
2590 * Description:
2591 * Complete @rq till the next failure boundary.
2592 *
2593 * Return:
2594 * %false - we are done with this request
2595 * %true - still buffers pending for this request
2596 */
2597bool blk_end_request_err(struct request *rq, int error)
2598{
2599    WARN_ON(error >= 0);
2600    return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2601}
2602EXPORT_SYMBOL_GPL(blk_end_request_err);
2603
2604/**
2605 * __blk_end_request - Helper function for drivers to complete the request.
2606 * @rq: the request being processed
2607 * @error: %0 for success, < %0 for error
2608 * @nr_bytes: number of bytes to complete
2609 *
2610 * Description:
2611 * Must be called with queue lock held unlike blk_end_request().
2612 *
2613 * Return:
2614 * %false - we are done with this request
2615 * %true - still buffers pending for this request
2616 **/
2617bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2618{
2619    return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2620}
2621EXPORT_SYMBOL(__blk_end_request);
2622
2623/**
2624 * __blk_end_request_all - Helper function for drivers to finish the request.
2625 * @rq: the request to finish
2626 * @error: %0 for success, < %0 for error
2627 *
2628 * Description:
2629 * Completely finish @rq. Must be called with queue lock held.
2630 */
2631void __blk_end_request_all(struct request *rq, int error)
2632{
2633    bool pending;
2634    unsigned int bidi_bytes = 0;
2635
2636    if (unlikely(blk_bidi_rq(rq)))
2637        bidi_bytes = blk_rq_bytes(rq->next_rq);
2638
2639    pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2640    BUG_ON(pending);
2641}
2642EXPORT_SYMBOL(__blk_end_request_all);
2643
2644/**
2645 * __blk_end_request_cur - Helper function to finish the current request chunk.
2646 * @rq: the request to finish the current chunk for
2647 * @error: %0 for success, < %0 for error
2648 *
2649 * Description:
2650 * Complete the current consecutively mapped chunk from @rq. Must
2651 * be called with queue lock held.
2652 *
2653 * Return:
2654 * %false - we are done with this request
2655 * %true - still buffers pending for this request
2656 */
2657bool __blk_end_request_cur(struct request *rq, int error)
2658{
2659    return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2660}
2661EXPORT_SYMBOL(__blk_end_request_cur);
2662
2663/**
2664 * __blk_end_request_err - Finish a request till the next failure boundary.
2665 * @rq: the request to finish till the next failure boundary for
2666 * @error: must be negative errno
2667 *
2668 * Description:
2669 * Complete @rq till the next failure boundary. Must be called
2670 * with queue lock held.
2671 *
2672 * Return:
2673 * %false - we are done with this request
2674 * %true - still buffers pending for this request
2675 */
2676bool __blk_end_request_err(struct request *rq, int error)
2677{
2678    WARN_ON(error >= 0);
2679    return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2680}
2681EXPORT_SYMBOL_GPL(__blk_end_request_err);
2682
2683void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2684             struct bio *bio)
2685{
2686    /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2687    rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2688
2689    if (bio_has_data(bio)) {
2690        rq->nr_phys_segments = bio_phys_segments(q, bio);
2691        rq->buffer = bio_data(bio);
2692    }
2693    rq->__data_len = bio->bi_size;
2694    rq->bio = rq->biotail = bio;
2695
2696    if (bio->bi_bdev)
2697        rq->rq_disk = bio->bi_bdev->bd_disk;
2698}
2699
2700#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2701/**
2702 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2703 * @rq: the request to be flushed
2704 *
2705 * Description:
2706 * Flush all pages in @rq.
2707 */
2708void rq_flush_dcache_pages(struct request *rq)
2709{
2710    struct req_iterator iter;
2711    struct bio_vec *bvec;
2712
2713    rq_for_each_segment(bvec, rq, iter)
2714        flush_dcache_page(bvec->bv_page);
2715}
2716EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2717#endif
2718
2719/**
2720 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2721 * @q : the queue of the device being checked
2722 *
2723 * Description:
2724 * Check if underlying low-level drivers of a device are busy.
2725 * If the drivers want to export their busy state, they must set their own
2726 * exporting function using blk_queue_lld_busy() first.
2727 *
2728 * Basically, this function is used only by request stacking drivers
2729 * to stop dispatching requests to underlying devices when underlying
2730 * devices are busy. This behavior helps more I/O merging on the queue
2731 * of the request stacking driver and prevents I/O throughput regression
2732 * on burst I/O load.
2733 *
2734 * Return:
2735 * 0 - Not busy (The request stacking driver should dispatch request)
2736 * 1 - Busy (The request stacking driver should stop dispatching request)
2737 */
2738int blk_lld_busy(struct request_queue *q)
2739{
2740    if (q->lld_busy_fn)
2741        return q->lld_busy_fn(q);
2742
2743    return 0;
2744}
2745EXPORT_SYMBOL_GPL(blk_lld_busy);
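/*
 * Example (illustrative sketch): a low-level driver exports its busy
 * state to stacking drivers by registering a callback with
 * blk_queue_lld_busy().  struct example_dev and the threshold are
 * hypothetical.
 */
struct example_dev {
    unsigned int inflight_cmds;
    unsigned int max_cmds;
};

static int example_lld_busy(struct request_queue *q)
{
    struct example_dev *edev = q->queuedata;

    /* non-zero tells the stacking driver to stop dispatching */
    return edev->inflight_cmds >= edev->max_cmds;
}

static void __maybe_unused example_export_busy_state(struct request_queue *q)
{
    blk_queue_lld_busy(q, example_lld_busy);
}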
2746
2747/**
2748 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2749 * @rq: the clone request to be cleaned up
2750 *
2751 * Description:
2752 * Free all bios in @rq for a cloned request.
2753 */
2754void blk_rq_unprep_clone(struct request *rq)
2755{
2756    struct bio *bio;
2757
2758    while ((bio = rq->bio) != NULL) {
2759        rq->bio = bio->bi_next;
2760
2761        bio_put(bio);
2762    }
2763}
2764EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2765
2766/*
2767 * Copy attributes of the original request to the clone request.
2768 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2769 */
2770static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2771{
2772    dst->cpu = src->cpu;
2773    dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2774    dst->cmd_type = src->cmd_type;
2775    dst->__sector = blk_rq_pos(src);
2776    dst->__data_len = blk_rq_bytes(src);
2777    dst->nr_phys_segments = src->nr_phys_segments;
2778    dst->ioprio = src->ioprio;
2779    dst->extra_len = src->extra_len;
2780}
2781
2782/**
2783 * blk_rq_prep_clone - Helper function to setup clone request
2784 * @rq: the request to be setup
2785 * @rq_src: original request to be cloned
2786 * @bs: bio_set that bios for clone are allocated from
2787 * @gfp_mask: memory allocation mask for bio
2788 * @bio_ctr: setup function to be called for each clone bio.
2789 * Returns %0 for success, non %0 for failure.
2790 * @data: private data to be passed to @bio_ctr
2791 *
2792 * Description:
2793 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2794 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2795 * are not copied, and copying such parts is the caller's responsibility.
2796 * Also, pages which the original bios are pointing to are not copied
2797 * and the cloned bios just point to the same pages.
2798 * So cloned bios must be completed before original bios, which means
2799 * the caller must complete @rq before @rq_src.
2800 */
2801int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2802              struct bio_set *bs, gfp_t gfp_mask,
2803              int (*bio_ctr)(struct bio *, struct bio *, void *),
2804              void *data)
2805{
2806    struct bio *bio, *bio_src;
2807
2808    if (!bs)
2809        bs = fs_bio_set;
2810
2811    blk_rq_init(NULL, rq);
2812
2813    __rq_for_each_bio(bio_src, rq_src) {
2814        bio = bio_clone_bioset(bio_src, gfp_mask, bs);
2815        if (!bio)
2816            goto free_and_out;
2817
2818        if (bio_ctr && bio_ctr(bio, bio_src, data))
2819            goto free_and_out;
2820
2821        if (rq->bio) {
2822            rq->biotail->bi_next = bio;
2823            rq->biotail = bio;
2824        } else
2825            rq->bio = rq->biotail = bio;
2826    }
2827
2828    __blk_rq_prep_clone(rq, rq_src);
2829
2830    return 0;
2831
2832free_and_out:
2833    if (bio)
2834        bio_put(bio);
2835    blk_rq_unprep_clone(rq);
2836
2837    return -ENOMEM;
2838}
2839EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
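/*
 * Example (illustrative sketch): roughly how a request stacking driver
 * (request-based dm is the in-tree user) sets up and dispatches a clone.
 * The clone must be completed before @orig, since both point at the same
 * pages.  A real driver also sets clone->end_io; note that ->end_io is
 * invoked with the lower queue's lock held, so completion of @orig is
 * usually deferred from there.  The names below are hypothetical.
 */
static int __maybe_unused example_clone_and_dispatch(struct request *orig,
                             struct request *clone,
                             struct request_queue *lower_q,
                             struct bio_set *bs)
{
    int ret;

    ret = blk_rq_prep_clone(clone, orig, bs, GFP_ATOMIC, NULL, NULL);
    if (ret)
        return ret;

    /* re-checks @lower_q's limits via blk_rq_check_limits() */
    ret = blk_insert_cloned_request(lower_q, clone);
    if (ret)
        blk_rq_unprep_clone(clone);
    return ret;
}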
2840
2841int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2842{
2843    return queue_work(kblockd_workqueue, work);
2844}
2845EXPORT_SYMBOL(kblockd_schedule_work);
2846
2847int kblockd_schedule_delayed_work(struct request_queue *q,
2848            struct delayed_work *dwork, unsigned long delay)
2849{
2850    return queue_delayed_work(kblockd_workqueue, dwork, delay);
2851}
2852EXPORT_SYMBOL(kblockd_schedule_delayed_work);
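/*
 * Example (illustrative sketch): drivers that need to restart their
 * queue from a context where recursing into the dispatch path is not
 * allowed can punt the work to kblockd, much as blk_run_queue_async()
 * does with q->delay_work.  struct example_kick and the helpers are
 * hypothetical.
 */
struct example_kick {
    struct request_queue *q;
    struct work_struct work;
};

static void example_kick_fn(struct work_struct *work)
{
    struct example_kick *ek = container_of(work, struct example_kick, work);

    spin_lock_irq(ek->q->queue_lock);
    __blk_run_queue(ek->q);     /* requires the queue lock */
    spin_unlock_irq(ek->q->queue_lock);
}

static void __maybe_unused example_kick_queue(struct example_kick *ek)
{
    INIT_WORK(&ek->work, example_kick_fn);
    kblockd_schedule_work(ek->q, &ek->work);
}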
2853
2854#define PLUG_MAGIC 0x91827364
2855
2856/**
2857 * blk_start_plug - initialize blk_plug and track it inside the task_struct
2858 * @plug: The &struct blk_plug that needs to be initialized
2859 *
2860 * Description:
2861 * Tracking blk_plug inside the task_struct will help with auto-flushing the
2862 * pending I/O should the task end up blocking between blk_start_plug() and
2863 * blk_finish_plug(). This is important from a performance perspective, but
2864 * also ensures that we don't deadlock. For instance, if the task is blocking
2865 * for a memory allocation, memory reclaim could end up wanting to free a
2866 * page belonging to that request that is currently residing in our private
2867 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
2868 * this kind of deadlock.
2869 */
2870void blk_start_plug(struct blk_plug *plug)
2871{
2872    struct task_struct *tsk = current;
2873
2874    plug->magic = PLUG_MAGIC;
2875    INIT_LIST_HEAD(&plug->list);
2876    INIT_LIST_HEAD(&plug->cb_list);
2877
2878    /*
2879     * If this is a nested plug, don't actually assign it. It will be
2880     * flushed on its own.
2881     */
2882    if (!tsk->plug) {
2883        /*
2884         * Store ordering should not be needed here, since a potential
2885         * preempt will imply a full memory barrier
2886         */
2887        tsk->plug = plug;
2888    }
2889}
2890EXPORT_SYMBOL(blk_start_plug);
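/*
 * Example (illustrative sketch): a submitter batches a number of bios
 * under one plug so they can be merged and dispatched together; if the
 * task blocks in between, the scheduler flushes the plug as described
 * above.  example_submit_batch() is hypothetical and assumes the bios
 * are already fully set up.
 */
static void __maybe_unused example_submit_batch(struct bio **bios,
                        unsigned int nr)
{
    struct blk_plug plug;
    unsigned int i;

    blk_start_plug(&plug);
    for (i = 0; i < nr; i++)
        submit_bio(WRITE, bios[i]); /* queued on the per-task plug */
    blk_finish_plug(&plug);         /* dispatches the plugged requests */
}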
2891
2892static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2893{
2894    struct request *rqa = container_of(a, struct request, queuelist);
2895    struct request *rqb = container_of(b, struct request, queuelist);
2896
2897    return !(rqa->q < rqb->q ||
2898        (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
2899}
2900
2901/*
2902 * If 'from_schedule' is true, then postpone the dispatch of requests
2903 * until a safe kblockd context. We do this to avoid accidental large
2904 * additional stack usage in driver dispatch, in places where the original
2905 * plugger did not intend it.
2906 */
2907static void queue_unplugged(struct request_queue *q, unsigned int depth,
2908                bool from_schedule)
2909    __releases(q->queue_lock)
2910{
2911    trace_block_unplug(q, depth, !from_schedule);
2912
2913    if (from_schedule)
2914        blk_run_queue_async(q);
2915    else
2916        __blk_run_queue(q);
2917    spin_unlock(q->queue_lock);
2918}
2919
2920static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
2921{
2922    LIST_HEAD(callbacks);
2923
2924    while (!list_empty(&plug->cb_list)) {
2925        list_splice_init(&plug->cb_list, &callbacks);
2926
2927        while (!list_empty(&callbacks)) {
2928            struct blk_plug_cb *cb = list_first_entry(&callbacks,
2929                              struct blk_plug_cb,
2930                              list);
2931            list_del(&cb->list);
2932            cb->callback(cb, from_schedule);
2933        }
2934    }
2935}
2936
2937struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
2938                      int size)
2939{
2940    struct blk_plug *plug = current->plug;
2941    struct blk_plug_cb *cb;
2942
2943    if (!plug)
2944        return NULL;
2945
2946    list_for_each_entry(cb, &plug->cb_list, list)
2947        if (cb->callback == unplug && cb->data == data)
2948            return cb;
2949
2950    /* Not currently on the callback list */
2951    BUG_ON(size < sizeof(*cb));
2952    cb = kzalloc(size, GFP_ATOMIC);
2953    if (cb) {
2954        cb->data = data;
2955        cb->callback = unplug;
2956        list_add(&cb->list, &plug->cb_list);
2957    }
2958    return cb;
2959}
2960EXPORT_SYMBOL(blk_check_plugged);
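/*
 * Example (illustrative sketch): stacking drivers that want per-plug
 * state (md does something similar) register an unplug callback the
 * first time they see a plugged task; the callback later runs from
 * blk_flush_plug_list() and is responsible for freeing the cb.  The
 * example_* names are hypothetical.
 */
struct example_plug_cb {
    struct blk_plug_cb cb;  /* must be first; blk_check_plugged returns it */
    unsigned int queued;
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
    struct example_plug_cb *ecb = container_of(cb, struct example_plug_cb, cb);

    pr_debug("example: dispatching %u plugged units\n", ecb->queued);
    kfree(cb);
}

static void __maybe_unused example_note_plugged_io(void *driver_data)
{
    struct blk_plug_cb *cb;

    cb = blk_check_plugged(example_unplug, driver_data,
                   sizeof(struct example_plug_cb));
    if (cb)
        container_of(cb, struct example_plug_cb, cb)->queued++;
    /* a NULL return means the task is not plugged; submit directly */
}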
2961
2962void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2963{
2964    struct request_queue *q;
2965    unsigned long flags;
2966    struct request *rq;
2967    LIST_HEAD(list);
2968    unsigned int depth;
2969
2970    BUG_ON(plug->magic != PLUG_MAGIC);
2971
2972    flush_plug_callbacks(plug, from_schedule);
2973    if (list_empty(&plug->list))
2974        return;
2975
2976    list_splice_init(&plug->list, &list);
2977
2978    list_sort(NULL, &list, plug_rq_cmp);
2979
2980    q = NULL;
2981    depth = 0;
2982
2983    /*
2984     * Save and disable interrupts here, to avoid doing it for every
2985     * queue lock we have to take.
2986     */
2987    local_irq_save(flags);
2988    while (!list_empty(&list)) {
2989        rq = list_entry_rq(list.next);
2990        list_del_init(&rq->queuelist);
2991        BUG_ON(!rq->q);
2992        if (rq->q != q) {
2993            /*
2994             * This drops the queue lock
2995             */
2996            if (q)
2997                queue_unplugged(q, depth, from_schedule);
2998            q = rq->q;
2999            depth = 0;
3000            spin_lock(q->queue_lock);
3001        }
3002
3003        /*
3004         * Short-circuit if @q is dead
3005         */
3006        if (unlikely(blk_queue_dying(q))) {
3007            __blk_end_request_all(rq, -ENODEV);
3008            continue;
3009        }
3010
3011        /*
3012         * rq is already accounted, so use raw insert
3013         */
3014        if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
3015            __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3016        else
3017            __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3018
3019        depth++;
3020    }
3021
3022    /*
3023     * This drops the queue lock
3024     */
3025    if (q)
3026        queue_unplugged(q, depth, from_schedule);
3027
3028    local_irq_restore(flags);
3029}
3030
3031void blk_finish_plug(struct blk_plug *plug)
3032{
3033    blk_flush_plug_list(plug, false);
3034
3035    if (plug == current->plug)
3036        current->plug = NULL;
3037}
3038EXPORT_SYMBOL(blk_finish_plug);
3039
3040#ifdef CONFIG_PM_RUNTIME
3041/**
3042 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3043 * @q: the queue of the device
3044 * @dev: the device the queue belongs to
3045 *
3046 * Description:
3047 * Initialize runtime-PM-related fields for @q and start auto suspend for
3048 * @dev. Drivers that want to take advantage of request-based runtime PM
3049 * should call this function after @dev has been initialized, and its
3050 * request queue @q has been allocated, and runtime PM for it cannot happen
3051 * yet (either because it is disabled/forbidden or its usage_count > 0). In most
3052 * cases, the driver should call this function before any I/O has taken place.
3053 *
3054 * This function takes care of setting up autosuspend for the device;
3055 * the autosuspend delay is set to -1 to make runtime suspend impossible
3056 * until an updated value is set by either the user or the driver. Drivers do
3057 * not need to touch other autosuspend settings.
3058 *
3059 * The block layer runtime PM is request based, so it only works for drivers
3060 * that use requests as their IO unit instead of those that directly use bios.
3061 */
3062void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3063{
3064    q->dev = dev;
3065    q->rpm_status = RPM_ACTIVE;
3066    pm_runtime_set_autosuspend_delay(q->dev, -1);
3067    pm_runtime_use_autosuspend(q->dev);
3068}
3069EXPORT_SYMBOL(blk_pm_runtime_init);
3070
3071/**
3072 * blk_pre_runtime_suspend - Pre runtime suspend check
3073 * @q: the queue of the device
3074 *
3075 * Description:
3076 * This function will check if runtime suspend is allowed for the device
3077 * by examining if there are any requests pending in the queue. If there
3078 * are requests pending, the device cannot be runtime suspended; otherwise,
3079 * the queue's status will be updated to SUSPENDING and the driver can
3080 * proceed to suspend the device.
3081 *
3082 * For the not-allowed case, we mark last busy for the device so that
3083 * the runtime PM core will try to autosuspend it some time later.
3084 *
3085 * This function should be called near the start of the device's
3086 * runtime_suspend callback.
3087 *
3088 * Return:
3089 * 0 - OK to runtime suspend the device
3090 * -EBUSY - Device should not be runtime suspended
3091 */
3092int blk_pre_runtime_suspend(struct request_queue *q)
3093{
3094    int ret = 0;
3095
3096    spin_lock_irq(q->queue_lock);
3097    if (q->nr_pending) {
3098        ret = -EBUSY;
3099        pm_runtime_mark_last_busy(q->dev);
3100    } else {
3101        q->rpm_status = RPM_SUSPENDING;
3102    }
3103    spin_unlock_irq(q->queue_lock);
3104    return ret;
3105}
3106EXPORT_SYMBOL(blk_pre_runtime_suspend);
3107
3108/**
3109 * blk_post_runtime_suspend - Post runtime suspend processing
3110 * @q: the queue of the device
3111 * @err: return value of the device's runtime_suspend function
3112 *
3113 * Description:
3114 * Update the queue's runtime status according to the return value of the
3115 * device's runtime suspend function and mark last busy for the device so
3116 * that PM core will try to auto suspend the device at a later time.
3117 *
3118 * This function should be called near the end of the device's
3119 * runtime_suspend callback.
3120 */
3121void blk_post_runtime_suspend(struct request_queue *q, int err)
3122{
3123    spin_lock_irq(q->queue_lock);
3124    if (!err) {
3125        q->rpm_status = RPM_SUSPENDED;
3126    } else {
3127        q->rpm_status = RPM_ACTIVE;
3128        pm_runtime_mark_last_busy(q->dev);
3129    }
3130    spin_unlock_irq(q->queue_lock);
3131}
3132EXPORT_SYMBOL(blk_post_runtime_suspend);
3133
3134/**
3135 * blk_pre_runtime_resume - Pre runtime resume processing
3136 * @q: the queue of the device
3137 *
3138 * Description:
3139 * Update the queue's runtime status to RESUMING in preparation for the
3140 * runtime resume of the device.
3141 *
3142 * This function should be called near the start of the device's
3143 * runtime_resume callback.
3144 */
3145void blk_pre_runtime_resume(struct request_queue *q)
3146{
3147    spin_lock_irq(q->queue_lock);
3148    q->rpm_status = RPM_RESUMING;
3149    spin_unlock_irq(q->queue_lock);
3150}
3151EXPORT_SYMBOL(blk_pre_runtime_resume);
3152
3153/**
3154 * blk_post_runtime_resume - Post runtime resume processing
3155 * @q: the queue of the device
3156 * @err: return value of the device's runtime_resume function
3157 *
3158 * Description:
3159 * Update the queue's runtime status according to the return value of the
3160 * device's runtime_resume function. If it is successfully resumed, process
3161 * the requests that are queued into the device's queue when it is resuming
3162 * and then mark last busy and initiate autosuspend for it.
3163 *
3164 * This function should be called near the end of the device's
3165 * runtime_resume callback.
3166 */
3167void blk_post_runtime_resume(struct request_queue *q, int err)
3168{
3169    spin_lock_irq(q->queue_lock);
3170    if (!err) {
3171        q->rpm_status = RPM_ACTIVE;
3172        __blk_run_queue(q);
3173        pm_runtime_mark_last_busy(q->dev);
3174        pm_request_autosuspend(q->dev);
3175    } else {
3176        q->rpm_status = RPM_SUSPENDED;
3177    }
3178    spin_unlock_irq(q->queue_lock);
3179}
3180EXPORT_SYMBOL(blk_post_runtime_resume);
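/*
 * Example (illustrative sketch): how a request-based driver might wire
 * the helpers above into its runtime PM callbacks.  The hardware
 * suspend/resume steps are elided and all example_* names, as well as
 * the use of drvdata to find the queue, are hypothetical.
 */
static int __maybe_unused example_runtime_suspend(struct device *dev)
{
    struct request_queue *q = dev_get_drvdata(dev);
    int err;

    err = blk_pre_runtime_suspend(q);
    if (err)
        return err;     /* requests pending: -EBUSY */

    err = 0;            /* put the hardware to sleep here */
    blk_post_runtime_suspend(q, err);
    return err;
}

static int __maybe_unused example_runtime_resume(struct device *dev)
{
    struct request_queue *q = dev_get_drvdata(dev);
    int err;

    blk_pre_runtime_resume(q);
    err = 0;            /* wake the hardware up here */
    blk_post_runtime_resume(q, err);
    return err;
}

static void __maybe_unused example_enable_runtime_pm(struct request_queue *q,
                              struct device *dev)
{
    dev_set_drvdata(dev, q);
    blk_pm_runtime_init(q, dev);
    /* override the -1 default so autosuspend can actually happen */
    pm_runtime_set_autosuspend_delay(dev, 5000);
    pm_runtime_allow(dev);
}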
3181#endif
3182
3183int __init blk_dev_init(void)
3184{
3185    BUILD_BUG_ON(__REQ_NR_BITS > 8 *
3186            sizeof(((struct request *)0)->cmd_flags));
3187
3188    /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3189    kblockd_workqueue = alloc_workqueue("kblockd",
3190                        WQ_MEM_RECLAIM | WQ_HIGHPRI |
3191                        WQ_POWER_EFFICIENT, 0);
3192    if (!kblockd_workqueue)
3193        panic("Failed to create kblockd\n");
3194
3195    request_cachep = kmem_cache_create("blkdev_requests",
3196            sizeof(struct request), 0, SLAB_PANIC, NULL);
3197
3198    blk_requestq_cachep = kmem_cache_create("blkdev_queue",
3199            sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3200
3201    return 0;
3202}
3203
