block/blk-exec.c

/*
 * Functions related to the execution of block I/O requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched/sysctl.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
    struct completion *waiting = rq->end_io_data;

    rq->end_io_data = NULL;
    __blk_put_request(rq->q, rq);

    /*
     * complete last, if this is a stack request the process (and thus
     * the rq pointer) could be invalid right after this complete()
     */
    complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 * Insert a fully prepared request at the back of the I/O scheduler queue
 * for execution. Don't wait for completion.
 *
 * Note:
 * This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
               struct request *rq, int at_head,
               rq_end_io_fn *done)
{
    int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
    bool is_pm_resume;

    WARN_ON(irqs_disabled());

    rq->rq_disk = bd_disk;
    rq->end_io = done;
    /*
     * need to check this before __blk_run_queue(), because rq can
     * be freed before that returns.
     */
    is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

    spin_lock_irq(q->queue_lock);

    if (unlikely(blk_queue_dying(q))) {
        rq->errors = -ENXIO;
        if (rq->end_io)
            rq->end_io(rq, rq->errors);
        spin_unlock_irq(q->queue_lock);
        return;
    }

    __elv_add_request(q, rq, where);
    __blk_run_queue(q);
    /* the queue is stopped so it won't be run */
    if (is_pm_resume)
        __blk_run_queue_uncond(q);
    spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
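
/*
 * Illustrative sketch: one way a driver could issue an asynchronous
 * special command through blk_execute_rq_nowait().  The my_example_*
 * names, the request type and the timeout are assumptions made up for
 * the example, not taken from any particular caller.  Note that the
 * completion callback runs with q->queue_lock held, which is why it
 * uses __blk_put_request().
 */
#if 0   /* example only, never compiled */
static void my_example_end_io(struct request *rq, int error)
{
    /* inspect rq->errors / rq->sense here as needed */
    pr_info("example request finished, errors=%d\n", rq->errors);

    /* end_io is invoked with q->queue_lock held, so use __blk_put_request() */
    __blk_put_request(rq->q, rq);
}

static int my_example_issue_async(struct request_queue *q)
{
    struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

    if (!rq)
        return -ENOMEM;

    rq->cmd_type = REQ_TYPE_SPECIAL;
    rq->timeout = 30 * HZ;

    /* fire and forget; my_example_end_io() releases the request */
    blk_execute_rq_nowait(q, NULL, rq, 0, my_example_end_io);
    return 0;
}
#endif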

/**
 * blk_execute_rq - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 * Insert a fully prepared request at the back of the I/O scheduler queue
 * for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
           struct request *rq, int at_head)
{
    DECLARE_COMPLETION_ONSTACK(wait);
    char sense[SCSI_SENSE_BUFFERSIZE];
    int err = 0;
    unsigned long hang_check;

    /*
     * we need an extra reference to the request, so we can look at
     * it after io completion
     */
    rq->ref_count++;

    if (!rq->sense) {
        memset(sense, 0, sizeof(sense));
        rq->sense = sense;
        rq->sense_len = 0;
    }

    rq->end_io_data = &wait;
    blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

    /* Prevent hang_check timer from firing at us during very long I/O */
    hang_check = sysctl_hung_task_timeout_secs;
    if (hang_check)
        while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
    else
        wait_for_completion_io(&wait);

    if (rq->errors)
        err = -EIO;

    return err;
}
EXPORT_SYMBOL(blk_execute_rq);
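
/*
 * Illustrative sketch: a synchronous caller built on blk_execute_rq().
 * The my_example_* name, the buffer mapping via blk_rq_map_kern() and
 * the timeout are assumptions made up for the example.  Because
 * blk_execute_rq() takes its own extra reference, the caller can still
 * look at the request afterwards and must drop its reference with
 * blk_put_request().
 */
#if 0   /* example only, never compiled */
static int my_example_issue_sync(struct request_queue *q, void *buf,
                 unsigned int len)
{
    struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
    int err;

    if (!rq)
        return -ENOMEM;

    rq->cmd_type = REQ_TYPE_SPECIAL;
    rq->timeout = 30 * HZ;

    /* map a kernel buffer into the request; skip for no-data commands */
    err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
    if (err)
        goto out;

    /* blocks until blk_end_sync_rq() signals the on-stack completion */
    err = blk_execute_rq(q, NULL, rq, 0);
out:
    blk_put_request(rq);
    return err;
}
#endif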