/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q: The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 * Should be used when a device returns a tag and you want to match
 * it with a request.
 *
 * no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
    return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
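
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * completion path that maps a hardware-reported tag back to its request.
 * The function name is hypothetical; both blk_queue_end_tag() and
 * __blk_end_request_all() expect the queue lock to be held by the caller.
 */
static void example_complete_tag(struct request_queue *q, int hw_tag)
{
    struct request *rq = blk_queue_find_tag(q, hw_tag);

    if (unlikely(!rq)) {
        printk(KERN_ERR "example: no request pending for tag %d\n",
               hw_tag);
        return;
    }

    blk_queue_end_tag(q, rq);       /* release the tag first ... */
    __blk_end_request_all(rq, 0);   /* ... then complete the request */
}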

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * Drop the reference count on @bqt and free it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
    if (atomic_dec_and_test(&bqt->refcnt)) {
        BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                            bqt->max_depth);

        kfree(bqt->tag_index);
        bqt->tag_index = NULL;

        kfree(bqt->tag_map);
        bqt->tag_map = NULL;

        kfree(bqt);
    }
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 * blk_cleanup_queue() will take care of calling this function, if tagging
 * has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
    struct blk_queue_tag *bqt = q->queue_tags;

    if (!bqt)
        return;

    blk_free_tags(bqt);

    q->queue_tags = NULL;
    queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 * This is used to disable tagged queuing to a device, yet leave the
 * queue itself functioning.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
    queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
    struct request **tag_index;
    unsigned long *tag_map;
    int nr_ulongs;

    if (q && depth > q->nr_requests * 2) {
        depth = q->nr_requests * 2;
        printk(KERN_ERR "%s: adjusted depth to %d\n",
               __func__, depth);
    }

    tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
    if (!tag_index)
        goto fail;

    nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
    tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
    if (!tag_map)
        goto fail;

    tags->real_max_depth = depth;
    tags->max_depth = depth;
    tags->tag_index = tag_index;
    tags->tag_map = tag_map;

    return 0;
fail:
    kfree(tag_index);
    return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                           int depth)
{
    struct blk_queue_tag *tags;

    tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
    if (!tags)
        goto fail;

    if (init_tag_map(q, tags, depth))
        goto fail;

    atomic_set(&tags->refcnt, 1);
    return tags;
fail:
    kfree(tags);
    return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth: the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
    return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
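
/*
 * Sketch (illustrative, not part of the original file): sharing one
 * externally allocated tag map between two queues.  The names are
 * hypothetical.  Per the code below, blk_queue_init_tags() takes its own
 * reference on a passed-in map, so the initial reference from
 * blk_init_tags() can be dropped once both queues are attached.
 */
static int example_share_tags(struct request_queue *qa,
                              struct request_queue *qb, int depth)
{
    struct blk_queue_tag *tags = blk_init_tags(depth);
    int ret;

    if (!tags)
        return -ENOMEM;

    ret = blk_queue_init_tags(qa, depth, tags);
    if (!ret)
        ret = blk_queue_init_tags(qb, depth, tags);

    blk_free_tags(tags);    /* drop the initial reference */
    return ret;
}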

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the tag map to use, if any
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
            struct blk_queue_tag *tags)
{
    int rc;

    BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

    if (!tags && !q->queue_tags) {
        tags = __blk_queue_init_tags(q, depth);

        if (!tags)
            return -ENOMEM;

    } else if (q->queue_tags) {
        rc = blk_queue_resize_tags(q, depth);
        if (rc)
            return rc;
        queue_flag_set(QUEUE_FLAG_QUEUED, q);
        return 0;
    } else
        atomic_inc(&tags->refcnt);

    /*
     * assign it, all done
     */
    q->queue_tags = tags;
    queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
    INIT_LIST_HEAD(&q->tag_busy_list);
    return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
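
/*
 * Sketch (illustrative): the common single-queue case passes a NULL map
 * and lets the block layer allocate one.  The function name and depth
 * are hypothetical.
 */
static int example_enable_tcq(struct request_queue *q)
{
    return blk_queue_init_tags(q, 32, NULL);
}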

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q: the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 * Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    struct request **tag_index;
    unsigned long *tag_map;
    int max_depth, nr_ulongs;

    if (!bqt)
        return -ENXIO;
    /*
     * If we already have a large enough real_max_depth, just
     * adjust max_depth. *NOTE* as requests with tag values
     * between new_depth and real_max_depth can be in-flight, the
     * tag map cannot be shrunk blindly here.
     */
    if (new_depth <= bqt->real_max_depth) {
        bqt->max_depth = new_depth;
        return 0;
    }

    /*
     * Currently cannot replace a shared tag map with a new
     * one, so error out if this is the case
     */
    if (atomic_read(&bqt->refcnt) != 1)
        return -EBUSY;

    /*
     * save the old state info, so we can copy it back
     */
    tag_index = bqt->tag_index;
    tag_map = bqt->tag_map;
    max_depth = bqt->real_max_depth;

    if (init_tag_map(q, bqt, new_depth))
        return -ENOMEM;

    memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
    nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
    memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

    kfree(tag_index);
    kfree(tag_map);
    return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
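
/*
 * Sketch (illustrative): resizing must happen under the queue lock, per
 * the note above.  The wrapper name is hypothetical.
 */
static int example_set_queue_depth(struct request_queue *q, int depth)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(q->queue_lock, flags);
    ret = blk_queue_resize_tags(q, depth);
    spin_unlock_irqrestore(q->queue_lock, flags);
    return ret;
}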

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q: the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 * Typically called when end_that_request_first() returns %0, meaning
 * all transfers have been done for a request. It's important to call
 * this function before end_that_request_last(), as that will put the
 * request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 * queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    unsigned tag = rq->tag; /* negative tags invalid */

    BUG_ON(tag >= bqt->real_max_depth);

    list_del_init(&rq->queuelist);
    rq->cmd_flags &= ~REQ_QUEUED;
    rq->tag = -1;

    if (unlikely(bqt->tag_index[tag] == NULL))
        printk(KERN_ERR "%s: tag %d is missing\n",
               __func__, tag);

    bqt->tag_index[tag] = NULL;

    if (unlikely(!test_bit(tag, bqt->tag_map))) {
        printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
               __func__, tag);
        return;
    }
    /*
     * The tag_map bit acts as a lock for tag_index[bit], so we need
     * unlock memory barrier semantics.
     */
    clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q: the request queue for the device
 * @rq: the block request that needs tagging
 *
 * Description:
 * This can either be used as a stand-alone helper, or possibly be
 * assigned as the queue &prep_rq_fn (in which case &struct request
 * automagically gets a tag assigned). Note that this function
 * assumes that any type of request can be queued! If this is not
 * true for your device, you must check the request type before
 * calling this function. The request will also be removed from
 * the request queue, so it's the driver's responsibility to re-add
 * it if it should need to be restarted for some reason.
 *
 * Notes:
 * queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    unsigned max_depth;
    int tag;

    if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
        printk(KERN_ERR
               "%s: request %p for device [%s] already tagged %d",
               __func__, rq,
               rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
        BUG();
    }

    /*
     * Protect against shared tag maps, as we may not have exclusive
     * access to the tag map.
     *
     * We reserve a few tags just for sync IO, since we don't want
     * to starve sync IO on behalf of flooding async IO.
     */
    max_depth = bqt->max_depth;
    if (!rq_is_sync(rq) && max_depth > 1) {
        switch (max_depth) {
        case 2:
            max_depth = 1;
            break;
        case 3:
            max_depth = 2;
            break;
        default:
            max_depth -= 2;
        }
        if (q->in_flight[BLK_RW_ASYNC] > max_depth)
            return 1;
    }

    do {
        tag = find_first_zero_bit(bqt->tag_map, max_depth);
        if (tag >= max_depth)
            return 1;

    } while (test_and_set_bit_lock(tag, bqt->tag_map));
    /*
     * We need lock ordering semantics given by test_and_set_bit_lock.
     * See blk_queue_end_tag for details.
     */

    rq->cmd_flags |= REQ_QUEUED;
    rq->tag = tag;
    bqt->tag_index[tag] = rq;
    blk_start_request(rq);
    list_add(&rq->queuelist, &q->tag_busy_list);
    return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
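
/*
 * Sketch (illustrative): a tagged driver's request_fn, which the block
 * layer invokes with the queue lock held.  On success
 * blk_queue_start_tag() has already called blk_start_request(), so the
 * driver must not start the request again; on failure the request stays
 * on the queue for a later retry.  The dispatch step is hypothetical.
 */
static void example_request_fn(struct request_queue *q)
{
    struct request *rq;

    while ((rq = blk_peek_request(q)) != NULL) {
        if (blk_queue_start_tag(q, rq))
            break;    /* out of tags, try again on the next run */

        /* issue rq to the hardware using rq->tag here */
    }
}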

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q: the request queue for the device
 *
 * Description:
 * Hardware conditions may dictate a need to stop all pending requests.
 * In this case, we will safely clear the block side of the tag queue and
 * re-add all requests to the request queue in the right order.
 *
 * Notes:
 * queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
    struct list_head *tmp, *n;

    list_for_each_safe(tmp, n, &q->tag_busy_list)
        blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
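
/*
 * Sketch (illustrative): after a controller reset a driver can push all
 * outstanding tagged requests back to the block layer for re-issue and
 * restart dispatch.  Called with the queue lock held; the function name
 * is hypothetical.
 */
static void example_handle_reset(struct request_queue *q)
{
    blk_queue_invalidate_tags(q);
    __blk_run_queue(q);    /* re-dispatch the requeued requests */
}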