block/blk-tag.c

/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q: The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 * Should be used when a device returns a tag and you want to match
 * it with a request.
 *
 * no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
    return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
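
/*
 * Example (editorial sketch, not part of the original file): matching a
 * hardware-reported tag back to its request, e.g. from a completion
 * interrupt. "mydev_irq_tag" and "hw_tag" are hypothetical names; as the
 * note above says, no locking is needed for the lookup itself.
 */
static struct request *mydev_irq_tag(struct request_queue *q, int hw_tag)
{
    struct request *rq = blk_queue_find_tag(q, hw_tag);

    if (unlikely(!rq))
        printk(KERN_ERR "mydev: device reported unknown tag %d\n", hw_tag);
    return rq;
}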
28
/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * Tries to free the specified @bqt. Returns true if it was
 * actually freed and false if there are still references using it
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
    int retval;

    retval = atomic_dec_and_test(&bqt->refcnt);
    if (retval) {
        BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                            bqt->max_depth);

        kfree(bqt->tag_index);
        bqt->tag_index = NULL;

        kfree(bqt->tag_map);
        bqt->tag_map = NULL;

        kfree(bqt);
    }

    return retval;
}
56
/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 * blk_cleanup_queue() will take care of calling this function, if tagging
 * has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
    struct blk_queue_tag *bqt = q->queue_tags;

    if (!bqt)
        return;

    __blk_free_tags(bqt);

    q->queue_tags = NULL;
    queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * For an externally managed @bqt, this frees the map. Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
    if (unlikely(!__blk_free_tags(bqt)))
        BUG();
}
EXPORT_SYMBOL(blk_free_tags);
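
/*
 * Example (editorial sketch): one externally managed tag map shared by
 * several queues on the same host. blk_init_tags() creates the map with
 * the host holding one reference; blk_queue_init_tags() takes another
 * reference per queue; blk_free_tags() drops the host's reference once
 * every queue using the map has been torn down. "struct mydev_host" and
 * both helpers are hypothetical.
 */
struct mydev_host {
    struct blk_queue_tag *shared_tags;
};

static int mydev_host_alloc_tags(struct mydev_host *host, int depth)
{
    host->shared_tags = blk_init_tags(depth);
    return host->shared_tags ? 0 : -ENOMEM;
}

static void mydev_host_free_tags(struct mydev_host *host)
{
    /* every queue that used the map must already be cleaned up */
    blk_free_tags(host->shared_tags);
}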
92
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 * This is used to disable tagged queuing to a device, while leaving
 * the queue itself functional.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
    queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);
106
static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
    struct request **tag_index;
    unsigned long *tag_map;
    int nr_ulongs;

    if (q && depth > q->nr_requests * 2) {
        depth = q->nr_requests * 2;
        printk(KERN_ERR "%s: adjusted depth to %d\n",
               __func__, depth);
    }

    tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
    if (!tag_index)
        goto fail;

    nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
    tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
    if (!tag_map)
        goto fail;

    tags->real_max_depth = depth;
    tags->max_depth = depth;
    tags->tag_index = tag_index;
    tags->tag_map = tag_map;

    return 0;
fail:
    kfree(tag_index);
    return -ENOMEM;
}
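
/*
 * Worked example of the bitmap sizing above (editorial note): with
 * depth == 100 on a 64-bit machine (BITS_PER_LONG == 64),
 * ALIGN(100, 64) / 64 == 128 / 64 == 2, so the tag bitmap spans two
 * unsigned longs (128 bits), of which only the first 100 tags are ever
 * handed out (max_depth == 100).
 */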
139
static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                           int depth)
{
    struct blk_queue_tag *tags;

    tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
    if (!tags)
        goto fail;

    if (init_tag_map(q, tags, depth))
        goto fail;

    atomic_set(&tags->refcnt, 1);
    return tags;
fail:
    kfree(tags);
    return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth: the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
    return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
168
/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the tag map to use, or %NULL to allocate a queue-private one
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
            struct blk_queue_tag *tags)
{
    int rc;

    BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

    if (!tags && !q->queue_tags) {
        tags = __blk_queue_init_tags(q, depth);

        if (!tags)
            return -ENOMEM;

    } else if (q->queue_tags) {
        rc = blk_queue_resize_tags(q, depth);
        if (rc)
            return rc;
        queue_flag_set(QUEUE_FLAG_QUEUED, q);
        return 0;
    } else
        atomic_inc(&tags->refcnt);

    /*
     * assign it, all done
     */
    q->queue_tags = tags;
    queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
    INIT_LIST_HEAD(&q->tag_busy_list);
    return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
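
/*
 * Example (editorial sketch): enabling tagged queuing at probe time with a
 * queue-private map, so @tags is %NULL and the map is allocated
 * internally. "MYDEV_QUEUE_DEPTH" and "mydev_enable_tcq" are hypothetical;
 * a real driver would use whatever depth it negotiated with the hardware.
 */
#define MYDEV_QUEUE_DEPTH 64

static int mydev_enable_tcq(struct request_queue *q)
{
    int ret = blk_queue_init_tags(q, MYDEV_QUEUE_DEPTH, NULL);

    if (ret)
        printk(KERN_WARNING "mydev: tagged queuing disabled (%d)\n", ret);
    return ret;
}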
209
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q: the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 * Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    struct request **tag_index;
    unsigned long *tag_map;
    int max_depth, nr_ulongs;

    if (!bqt)
        return -ENXIO;

    /*
     * If we already have a large enough real_max_depth, just adjust
     * max_depth. *NOTE* as requests with tag values between new_depth
     * and real_max_depth can be in flight, the tag map cannot be
     * shrunk blindly here.
     */
    if (new_depth <= bqt->real_max_depth) {
        bqt->max_depth = new_depth;
        return 0;
    }

    /*
     * Currently cannot replace a shared tag map with a new
     * one, so error out if this is the case
     */
    if (atomic_read(&bqt->refcnt) != 1)
        return -EBUSY;

    /*
     * save the old state info, so we can copy it back
     */
    tag_index = bqt->tag_index;
    tag_map = bqt->tag_map;
    max_depth = bqt->real_max_depth;

    if (init_tag_map(q, bqt, new_depth))
        return -ENOMEM;

    memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
    nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
    memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

    kfree(tag_index);
    kfree(tag_map);
    return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
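
/*
 * Example (editorial sketch): growing the tag depth after the device
 * reports support for a deeper queue. Per the notes above, the queue lock
 * must be held across the resize; shrinking below real_max_depth only
 * lowers max_depth and never reallocates the map. "mydev_set_depth" is a
 * hypothetical helper.
 */
static int mydev_set_depth(struct request_queue *q, int new_depth)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(q->queue_lock, flags);
    ret = blk_queue_resize_tags(q, new_depth);
    spin_unlock_irqrestore(q->queue_lock, flags);

    return ret;
}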
265
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q: the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 * Typically called when end_that_request_first() returns %0, meaning
 * all transfers have been done for a request. It's important to call
 * this function before end_that_request_last(), as that will put the
 * request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 * queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    unsigned tag = rq->tag; /* negative tags invalid */

    BUG_ON(tag >= bqt->real_max_depth);

    list_del_init(&rq->queuelist);
    rq->cmd_flags &= ~REQ_QUEUED;
    rq->tag = -1;

    if (unlikely(bqt->tag_index[tag] == NULL))
        printk(KERN_ERR "%s: tag %d is missing\n",
               __func__, tag);

    bqt->tag_index[tag] = NULL;

    if (unlikely(!test_bit(tag, bqt->tag_map))) {
        printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
               __func__, tag);
        return;
    }
    /*
     * The tag_map bit acts as a lock for tag_index[bit], so we need
     * unlock memory barrier semantics.
     */
    clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
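
/*
 * Example (editorial sketch): completing a tagged request in the order the
 * description above requires: the tag is released before the request is
 * handed back to the block layer and freed. "mydev_complete_rq" and
 * "error" are hypothetical; the queue lock is held around both calls, as
 * both blk_queue_end_tag() and __blk_end_request_all() require.
 */
static void mydev_complete_rq(struct request_queue *q, struct request *rq,
                              int error)
{
    unsigned long flags;

    spin_lock_irqsave(q->queue_lock, flags);
    blk_queue_end_tag(q, rq);         /* release the tag first ... */
    __blk_end_request_all(rq, error); /* ... then complete and free rq */
    spin_unlock_irqrestore(q->queue_lock, flags);
}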
309
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q: the request queue for the device
 * @rq: the block request that needs tagging
 *
 * Description:
 * This can either be used as a stand-alone helper, or possibly be
 * assigned as the queue &prep_rq_fn (in which case &struct request
 * automagically gets a tag assigned). Note that this function
 * assumes that any type of request can be queued! If this is not
 * true for your device, you must check the request type before
 * calling this function. The request will also be removed from
 * the request queue, so it is the driver's responsibility to re-add
 * it if it needs to be restarted for some reason.
 *
 * Notes:
 * queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    unsigned max_depth;
    int tag;

    if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
        printk(KERN_ERR
               "%s: request %p for device [%s] already tagged %d",
               __func__, rq,
               rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
        BUG();
    }

    /*
     * Protect against shared tag maps, as we may not have exclusive
     * access to the tag map.
     *
     * We reserve a few tags just for sync IO, since we don't want
     * to starve sync IO on behalf of flooding async IO.
     */
    max_depth = bqt->max_depth;
    if (!rq_is_sync(rq) && max_depth > 1) {
        max_depth -= 2;
        if (!max_depth)
            max_depth = 1;
        if (q->in_flight[BLK_RW_ASYNC] > max_depth)
            return 1;
    }

    do {
        tag = find_first_zero_bit(bqt->tag_map, max_depth);
        if (tag >= max_depth)
            return 1;

    } while (test_and_set_bit_lock(tag, bqt->tag_map));
    /*
     * We need lock ordering semantics given by test_and_set_bit_lock.
     * See blk_queue_end_tag for details.
     */

    rq->cmd_flags |= REQ_QUEUED;
    rq->tag = tag;
    bqt->tag_index[tag] = rq;
    blk_start_request(rq);
    list_add(&rq->queuelist, &q->tag_busy_list);
    return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
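
/*
 * Example (editorial sketch): a request_fn dispatch loop built around
 * blk_queue_start_tag(). A non-zero return means no tag is available (or
 * the reserved sync-tag limit was hit for async IO); the request stays on
 * the queue and dispatch stops until a tag is released. Peek-then-tag is
 * the right pairing because blk_queue_start_tag() itself dequeues the
 * request via blk_start_request(). The block layer calls request_fn with
 * the queue lock held. "mydev_issue_to_hw" is a hypothetical
 * hardware-specific submit hook.
 */
static void mydev_issue_to_hw(struct request *rq); /* hypothetical */

static void mydev_request_fn(struct request_queue *q)
{
    struct request *rq;

    while ((rq = blk_peek_request(q)) != NULL) {
        if (blk_queue_start_tag(q, rq))
            break;  /* no tag free; retry when one is released */

        /* rq->tag now identifies this command to the hardware */
        mydev_issue_to_hw(rq);
    }
}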
377
/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q: the request queue for the device
 *
 * Description:
 * Hardware conditions may dictate a need to stop all pending requests.
 * In this case, we will safely clear the block side of the tag queue and
 * re-add all requests to the request queue in the right order.
 *
 * Notes:
 * queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
    struct list_head *tmp, *n;

    list_for_each_safe(tmp, n, &q->tag_busy_list)
        blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
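
/*
 * Example (editorial sketch): recovering from a controller reset by
 * returning every in-flight tagged request to the queue for re-dispatch.
 * Like the other tag operations, this must run under the queue lock.
 * "mydev_handle_reset" is hypothetical; a real driver would restart
 * dispatch once the hardware is ready again.
 */
static void mydev_handle_reset(struct request_queue *q)
{
    unsigned long flags;

    spin_lock_irqsave(q->queue_lock, flags);
    blk_queue_invalidate_tags(q);   /* all busy tags go back on the queue */
    spin_unlock_irqrestore(q->queue_lock, flags);
}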