block/blk-tag.c

/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q: The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 * Should be used when a device returns a tag and you want to match
 * it with a request.
 *
 * no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
    return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
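
/*
 * Example (editorial sketch, not part of the original file): a driver's
 * completion handler can use blk_queue_find_tag() to map a hardware
 * completion tag back to the request that carries it. The function name
 * my_lookup_completed_tag and the "mydrv" prefix are hypothetical.
 */
#if 0
static struct request *my_lookup_completed_tag(struct request_queue *q,
                                               int hw_tag)
{
    struct request *rq = blk_queue_find_tag(q, hw_tag);

    if (unlikely(!rq))
        printk(KERN_ERR "mydrv: completion for unknown tag %d\n", hw_tag);

    return rq;
}
#endif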

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * Tries to free the specified @bqt. Returns true if it was
 * actually freed and false if there are still references using it
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
    int retval;

    retval = atomic_dec_and_test(&bqt->refcnt);
    if (retval) {
        BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                            bqt->max_depth);

        kfree(bqt->tag_index);
        bqt->tag_index = NULL;

        kfree(bqt->tag_map);
        bqt->tag_map = NULL;

        kfree(bqt);
    }

    return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 * blk_cleanup_queue() will take care of calling this function, if tagging
 * has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
    struct blk_queue_tag *bqt = q->queue_tags;

    if (!bqt)
        return;

    __blk_free_tags(bqt);

    q->queue_tags = NULL;
    queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * For an externally managed @bqt, frees the map. Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
    if (unlikely(!__blk_free_tags(bqt)))
        BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 * This is used to disable tagged queuing on a device, yet leave the
 * queue otherwise functional.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
    queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
    struct request **tag_index;
    unsigned long *tag_map;
    int nr_ulongs;

    if (q && depth > q->nr_requests * 2) {
        depth = q->nr_requests * 2;
        printk(KERN_ERR "%s: adjusted depth to %d\n",
               __func__, depth);
    }

    tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
    if (!tag_index)
        goto fail;

    nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
    tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
    if (!tag_map)
        goto fail;

    tags->real_max_depth = depth;
    tags->max_depth = depth;
    tags->tag_index = tag_index;
    tags->tag_map = tag_map;

    return 0;
fail:
    kfree(tag_index);
    return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                           int depth)
{
    struct blk_queue_tag *tags;

    tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
    if (!tags)
        goto fail;

    if (init_tag_map(q, tags, depth))
        goto fail;

    atomic_set(&tags->refcnt, 1);
    return tags;
fail:
    kfree(tags);
    return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth: the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
    return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
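
/*
 * Example (editorial sketch, not part of the original file): an HBA
 * driver sharing one tag space across several request queues could
 * allocate the map once with blk_init_tags() and pass it to each queue
 * via blk_queue_init_tags(). struct my_host, MY_CAN_QUEUE and the
 * function names are hypothetical.
 */
#if 0
#define MY_CAN_QUEUE 256                /* hypothetical host queue depth */

struct my_host {
    struct blk_queue_tag *bqt;          /* host-wide, shared tag map */
};

static int my_host_alloc_tags(struct my_host *host)
{
    host->bqt = blk_init_tags(MY_CAN_QUEUE);
    return host->bqt ? 0 : -ENOMEM;
}

/* per-queue setup: every queue references the same host-wide map */
static int my_queue_setup_tags(struct my_host *host, struct request_queue *q)
{
    return blk_queue_init_tags(q, MY_CAN_QUEUE, host->bqt);
}

/* teardown, only after all queues using the map have been released */
static void my_host_free_tags(struct my_host *host)
{
    blk_free_tags(host->bqt);
}
#endif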

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the tag map to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
            struct blk_queue_tag *tags)
{
    int rc;

    BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

    if (!tags && !q->queue_tags) {
        tags = __blk_queue_init_tags(q, depth);

        if (!tags)
            goto fail;
    } else if (q->queue_tags) {
        rc = blk_queue_resize_tags(q, depth);
        if (rc)
            return rc;
        queue_flag_set(QUEUE_FLAG_QUEUED, q);
        return 0;
    } else
        atomic_inc(&tags->refcnt);

    /*
     * assign it, all done
     */
    q->queue_tags = tags;
    queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
    INIT_LIST_HEAD(&q->tag_busy_list);
    return 0;
fail:
    kfree(tags);
    return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
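
/*
 * Example (editorial sketch, not part of the original file): a driver
 * with a private, per-queue tag map passes NULL for @tags and lets the
 * block layer allocate one. MY_QUEUE_DEPTH and the function name are
 * hypothetical.
 */
#if 0
#define MY_QUEUE_DEPTH 64               /* hypothetical per-device depth */

static int my_dev_enable_tcq(struct request_queue *q)
{
    /* first call on this queue: allocates and installs a fresh map */
    return blk_queue_init_tags(q, MY_QUEUE_DEPTH, NULL);
}
#endif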

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q: the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 * Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    struct request **tag_index;
    unsigned long *tag_map;
    int max_depth, nr_ulongs;

    if (!bqt)
        return -ENXIO;

    /*
     * If we already have a large enough real_max_depth, just
     * adjust max_depth. *NOTE* as requests with tag values
     * between new_depth and real_max_depth can be in-flight, the
     * tag map cannot be shrunk blindly here.
     */
    if (new_depth <= bqt->real_max_depth) {
        bqt->max_depth = new_depth;
        return 0;
    }

    /*
     * Currently we cannot replace a shared tag map with a new
     * one, so error out if this is the case.
     */
    if (atomic_read(&bqt->refcnt) != 1)
        return -EBUSY;

    /*
     * save the old state info, so we can copy it back
     */
    tag_index = bqt->tag_index;
    tag_map = bqt->tag_map;
    max_depth = bqt->real_max_depth;

    if (init_tag_map(q, bqt, new_depth))
        return -ENOMEM;

    memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
    nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
    memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

    kfree(tag_index);
    kfree(tag_map);
    return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
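
/*
 * Example (editorial sketch, not part of the original file): a
 * change_queue_depth-style hook might adjust the tag space at runtime.
 * The locking follows the "queue lock must be held" note above;
 * my_change_queue_depth is a hypothetical name.
 */
#if 0
static int my_change_queue_depth(struct request_queue *q, int new_depth)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(q->queue_lock, flags);
    ret = blk_queue_resize_tags(q, new_depth);
    spin_unlock_irqrestore(q->queue_lock, flags);

    return ret;
}
#endif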

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q: the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 * Typically called when end_that_request_first() returns %0, meaning
 * all transfers have been done for a request. It's important to call
 * this function before end_that_request_last(), as that will put the
 * request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 * queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    int tag = rq->tag;

    BUG_ON(tag == -1);

    if (unlikely(tag >= bqt->real_max_depth))
        /*
         * This can happen after tag depth has been reduced.
         * FIXME: how about a warning or info message here?
         */
        return;

    list_del_init(&rq->queuelist);
    rq->cmd_flags &= ~REQ_QUEUED;
    rq->tag = -1;

    if (unlikely(bqt->tag_index[tag] == NULL))
        printk(KERN_ERR "%s: tag %d is missing\n",
               __func__, tag);

    bqt->tag_index[tag] = NULL;

    if (unlikely(!test_bit(tag, bqt->tag_map))) {
        printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
               __func__, tag);
        return;
    }
    /*
     * The tag_map bit acts as a lock for tag_index[bit], so we need
     * unlock memory barrier semantics.
     */
    clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
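
/*
 * Example (editorial sketch, not part of the original file): as the
 * description above warns, the tag must be released before the request
 * is finally completed, or the freed request could be recycled while
 * still indexed in the tag map. my_finish_request is a hypothetical
 * helper; the queue lock is assumed held.
 */
#if 0
static void my_finish_request(struct request_queue *q, struct request *rq,
                              int error)
{
    if (blk_rq_tagged(rq))
        blk_queue_end_tag(q, rq);       /* release the tag first */
    __blk_end_request_all(rq, error);   /* then complete the request */
}
#endif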

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q: the request queue for the device
 * @rq: the block request that needs tagging
 *
 * Description:
 * This can either be used as a stand-alone helper, or possibly be
 * assigned as the queue &prep_rq_fn (in which case &struct request
 * automagically gets a tag assigned). Note that this function
 * assumes that any type of request can be queued! If this is not
 * true for your device, you must check the request type before
 * calling this function. The request will also be removed from
 * the request queue, so it is the driver's responsibility to re-add
 * it if it should need to be restarted for some reason.
 *
 * Notes:
 * queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    unsigned max_depth;
    int tag;

    if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
        printk(KERN_ERR
               "%s: request %p for device [%s] already tagged %d\n",
               __func__, rq,
               rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
        BUG();
    }

    /*
     * Protect against shared tag maps, as we may not have exclusive
     * access to the tag map.
     *
     * We reserve a few tags just for sync IO, since we don't want
     * to starve sync IO on behalf of flooding async IO.
     */
    max_depth = bqt->max_depth;
    if (!rq_is_sync(rq) && max_depth > 1) {
        max_depth -= 2;
        if (!max_depth)
            max_depth = 1;
        if (q->in_flight[BLK_RW_ASYNC] > max_depth)
            return 1;
    }

    do {
        tag = find_first_zero_bit(bqt->tag_map, max_depth);
        if (tag >= max_depth)
            return 1;

    } while (test_and_set_bit_lock(tag, bqt->tag_map));
    /*
     * We need lock ordering semantics given by test_and_set_bit_lock.
     * See blk_queue_end_tag for details.
     */

    rq->cmd_flags |= REQ_QUEUED;
    rq->tag = tag;
    bqt->tag_index[tag] = rq;
    blk_start_request(rq);
    list_add(&rq->queuelist, &q->tag_busy_list);
    return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
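
/*
 * Example (editorial sketch, not part of the original file): a typical
 * request_fn loop tags each request and stops issuing when the tag
 * space is exhausted (blk_queue_start_tag() returns 1), leaving the
 * request on the queue for a later retry. my_issue_to_hw is a
 * hypothetical hardware-submit helper; request_fn runs with the queue
 * lock held.
 */
#if 0
static void my_issue_to_hw(struct request *rq);

static void my_request_fn(struct request_queue *q)
{
    struct request *rq;

    while ((rq = blk_peek_request(q)) != NULL) {
        /* on success this also starts and dequeues the request */
        if (blk_queue_start_tag(q, rq))
            break;  /* out of tags: retry after the next completion */

        my_issue_to_hw(rq);
    }
}
#endif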

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q: the request queue for the device
 *
 * Description:
 * Hardware conditions may dictate a need to stop all pending requests.
 * In this case, we will safely clear the block side of the tag queue and
 * re-add all requests to the request queue in the right order.
 *
 * Notes:
 * queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
    struct list_head *tmp, *n;

    list_for_each_safe(tmp, n, &q->tag_busy_list)
        blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
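
/*
 * Example (editorial sketch, not part of the original file): after a
 * controller or bus reset, a driver could hand every in-flight request
 * back to the queue and let the block layer reissue it. my_reset_hw is
 * a hypothetical hardware-reset helper.
 */
#if 0
static void my_reset_hw(void);

static void my_handle_bus_reset(struct request_queue *q)
{
    unsigned long flags;

    spin_lock_irqsave(q->queue_lock, flags);
    my_reset_hw();
    /* requeue every outstanding tagged request, in order */
    blk_queue_invalidate_tags(q);
    spin_unlock_irqrestore(q->queue_lock, flags);
}
#endif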
