block/blk-ioc.c

/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
    BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
    atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
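
/*
 * Usage sketch (editorial comment, not in the upstream file): every
 * reference taken here must eventually be balanced by put_io_context(),
 * defined further below, once the new owner is done with the context.
 *
 *    get_io_context(ioc);    // new owner now holds a reference
 *    ...
 *    put_io_context(ioc);    // dropped when the owner is done
 */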

static void icq_free_icq_rcu(struct rcu_head *head)
{
    struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

    kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq. Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
    struct elevator_type *et = icq->q->elevator->type;

    if (icq->flags & ICQ_EXITED)
        return;

    if (et->ops.elevator_exit_icq_fn)
        et->ops.elevator_exit_icq_fn(icq);

    icq->flags |= ICQ_EXITED;
}

/* Release an icq. Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
    struct io_context *ioc = icq->ioc;
    struct request_queue *q = icq->q;
    struct elevator_type *et = q->elevator->type;

    lockdep_assert_held(&ioc->lock);
    lockdep_assert_held(q->queue_lock);

    radix_tree_delete(&ioc->icq_tree, icq->q->id);
    hlist_del_init(&icq->ioc_node);
    list_del_init(&icq->q_node);

    /*
     * Both setting lookup hint to and clearing it from @icq are done
     * under queue_lock. If it's not pointing to @icq now, it never
     * will. Hint assignment itself can race safely.
     */
    if (rcu_dereference_raw(ioc->icq_hint) == icq)
        rcu_assign_pointer(ioc->icq_hint, NULL);

    ioc_exit_icq(icq);

    /*
     * @icq->q might have gone away by the time RCU callback runs
     * making it impossible to determine icq_cache. Record it in @icq.
     */
    icq->__rcu_icq_cache = et->icq_cache;
    call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
    struct io_context *ioc = container_of(work, struct io_context,
                                          release_work);
    unsigned long flags;

    /*
     * Exiting icq may call into put_io_context() through elevator
     * which will trigger lockdep warning. The ioc's are guaranteed to
     * be different, use a different locking subclass here. Use
     * irqsave variant as there's no spin_lock_irq_nested().
     */
    spin_lock_irqsave_nested(&ioc->lock, flags, 1);

    while (!hlist_empty(&ioc->icq_list)) {
        struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                        struct io_cq, ioc_node);
        struct request_queue *q = icq->q;

        if (spin_trylock(q->queue_lock)) {
            ioc_destroy_icq(icq);
            spin_unlock(q->queue_lock);
        } else {
            spin_unlock_irqrestore(&ioc->lock, flags);
            cpu_relax();
            spin_lock_irqsave_nested(&ioc->lock, flags, 1);
        }
    }

    spin_unlock_irqrestore(&ioc->lock, flags);

    kmem_cache_free(iocontext_cachep, ioc);
}
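
/*
 * The trylock loop above is the standard way to take two locks against
 * their documented order without deadlocking. A minimal generic sketch
 * (editorial; hypothetical locks a and b, where others take b before a):
 *
 *    spin_lock(&a);
 *    while (!spin_trylock(&b)) {
 *        spin_unlock(&a);    // back off so a b-then-a holder can progress
 *        cpu_relax();
 *        spin_lock(&a);
 *    }
 *    // both locks held; in-order holders were never blocked on us
 */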

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
    unsigned long flags;
    bool free_ioc = false;

    if (ioc == NULL)
        return;

    BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

    /*
     * Releasing ioc requires reverse order double locking and we may
     * already be holding a queue_lock. Do it asynchronously from wq.
     */
    if (atomic_long_dec_and_test(&ioc->refcount)) {
        spin_lock_irqsave(&ioc->lock, flags);
        if (!hlist_empty(&ioc->icq_list))
            queue_work(system_power_efficient_wq,
                       &ioc->release_work);
        else
            free_ioc = true;
        spin_unlock_irqrestore(&ioc->lock, flags);
    }

    if (free_ioc)
        kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active(). If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
    unsigned long flags;
    struct io_cq *icq;

    if (!atomic_dec_and_test(&ioc->active_ref)) {
        put_io_context(ioc);
        return;
    }

    /*
     * Need ioc lock to walk icq_list and q lock to exit icq. Perform
     * reverse double locking. Read comment in ioc_release_fn() for
     * explanation on the nested locking annotation.
     */
retry:
    spin_lock_irqsave_nested(&ioc->lock, flags, 1);
    hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
        if (icq->flags & ICQ_EXITED)
            continue;
        if (spin_trylock(icq->q->queue_lock)) {
            ioc_exit_icq(icq);
            spin_unlock(icq->q->queue_lock);
        } else {
            spin_unlock_irqrestore(&ioc->lock, flags);
            cpu_relax();
            goto retry;
        }
    }
    spin_unlock_irqrestore(&ioc->lock, flags);

    put_io_context(ioc);
}
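
/*
 * Pairing sketch (editorial): the counterpart get_io_context_active(),
 * declared in <linux/iocontext.h>, takes both a plain reference and an
 * active reference, which is why both branches above end in
 * put_io_context().
 *
 *    get_io_context_active(ioc);    // task may now issue IO against ioc
 *    ...
 *    put_io_context_active(ioc);    // last active ref notifies ioscheds
 */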

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
    struct io_context *ioc;

    task_lock(task);
    ioc = task->io_context;
    task->io_context = NULL;
    task_unlock(task);

    atomic_dec(&ioc->nr_tasks);
    put_io_context_active(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
    lockdep_assert_held(q->queue_lock);

    while (!list_empty(&q->icq_list)) {
        struct io_cq *icq = list_entry(q->icq_list.next,
                                       struct io_cq, q_node);
        struct io_context *ioc = icq->ioc;

        spin_lock(&ioc->lock);
        ioc_destroy_icq(icq);
        spin_unlock(&ioc->lock);
    }
}
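
/*
 * Caller sketch (editorial): queue teardown and elevator-switch paths
 * are expected to take the queue lock themselves before severing icqs:
 *
 *    spin_lock_irq(q->queue_lock);
 *    ioc_clear_queue(q);
 *    spin_unlock_irq(q->queue_lock);
 */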

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
    struct io_context *ioc;
    int ret;

    ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                node);
    if (unlikely(!ioc))
        return -ENOMEM;

    /* initialize */
    atomic_long_set(&ioc->refcount, 1);
    atomic_set(&ioc->nr_tasks, 1);
    atomic_set(&ioc->active_ref, 1);
    spin_lock_init(&ioc->lock);
    INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
    INIT_HLIST_HEAD(&ioc->icq_list);
    INIT_WORK(&ioc->release_work, ioc_release_fn);

    /*
     * Try to install. ioc shouldn't be installed if someone else
     * already did or @task, which isn't %current, is exiting. Note
     * that we need to allow ioc creation on exiting %current as exit
     * path may issue IOs from e.g. exit_files(). The exit path is
     * responsible for not issuing IO after exit_io_context().
     */
    task_lock(task);
    if (!task->io_context &&
        (task == current || !(task->flags & PF_EXITING)))
        task->io_context = ioc;
    else
        kmem_cache_free(iocontext_cachep, ioc);

    ret = task->io_context ? 0 : -EBUSY;

    task_unlock(task);

    return ret;
}
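
/*
 * Return-value note (editorial): 0 means @task now has an io_context
 * installed, whether ours or one somebody else raced in first; -EBUSY
 * means installation was refused because @task is exiting. This is why
 * get_task_io_context() below retries its lookup on 0 and gives up on
 * any error.
 */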

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node)
{
    struct io_context *ioc;

    might_sleep_if(gfp_flags & __GFP_WAIT);

    do {
        task_lock(task);
        ioc = task->io_context;
        if (likely(ioc)) {
            get_io_context(ioc);
            task_unlock(task);
            return ioc;
        }
        task_unlock(task);
    } while (!create_task_io_context(task, gfp_flags, node));

    return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
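
/*
 * Fast-path sketch per the note above (editorial): for %current, the
 * task_lock() round trip can be skipped when an io_context already
 * exists, falling back to this function only on first use.
 *
 *    struct io_context *ioc = current->io_context;
 *
 *    if (ioc)
 *        get_io_context(ioc);
 *    else
 *        ioc = get_task_io_context(current, GFP_KERNEL, NUMA_NO_NODE);
 */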

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
    struct io_cq *icq;

    lockdep_assert_held(q->queue_lock);

    /*
     * icq's are indexed from @ioc using radix tree and hint pointer,
     * both of which are protected with RCU. All removals are done
     * holding both q and ioc locks, and we're holding q lock - if we
     * find an icq which points to us, it's guaranteed to be valid.
     */
    rcu_read_lock();
    icq = rcu_dereference(ioc->icq_hint);
    if (icq && icq->q == q)
        goto out;

    icq = radix_tree_lookup(&ioc->icq_tree, q->id);
    if (icq && icq->q == q)
        rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
    else
        icq = NULL;
out:
    rcu_read_unlock();
    return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
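
/*
 * Usage sketch (editorial): an elevator resolving the icq for the
 * current task while already holding @q->queue_lock; a NULL result
 * simply means no icq has been created for this ioc/q pair yet.
 *
 *    struct io_cq *icq = NULL;
 *
 *    if (current->io_context)
 *        icq = ioc_lookup_icq(current->io_context, q);
 */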

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                             gfp_t gfp_mask)
{
    struct elevator_type *et = q->elevator->type;
    struct io_cq *icq;

    /* allocate stuff */
    icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
                                q->node);
    if (!icq)
        return NULL;

    if (radix_tree_preload(gfp_mask) < 0) {
        kmem_cache_free(et->icq_cache, icq);
        return NULL;
    }

    icq->ioc = ioc;
    icq->q = q;
    INIT_LIST_HEAD(&icq->q_node);
    INIT_HLIST_NODE(&icq->ioc_node);

    /* lock both q and ioc and try to link @icq */
    spin_lock_irq(q->queue_lock);
    spin_lock(&ioc->lock);

    if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
        hlist_add_head(&icq->ioc_node, &ioc->icq_list);
        list_add(&icq->q_node, &q->icq_list);
        if (et->ops.elevator_init_icq_fn)
            et->ops.elevator_init_icq_fn(icq);
    } else {
        kmem_cache_free(et->icq_cache, icq);
        icq = ioc_lookup_icq(ioc, q);
        if (!icq)
            printk(KERN_ERR "cfq: icq link failed!\n");
    }

    spin_unlock(&ioc->lock);
    spin_unlock_irq(q->queue_lock);
    radix_tree_preload_end();
    return icq;
}
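
/*
 * Lookup-then-create sketch (editorial, loosely mirroring how request
 * allocation paths use this API): try the cheap RCU lookup first, and
 * only fall back to allocation on a miss. Note ioc_create_icq() takes
 * q->queue_lock itself, so the caller must drop it first.
 *
 *    icq = ioc_lookup_icq(ioc, q);    // under q->queue_lock
 *    if (!icq) {
 *        spin_unlock_irq(q->queue_lock);
 *        icq = ioc_create_icq(ioc, q, GFP_NOIO);
 *        spin_lock_irq(q->queue_lock);
 *    }
 */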

static int __init blk_ioc_init(void)
{
    iocontext_cachep = kmem_cache_create("blkdev_ioc",
            sizeof(struct io_context), 0, SLAB_PANIC, NULL);
    return 0;
}
subsys_initcall(blk_ioc_init);
