block/blk-ioc.c

/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

static void cfq_dtor(struct io_context *ioc)
{
    if (!hlist_empty(&ioc->cic_list)) {
        struct cfq_io_context *cic;

        cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
                                cic_list);
        cic->dtor(ioc);
    }
}
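
list_entry() is container_of() under the hood, which is why it can recover
the enclosing cfq_io_context from a raw hlist_node pointer even though
cic_list is an hlist (hlist_entry() is the matching helper and expands to
the same container_of()). A minimal sketch of the pattern, with
hypothetical names:

    struct foo {
        int data;
        struct hlist_node link;
    };

    /* Recover the containing structure from its embedded hlist_node. */
    static struct foo *first_foo(struct hlist_head *head)
    {
        return list_entry(head->first, struct foo, link);
    }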

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
    if (ioc == NULL)
        return 1;

    BUG_ON(atomic_long_read(&ioc->refcount) == 0);

    if (atomic_long_dec_and_test(&ioc->refcount)) {
        rcu_read_lock();
        cfq_dtor(ioc);
        rcu_read_unlock();

        kmem_cache_free(iocontext_cachep, ioc);
        return 1;
    }
    return 0;
}
EXPORT_SYMBOL(put_io_context);
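
put_io_context() pairs with any reference taken on an io_context; it
tolerates a NULL pointer and frees the context on the final put. A minimal
sketch of the discipline, assuming a hypothetical caller that pins the
context across some work:

    /* Hypothetical caller: pin the ioc, use it, drop the reference. */
    static void example_use_ioc(struct io_context *ioc)
    {
        atomic_long_inc(&ioc->refcount);    /* take a reference */
        /* ... use ioc ... */
        put_io_context(ioc);                /* frees it on the last put */
    }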

static void cfq_exit(struct io_context *ioc)
{
    rcu_read_lock();

    if (!hlist_empty(&ioc->cic_list)) {
        struct cfq_io_context *cic;

        cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
                                cic_list);
        cic->exit(ioc);
    }
    rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
    struct io_context *ioc;

    task_lock(task);
    ioc = task->io_context;
    task->io_context = NULL;
    task_unlock(task);

    if (atomic_dec_and_test(&ioc->nr_tasks))
        cfq_exit(ioc);

    put_io_context(ioc);
}
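
exit_io_context() drops the task's slot in nr_tasks (running the CFQ exit
hooks when the last task goes away) and then the reference installed by
current_io_context(). A hedged sketch of the call site in the task-exit
path (the exact code in kernel/exit.c may differ):

    /* In do_exit(), roughly: */
    if (tsk->io_context)
        exit_io_context(tsk);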

struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
    struct io_context *ret;

    ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
    if (ret) {
        atomic_long_set(&ret->refcount, 1);
        atomic_set(&ret->nr_tasks, 1);
        spin_lock_init(&ret->lock);
        ret->ioprio_changed = 0;
        ret->ioprio = 0;
        ret->last_waited = 0; /* doesn't matter... */
        ret->nr_batch_requests = 0; /* because this is 0 */
        INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
        INIT_HLIST_HEAD(&ret->cic_list);
        ret->ioc_data = NULL;
    }

    return ret;
}
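
The new context starts with refcount and nr_tasks both set to 1: one
reference and one task slot for the caller that installs it. A minimal
usage sketch, passing -1 for "no NUMA node preference":

    struct io_context *ioc = alloc_io_context(GFP_KERNEL, -1);

    if (!ioc)
        return -ENOMEM;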

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
    struct task_struct *tsk = current;
    struct io_context *ret;

    ret = tsk->io_context;
    if (likely(ret))
        return ret;

    ret = alloc_io_context(gfp_flags, node);
    if (ret) {
        /* make sure set_task_ioprio() sees the settings above */
        smp_wmb();
        tsk->io_context = ret;
    }

    return ret;
}
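
Because no extra reference is taken, the returned pointer is only safe
while execution stays in the current task. A hypothetical example in that
spirit, loosely modelled on the request-batching logic in blk-core.c (the
function name and batch budget are illustrative):

    static void example_set_batching(struct request_queue *q)
    {
        struct io_context *ioc = current_io_context(GFP_ATOMIC, q->node);

        if (ioc) {
            ioc->nr_batch_requests = 32;    /* illustrative batch budget */
            ioc->last_waited = jiffies;
        }
    }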

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
    struct io_context *ret = NULL;

    /*
     * Check for unlikely race with exiting task. ioc ref count is
     * zero when ioc is being detached.
     */
    do {
        ret = current_io_context(gfp_flags, node);
        if (unlikely(!ret))
            break;
    } while (!atomic_long_inc_not_zero(&ret->refcount));

    return ret;
}
EXPORT_SYMBOL(get_io_context);
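
The retry loop covers the window where the increment races with teardown:
atomic_long_inc_not_zero() refuses to resurrect a context whose refcount
has already dropped to zero, so the caller never ends up holding a dying
context. A sketch of a caller that keeps the context beyond the submitting
task, with a hypothetical request queue:

    struct io_context *ioc = get_io_context(GFP_NOIO, q->node);

    if (ioc) {
        /* ... safe to use even after the submitting task exits ... */
        put_io_context(ioc);
    }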

void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
    struct io_context *src = *psrc;
    struct io_context *dst = *pdst;

    if (src) {
        BUG_ON(atomic_long_read(&src->refcount) == 0);
        atomic_long_inc(&src->refcount);
        put_io_context(dst);
        *pdst = src;
    }
}
EXPORT_SYMBOL(copy_io_context);
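
copy_io_context() makes *pdst share src's context, dropping whatever dst
previously pointed at; since put_io_context() accepts NULL, *pdst may
legitimately start out NULL. A hypothetical sharing example:

    /* Make a worker share the submitter's io context. */
    static void example_share_ioc(struct io_context **worker_ioc,
                                  struct io_context *submitter_ioc)
    {
        copy_io_context(worker_ioc, &submitter_ioc);
    }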

static int __init blk_ioc_init(void)
{
    iocontext_cachep = kmem_cache_create("blkdev_ioc",
            sizeof(struct io_context), 0, SLAB_PANIC, NULL);
    return 0;
}
subsys_initcall(blk_ioc_init);
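
SLAB_PANIC makes the kernel panic if the cache cannot be created, so the
result needs no error check here. With the KMEM_CACHE() convenience macro
from <linux/slab.h>, the same cache could be created in one line, at the
cost of the cache being named "io_context" rather than "blkdev_ioc":

    iocontext_cachep = KMEM_CACHE(io_context, SLAB_PANIC);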