/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(put_io_context);
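
/*
 * Illustrative sketch, not code from this file: a caller that holds its own
 * reference drops it with put_io_context() when done, and may check the
 * return value to learn whether that was the final reference:
 *
 *	struct io_context *ioc = get_io_context(GFP_KERNEL, -1);
 *
 *	if (ioc) {
 *		(... use ioc ...)
 *		if (put_io_context(ioc))
 *			pr_debug("dropped the last io_context reference\n");
 *	}
 *
 * Callers that do not care when the context is actually freed can simply
 * ignore the return value.
 */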

static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks)) {
		cfq_exit(ioc);
	}

	put_io_context(ioc);
}
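
/*
 * A rough sketch of the lifetime rules assumed above, not code from this
 * file: ->nr_tasks counts the tasks currently attached to the context,
 * while ->refcount counts every reference, including one per attached task.
 * A clone that shares the parent's context (CLONE_IO) would therefore bump
 * both counters, roughly:
 *
 *	atomic_long_inc(&ioc->refcount);
 *	atomic_inc(&ioc->nr_tasks);
 *	new_task->io_context = ioc;
 *
 * so cfq_exit() runs only once the last attached task exits, and the memory
 * itself is freed only when the final reference is dropped.
 */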

struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_long_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = 0; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
	}

	return ret;
}
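
/*
 * Illustrative sketch, not code from this file: alloc_io_context() hands the
 * caller the only reference and does not publish the context anywhere, so a
 * path that wants a task to have its own private context rather than sharing
 * the parent's could do roughly the following (tsk and parent_ioc are
 * hypothetical names):
 *
 *	struct io_context *ioc;
 *
 *	ioc = alloc_io_context(GFP_KERNEL, -1);
 *	if (!ioc)
 *		return -ENOMEM;
 *	ioc->ioprio = parent_ioc->ioprio;
 *	tsk->io_context = ioc;
 */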

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}
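
/*
 * Illustrative sketch, not code from this file: because the returned pointer
 * is only guaranteed to stay valid while running as `current`, a typical
 * user looks it up and uses it inline without taking an extra reference
 * (the assignment below is just a hypothetical example of such a use):
 *
 *	struct io_context *ioc;
 *
 *	ioc = current_io_context(GFP_ATOMIC, -1);
 *	if (ioc)
 *		ioc->last_waited = jiffies;
 *
 * Anything that needs to keep the pointer beyond the current task's context
 * must call get_io_context() below instead, which takes a real reference.
 */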

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ret = current_io_context(gfp_flags, node);
		if (unlikely(!ret))
			break;
	} while (!atomic_long_inc_not_zero(&ret->refcount));

	return ret;
}
EXPORT_SYMBOL(get_io_context);
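
/*
 * Illustrative sketch, not code from this file: a consumer that stores the
 * context beyond the submitting task's lifetime pairs get_io_context() with
 * a later put_io_context().  Here q and rq stand for a request queue and a
 * request, and the use of rq->elevator_private is only an example of a
 * long-lived slot:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_io_context(GFP_ATOMIC, q->node);
 *	if (!ioc)
 *		return -ENOMEM;
 *	rq->elevator_private = ioc;
 *
 *	(... later, when the request is freed ...)
 *
 *	put_io_context(rq->elevator_private);
 *
 * The inc_not_zero loop above makes this safe against the submitting task
 * exiting concurrently and dropping its own references.
 */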

void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		BUG_ON(atomic_long_read(&src->refcount) == 0);
		atomic_long_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}
EXPORT_SYMBOL(copy_io_context);
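
/*
 * Illustrative sketch, not code from this file: copy_io_context() is a
 * reference-juggling helper.  It takes a new reference on *psrc and drops
 * whatever *pdst pointed at before, so handing a context from one owner
 * slot to another is a single call (both variable names are hypothetical):
 *
 *	copy_io_context(&rq_ioc, &submitter_ioc);
 *
 * Afterwards rq_ioc holds its own reference to the submitter's context, and
 * the context it previously pointed at (if any) has had put_io_context()
 * called on it.
 */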

static int __init blk_ioc_init(void)
{
	/* SLAB_PANIC: boot fails outright if the cache cannot be created */
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);