/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

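/*
 * Tear down the CFQ side of @ioc.  The elevator hooks a destructor into
 * each cfq_io_context it attaches; invoking the first entry's ->dtor is
 * expected to release every entry on cic_list.
 */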
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/**
 * put_io_context - put a reference to an io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

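	/*
	 * cic entries are freed via RCU by the elevator; hold the read
	 * lock so the destructor walk can't race with that.
	 */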
	rcu_read_lock();
	cfq_dtor(ioc);
	rcu_read_unlock();

	kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

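/*
 * Counterpart of cfq_dtor() for task exit: the first entry's ->exit hook
 * lets the elevator disconnect its cfq_io_contexts before the context is
 * finally dropped.
 */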
static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/*
 * Called by the exiting task.  Detach the io_context so the task can no
 * longer reach it, let the elevator clean up if this was the last task
 * using the context, and drop the task's reference.
 */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);

	put_io_context(ioc);
}

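/*
 * Allocate and initialize a new io_context.  The caller receives the
 * only reference; nr_tasks starts at 1 for the task the context will be
 * attached to.
 */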
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
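	/* cic lookup tree; the atomic gfp mask allows insertion under locks */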
	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->cic_list);

	return ioc;
}

/**
 * current_io_context - get io_context of %current
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of %current.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context does NOT have its
 * reference count incremented.  Because io_context is exited only on task
 * exit, %current can be sure that the returned io_context is valid and
 * alive as long as it is executing.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ioc = current_io_context(gfp_flags, node);
		if (unlikely(!ioc))
			break;
	} while (!atomic_long_inc_not_zero(&ioc->refcount));

	return ioc;
}
EXPORT_SYMBOL(get_io_context);
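
/*
 * Typical caller pairing, as an illustrative sketch only (the node value
 * and the work done with the context are hypothetical):
 *
 *	struct io_context *ioc = get_io_context(GFP_NOIO, -1);
 *	if (ioc) {
 *		... inspect or update per-task I/O state ...
 *		put_io_context(ioc);
 *	}
 *
 * Contrast with current_io_context(), which returns the context without
 * elevating its refcount.
 */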

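/*
 * Create the slab cache io_contexts are allocated from.  SLAB_PANIC
 * makes boot fail loudly if the cache cannot be created.
 */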
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);