/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

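/*
 * Run the CFQ destructor for any cfq_io_context still attached to this
 * io_context.  Calling ->dtor() on the first entry is enough: the CFQ
 * callback is expected to tear down the entire cic_list.
 */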
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(put_io_context);

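/*
 * Mirror of cfq_dtor() above, but invoking the CFQ ->exit() hook:
 * runs when the last task using this io_context exits.
 */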
static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);

	put_io_context(ioc);
}

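/*
 * Allocate and initialise a fresh io_context: one reference, one task,
 * no I/O priority set.  Returns NULL if the slab allocation fails.
 */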
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_long_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = 0; /* doesn't matter: set when batching starts */
		ret->nr_batch_requests = 0; /* no batched requests outstanding yet */
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ret = current_io_context(gfp_flags, node);
		if (unlikely(!ret))
			break;
	} while (!atomic_long_inc_not_zero(&ret->refcount));

	return ret;
}
EXPORT_SYMBOL(get_io_context);
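
/*
 * Sketch of a typical caller (hypothetical, not part of this file):
 * take a counted reference from the submitting task, then drop it once
 * the I/O has been accounted.
 *
 *	struct io_context *ioc = get_io_context(GFP_NOIO, -1);
 *	if (ioc) {
 *		... use ioc ...
 *		put_io_context(ioc);
 *	}
 */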

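/*
 * Make *pdst share the io_context at *psrc: take a new reference on
 * src and drop the reference that dst previously held.  A NULL source
 * leaves dst untouched.
 */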
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		BUG_ON(atomic_long_read(&src->refcount) == 0);
		atomic_long_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}
EXPORT_SYMBOL(copy_io_context);

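/*
 * Boot-time setup: create the slab cache that all io_context
 * allocations come from.  SLAB_PANIC makes a failure here panic
 * rather than return an error.
 */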
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);