/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

/*
 * Releasing ioc may nest into another put_io_context() leading to nested
 * fast path release.  As the ioc's can't be the same, this is okay but
 * makes lockdep whine.  Keep track of nesting and use it as subclass.
 */
#ifdef CONFIG_LOCKDEP
#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
#else
#define ioc_release_depth(q)		0
#define ioc_release_depth_inc(q)	do { } while (0)
#define ioc_release_depth_dec(q)	do { } while (0)
#endif
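
/*
 * The release depth is passed to spin_lock_irqsave_nested() as the lockdep
 * subclass when put_io_context() grabs ioc->lock below.
 */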

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	struct request_queue *last_q = NULL;

	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			/*
			 * Need to switch to @this_q.  Once we release
			 * @ioc->lock, it can go away along with @icq.
			 * Hold on to it.
			 */
			__blk_get_queue(this_q);

			/*
			 * blk_put_queue() might sleep thanks to kobject
			 * idiocy.  Always release both locks, put and
			 * restart.
			 */
			if (last_q) {
				spin_unlock(last_q->queue_lock);
				spin_unlock_irq(&ioc->lock);
				blk_put_queue(last_q);
			} else {
				spin_unlock_irq(&ioc->lock);
			}

			last_q = this_q;
			spin_lock_irq(this_q->queue_lock);
			spin_lock(&ioc->lock);
			continue;
		}
		ioc_release_depth_inc(this_q);
		icq->exit(icq);
		icq->release(icq);
		ioc_release_depth_dec(this_q);
	}

	if (last_q) {
		spin_unlock(last_q->queue_lock);
		spin_unlock_irq(&ioc->lock);
		blk_put_queue(last_q);
	} else {
		spin_unlock_irq(&ioc->lock);
	}

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 * @locked_q: request_queue the caller is holding queue_lock of (hint)
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.  If the caller is holding queue_lock of a queue, it can indicate
 * that with @locked_q.  This is an optimization hint and the caller is
 * allowed to pass in %NULL even when it's holding a queue_lock.
 */
void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
{
	struct request_queue *last_q = locked_q;
	unsigned long flags;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (locked_q)
		lockdep_assert_held(locked_q->queue_lock);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	/*
	 * Destroy @ioc.  This is a bit messy because icq's are chained
	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
	 * The inner ioc->lock should be held to walk our icq_list and then
	 * for each icq the outer matching queue_lock should be grabbed.
	 * i.e. we need to do reverse-order double-lock dancing.
	 *
	 * Another twist is that we are often called with one of the
	 * matching queue_locks held as indicated by @locked_q, which
	 * prevents performing the double-lock dance for other queues.
	 *
	 * So, we do it in two stages.  The fast path uses the queue_lock
	 * the caller is holding and, if other queues need to be accessed,
	 * uses trylock to avoid introducing a locking dependency.  This can
	 * handle most cases, especially if @ioc was performing IO on only a
	 * single device.
	 *
	 * If trylock doesn't cut it, we defer to @ioc->release_work which
	 * can do all the double-locking dancing.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags,
				 ioc_release_depth(locked_q));

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			if (last_q && last_q != locked_q)
				spin_unlock(last_q->queue_lock);
			last_q = NULL;

			if (!spin_trylock(this_q->queue_lock))
				break;
			last_q = this_q;
			continue;
		}
		ioc_release_depth_inc(this_q);
		icq->exit(icq);
		icq->release(icq);
		ioc_release_depth_dec(this_q);
	}

	if (last_q && last_q != locked_q)
		spin_unlock(last_q->queue_lock);

	spin_unlock_irqrestore(&ioc->lock, flags);

	/* if no icq is left, we're done; otherwise, kick release_work */
	if (hlist_empty(&ioc->icq_list))
		kmem_cache_free(iocontext_cachep, ioc);
	else
		schedule_work(&ioc->release_work);
}
EXPORT_SYMBOL(put_io_context);
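
/*
 * Example (hypothetical caller, not taken from this file): @locked_q is only
 * a hint, so both of the following are valid ways to drop a reference:
 *
 *	put_io_context(ioc, q);		caller holds q->queue_lock
 *	put_io_context(ioc, NULL);	no queue_lock held (always allowed)
 */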

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	/* PF_EXITING prevents new io_context from being attached to @task */
	WARN_ON_ONCE(!(current->flags & PF_EXITING));

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context(ioc, NULL);
}

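/**
 * create_io_context_slowpath - allocate and install a new io_context
 * @task: task to attach the new io_context to
 * @gfp_flags: allocation flags
 * @node: allocation node
 *
 * Allocate and initialize an io_context and try to install it on @task.
 * If @task already has one or is exiting, the new allocation is freed
 * instead.  Callers normally reach this through create_io_context().
 */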
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/* try to install, somebody might already have beaten us to it */
	task_lock(task);
	if (!task->io_context && !(task->flags & PF_EXITING))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}
EXPORT_SYMBOL(create_io_context_slowpath);
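
/*
 * create_io_context() itself is not defined in this file.  Conceptually it
 * is a fast-path wrapper along these lines (sketch only, assuming a static
 * inline helper in a shared block-layer header such as blk.h):
 *
 *	static inline struct io_context *create_io_context(
 *			struct task_struct *task, gfp_t gfp_mask, int node)
 *	{
 *		if (unlikely(!task->io_context))
 *			create_io_context_slowpath(task, gfp_mask, node);
 *		return task->io_context;
 *	}
 */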

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
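
/*
 * Example (hypothetical caller): take a reference on the current task's
 * io_context and drop it when done.
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(current, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		(use ioc, e.g. inspect ioc->ioprio)
 *		put_io_context(ioc, NULL);
 *	}
 */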

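/*
 * Flag @which as changed on every icq of @ioc.  Both callers below hold
 * @ioc->lock across the list walk.
 */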
void ioc_set_changed(struct io_context *ioc, int which)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		set_bit(which, &icq->changed);
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's.  iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
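
/*
 * An I/O scheduler is expected to consume these flags on the request issue
 * path, roughly as follows (hypothetical consumer, not part of this file):
 *
 *	if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &icq->changed))
 *		(re-read the ioc's ioprio and update scheduler state)
 *	if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &icq->changed))
 *		(re-evaluate the cgroup association)
 */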

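/* Create the slab cache used for io_context allocations at boot. */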
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);