// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

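/*
 * RCU callback to free an icq.  The kmem_cache pointer is taken from the
 * icq itself: ioc_destroy_icq() records it there because the queue, and
 * with it the elevator type owning the cache, may be gone by the time
 * this callback runs.
 */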
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Put an active reference to an ioc.  If the active reference reaches
 * zero after put, @ioc can never issue further IOs and ioscheds are
 * notified.
 */
static void put_io_context_active(struct io_context *ioc)
{
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irq(&ioc->lock);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

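/*
 * Destroy all icqs on @icq_list, which the caller has already spliced off
 * the queue's icq_list.  The RCU read lock keeps the icqs alive while each
 * ioc lock is taken; icqs destroyed in the meantime (e.g. by a racing ioc
 * release) are skipped.
 */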
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
						struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

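/*
 * Allocate and minimally initialize a new io_context.  The returned ioc
 * starts with one reference, one active reference and one attached task.
 */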
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
	return ioc;
}

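/*
 * Allocate a new io_context and try to install it on @task.  Installation
 * is skipped if @task already has an io_context or if @task, not being
 * %current, is already exiting.  Returns @task's io_context with a
 * reference taken, or NULL.
 */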
static struct io_context *create_task_io_context(struct task_struct *task,
		gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = alloc_io_context(gfp_flags, node);
	if (!ioc)
		return NULL;

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ioc = task->io_context;
	if (ioc)
		get_io_context(ioc);
	task_unlock(task);
	return ioc;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	task_lock(task);
	ioc = task->io_context;
	if (unlikely(!ioc)) {
		task_unlock(task);
		return create_task_io_context(task, gfp_flags, node);
	}
	get_io_context(ioc);
	task_unlock(task);
	return ioc;
}

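/*
 * Set up the io_context of a newly forked task @tsk: share %current's
 * context when CLONE_IO is set, otherwise inherit just the io priority.
 */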
int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		atomic_long_inc(&ioc->refcount);
		atomic_inc(&ioc->active_ref);
		atomic_inc(&ioc->nr_tasks);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
		if (!tsk->io_context)
			return -ENOMEM;
		tsk->io_context->ioprio = ioc->ioprio;
	}

	return 0;
}

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure an io_cq linking %current's io_context and @q exists.  If the
 * icq doesn't exist, it is created with GFP_ATOMIC.
 *
 * The caller is responsible for ensuring the io_context won't go away and
 * that @q is alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

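/*
 * Return the io_cq linking %current's io_context and @q, creating the
 * io_context and/or the io_cq on demand.  On success a reference on the
 * io_context is held on behalf of the returned icq; on failure the
 * reference is dropped and NULL is returned.
 */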
struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (unlikely(!ioc)) {
		ioc = create_task_io_context(current, GFP_ATOMIC, q->node);
		if (!ioc)
			return NULL;
	} else {
		get_io_context(ioc);

		spin_lock_irq(&q->queue_lock);
		icq = ioc_lookup_icq(ioc, q);
		spin_unlock_irq(&q->queue_lock);
	}

	if (!icq) {
		icq = ioc_create_icq(q);
		if (!icq) {
			put_io_context(ioc);
			return NULL;
		}
	}
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);

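/* Set up the slab cache for io_contexts at boot. */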
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);