// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
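
/*
 * Illustrative sketch (not part of the original file): a holder that
 * already has a valid ioc pointer pins and later releases a reference
 * like so:
 *
 *	get_io_context(ioc);
 *	... issue IO on behalf of ioc ...
 *	put_io_context(ioc);
 */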

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to @icq and clearing @icq from it
	 * are done under queue_lock.  If it's not pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
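
/*
 * A note on the deferred free above: ioc_lookup_icq() dereferences
 * ioc->icq_hint under rcu_read_lock(), and the hint may still point at
 * an icq belonging to another queue that is being torn down.  The icq
 * memory therefore has to survive an RCU grace period after it is
 * unlinked, hence call_rcu() instead of an immediate kmem_cache_free().
 */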

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icqs and then frees the ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting an icq may call into put_io_context() through the
	 * elevator, which will trigger a lockdep warning.  The iocs are
	 * guaranteed to be different, so use a different locking subclass
	 * here.  Use the irqsave variant as there's no
	 * spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}
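
/*
 * The trylock/cpu_relax() loop above is the usual idiom for acquiring
 * two locks against their normal ordering without deadlocking: hold A,
 * trylock B, and on failure drop A, relax, and retry.  A generic sketch
 * of the same pattern (lock names here are illustrative only):
 *
 *	spin_lock(&a);
 *	while (!spin_trylock(&b)) {
 *		spin_unlock(&a);
 *		cpu_relax();
 *		spin_lock(&a);
 *	}
 *	... both locks held ...
 *	spin_unlock(&b);
 *	spin_unlock(&a);
 */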

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
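
/*
 * Illustrative pairing (the call site lives outside this file): the
 * task exit path drops the per-task references roughly like so:
 *
 *	if (tsk->io_context)
 *		exit_io_context(tsk);
 */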

static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
						struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
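
/*
 * Usage sketch (illustrative only): taking a counted reference on
 * another task's ioc, creating it on demand:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		... e.g. read or set io priority ...
 *		put_io_context(ioc);
 *	}
 */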

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking @ioc and @q exists.  If the icq doesn't
 * exist, it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
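
/*
 * Typical lookup-then-create pattern (a sketch of how an elevator might
 * bind an icq while setting up a request; locking shown is the legacy
 * queue_lock variant, and ioc_create_icq() must be called with the
 * queue_lock released since it takes it internally):
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 */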

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);