/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

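/*
 * Clear the DCMD-busy flag once no DCMD remains in flight, and clear
 * the queue-full flag unconditionally so that dispatch is retried.
 */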
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

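/*
 * Classify a request for the CQE path: driver-private and discard
 * requests are issued synchronously, a flush becomes a DCMD when the
 * host supports it, and reads/writes go through the command queue
 * asynchronously.
 */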
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

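/*
 * Determine how a request will be issued: via the CQE classification
 * above when a command queue engine is in use, otherwise reads and
 * writes are asynchronous and everything else is synchronous.
 */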
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

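/*
 * Schedule the recovery work at most once per recovery cycle. Callers
 * hold the queue lock, which serializes access to recovery_needed.
 */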
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

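/*
 * For CQE-owned requests, ask the host driver whether the request
 * really timed out: if it is still in flight, reset the timer
 * (scheduling recovery first when the driver asks for it); otherwise
 * the request has already completed. Synchronous requests are timed
 * by the mmc core, so only the timer is reset for them.
 */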
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				__mmc_cqe_recovery_notifier(mq);
			return BLK_EH_RESET_TIMER;
		}
		/* No timeout */
		return BLK_EH_HANDLED;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(q->queue_lock, flags);

	if (mq->recovery_needed || !mq->use_cqe)
		ret = BLK_EH_RESET_TIMER;
	else
		ret = mmc_cqe_timed_out(req);

	spin_unlock_irqrestore(q->queue_lock, flags);

	return ret;
}

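/*
 * Runs from the workqueue while dispatch is held off by
 * recovery_needed: claim the host, perform CQE or blk-mq recovery,
 * then restart the hardware queues that were backing off.
 */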
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(q->queue_lock);
	mq->recovery_needed = false;
	spin_unlock_irq(q->queue_lock);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

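/* Allocate and initialize a scatterlist with room for sg_len segments */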
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

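/*
 * Advertise discard support to the block layer, sized from the card's
 * erase capabilities, and flag secure erase separately when available.
 */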
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the mmc queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

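/*
 * blk-mq init/exit hooks: thin wrappers that recover the mmc_queue
 * from the tag set's driver_data and delegate to the helpers above.
 */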
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

/*
 * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
 * will not be dispatched in parallel.
 */
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(q->queue_lock);

	if (mq->recovery_needed) {
		spin_unlock_irq(q->queue_lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(q->queue_lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(q->queue_lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(q->queue_lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		spin_unlock_irq(q->queue_lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

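/*
 * Apply the host controller's DMA and segment limits to the block
 * queue, and set up the recovery and completion infrastructure.
 */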
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

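/*
 * Allocate a tag set with a single blocking hardware queue and create
 * the request queue on top of it. cmd_size makes the block layer
 * allocate a struct mmc_queue_req alongside every request.
 */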
static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
			     const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
	int ret;

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = mq_ops;
	mq->tag_set.queue_depth = q_depth;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
			    BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	mq->queue->queue_lock = lock;
	mq->queue->queuedata = mq;

	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);

	return ret;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

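/*
 * Set up the blk-mq queue with a depth appropriate to the host, apply
 * a 60 second default request timeout, and configure the queue limits.
 */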
static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
			 spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	int q_depth;
	int ret;

	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe)
		q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		q_depth = MMC_QUEUE_DEPTH;

	ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
	if (ret)
		return ret;

	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);

	return 0;
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;

	mq->card = card;

	mq->use_cqe = host->cqe_enabled;

	return mmc_mq_init(mq, card, lock);
}

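/*
 * Stop new requests from being dispatched and wait for outstanding
 * ones to complete, so the card can be suspended safely.
 */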
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

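/* Allow request dispatch to proceed again after mmc_queue_suspend() */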
void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

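/*
 * Tear down the request queue on card removal. The queue may still be
 * quiesced from a suspend, so it is unquiesced first to let
 * blk_cleanup_queue() drain outstanding requests.
 */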
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}