// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

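/*
 * Clear the DCMD busy flag once no DCMD is in flight, and always clear the
 * queue-full flag, so that blocked requests can be dispatched again.
 */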
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

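/*
 * Recovery notifier for CQE requests: schedules the recovery work at most
 * once. May be called from interrupt context, hence the irqsave locking.
 */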
void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

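/*
 * Handle a block layer timeout for a CQE request: ask the host whether the
 * request is still pending and either reset the timer (possibly triggering
 * recovery) or complete the request.
 */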
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				__mmc_cqe_recovery_notifier(mq);
			return BLK_EH_RESET_TIMER;
		}
		/*
		 * The host reports that the request is no longer pending, so
		 * complete it here rather than resetting the timer.
		 */
		blk_mq_complete_request(req);
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

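/*
 * blk-mq ->timeout() handler. While recovery is in progress, or when the
 * queue does not use a CQE, simply reset the timer and let the MMC core
 * deal with the timeout.
 */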
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mq->lock, flags);

	if (mq->recovery_needed || !mq->use_cqe)
		ret = BLK_EH_RESET_TIMER;
	else
		ret = mmc_cqe_timed_out(req);

	spin_unlock_irqrestore(&mq->lock, flags);

	return ret;
}

static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

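/*
 * Derive the request queue's discard limits from the card's erase
 * capabilities.
 */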
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}

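/*
 * When the DMA layer can merge scatterlist entries, more segments can be
 * accepted per request than the host controller's own limit.
 */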
static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the MMC queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

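/*
 * blk-mq ->queue_rq() handler: account the request as in-flight, claim the
 * host for the first outstanding request and hand the request to the MMC
 * block driver for issuing.
 */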
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

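/*
 * Apply the host's and card's capabilities and limits to the block request
 * queue.
 */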
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card))
		block_size = card->ext_csd.data_sector_size;

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * When blk_queue_can_use_dma_map_merging() succeeds it sets the
	 * queue's virt_boundary via blk_queue_virt_boundary(), in which case
	 * blk_queue_max_segment_size() must not also be called.
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;
	
	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, so
	 * host->can_dma_map_merge must be set up beforehand for
	 * mmc_get_max_segments() to return the correct number of segments.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		mq->queue->backing_dev_info->capabilities |=
			BDI_CAP_STABLE_WRITES;

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}

void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}