/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include "core.h"
#include "card.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */

static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq for two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
		complete_all(&mrq->cmd_completion);
}

void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);
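
/*
 * Illustrative sketch (not from this file): a host controller driver that
 * supports mrq->cap_cmd_during_tfr calls mmc_command_done() as soon as the
 * command line is idle, even though the data transfer may still be running.
 * The foo-prefixed names are hypothetical driver fields:
 *
 *	if (foo->mrq->cap_cmd_during_tfr)
 *		mmc_command_done(foo->mmc, foo->mrq);
 */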

/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ) ||
	    (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->sbc->opcode,
				mrq->sbc->error,
				mrq->sbc->resp[0], mrq->sbc->resp[1],
				mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);
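
/*
 * Illustrative sketch (not from this file): a host controller driver
 * typically invokes mmc_request_done() from its interrupt handler once the
 * hardware has finished with a request. All foo-prefixed names below are
 * hypothetical:
 *
 *	static irqreturn_t foo_mmc_irq(int irq, void *dev_id)
 *	{
 *		struct foo_mmc_host *foo = dev_id;
 *
 *		foo_mmc_read_response(foo, foo->mrq->cmd);
 *		mmc_request_done(foo->mmc, foo->mrq);
 *		return IRQ_HANDLED;
 *	}
 */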

static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For SDIO R/W commands we must wait for the card to stop signalling
	 * busy, otherwise some SDIO devices won't work properly.
	 */
	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
		int tries = 500; /* Wait approx. 500 ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * The retry path could come through here without having waited on
		 * cmd_completion, so ensure it is reinitialised.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	host->ops->request(host, mrq);
}

static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif
	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		if (mrq->data->blksz > host->max_blk_size ||
		    mrq->data->blocks > host->max_blk_count ||
		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
			return -EINVAL;
#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		if (sz != mrq->data->blocks * mrq->data->blksz)
			return -EINVAL;
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}

/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in an R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, 0,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is
	 * executed synchronously; otherwise the operation continues
	 * in the background.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	struct mmc_context_info *context_info = &mrq->host->context_info;

	context_info->is_done_rcv = true;
	wake_up_interruptible(&context_info->wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}

/*
 * __mmc_start_data_req() - start a data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when the request is completed by the
 * card. Starts the data request execution.
 * If an ongoing transfer is already in progress, wait for the command line
 * to become available before sending another command.
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	mrq->done = mmc_wait_data_done;
	mrq->host = host;

	init_completion(&mrq->cmd_completion);

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		mmc_wait_data_done(mrq);
	}

	return err;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	init_completion(&mrq->cmd_completion);

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}

/*
 * mmc_wait_for_data_req_done() - wait for a request to complete
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acknowledges completion
 * of the data request, or until a new request notification arrives from the
 * block layer.
 * Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
						      struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	enum mmc_blk_status status;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));

		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			cmd = mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				status = host->areq->err_check(host->card,
							       host->areq);
				break; /* return status */
			} else {
				mmc_retune_recheck(host);
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				__mmc_start_request(host, mrq);
				continue; /* wait for done/new event again */
			}
		}

		return MMC_BLK_NEW_REQUEST;
	}
	mmc_retune_release(host);
	return status;
}

void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);

/**
 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 *	@host: MMC host
 *	@mrq: MMC request
 *
 *	mmc_is_req_done() is used with requests that have
 *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 *	starting a request and before waiting for it to complete. That is,
 *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 *	and before mmc_wait_for_req_done(). If it is called at other times the
 *	result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->areq)
		return host->context_info.is_done_rcv;
	else
		return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
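
/*
 * Illustrative sketch (not from this file) of the polling pattern described
 * above for a 'cap_cmd_during_tfr' request; issue_other_cmd() stands in for
 * a hypothetical helper that sends commands not using the data lines:
 *
 *	mmc_wait_for_req(host, mrq);	(returns with the transfer ongoing)
 *	while (!mmc_is_req_done(host, mrq))
 *		issue_other_cmd(host);
 *	mmc_wait_for_req_done(host, mrq);
 */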

/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let the
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq);
}

/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}

/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@ret_stat: out parameter for the resulting request status (may be NULL)
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request and start the new one and return.
 *	Does not wait for the new request to complete.
 *
 *      Returns the completed request, NULL in case of none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq,
				    enum mmc_blk_status *ret_stat)
{
	enum mmc_blk_status status = MMC_BLK_SUCCESS;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq);

	if (host->areq) {
		status = mmc_wait_for_data_req_done(host, host->areq->mrq);
		if (status == MMC_BLK_NEW_REQUEST) {
			if (ret_stat)
				*ret_stat = status;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {

			/* Cancel the prepared request */
			if (areq)
				mmc_post_req(host, areq->mrq, -EINVAL);

			mmc_start_bkops(host->card, true);

			/* prepare the request again */
			if (areq)
				mmc_pre_req(host, areq->mrq);
		}
	}

	if (status == MMC_BLK_SUCCESS && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((status != MMC_BLK_SUCCESS || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (status != MMC_BLK_SUCCESS)
		host->areq = NULL;
	else
		host->areq = areq;

	if (ret_stat)
		*ret_stat = status;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
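
/*
 * Illustrative sketch (not from this file): callers such as the block
 * driver use mmc_start_req() to pipeline requests, preparing the next one
 * while the previous is still in flight; finish_request() is hypothetical:
 *
 *	prev = mmc_start_req(host, &next->areq, &status);
 *	if (prev)
 *		finish_request(prev, status);
 */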

/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. In the case of 'cap_cmd_during_tfr'
 *	requests, the transfer is ongoing and the caller can issue further
 *	commands that do not use the data lines, and then wait by calling
 *	mmc_wait_for_req_done().
 *	Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
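
/*
 * Illustrative sketch (not from this file): a synchronous single-block read
 * built on mmc_wait_for_req(). The scatterlist setup is omitted and
 * blk_addr is a hypothetical block address:
 *
 *	struct mmc_request mrq = {};
 *	struct mmc_command cmd = {};
 *	struct mmc_data data = {};
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_set_data_timeout(&data, card);
 *	mmc_wait_for_req(card->host, &mrq);
 */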

/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issues a High Priority Interrupt, and polls the card status
 *	until the card is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
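
/*
 * Illustrative sketch (not from this file): issuing CMD13 (SEND_STATUS)
 * through mmc_wait_for_cmd(); the caller must already have claimed the
 * host:
 *
 *	struct mmc_command cmd = {};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 */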

/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, an HPI cannot be issued; the card
	 * should then complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
EXPORT_SYMBOL(mmc_read_bkops_status);

/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased further for other problematic cards.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
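
/*
 * Worked example for the computation above (values hypothetical): an SD
 * card with csd.tacc_ns = 1000000 and csd.r2w_factor = 2 gets a write
 * timeout of 1000000 ns * 100 * 4 = 400 ms, which is below the 3 s write
 * limit and therefore stands.
 */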

/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-null and
 *	dereferences to a non-zero value, then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release an MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);
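
/*
 * Illustrative sketch (not from this file): the usual claim/release bracket
 * around a sequence of commands to a card, as used throughout this file:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 */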

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);

	if (err)
		pr_err("%s: tuning execution failed: %d\n",
			mmc_hostname(host), err);
	else
		mmc_retune_enable(host);

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	     host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
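
/*
 * Worked example for the boundary-voltage note above: the range
 * [3300:3400] sets every boundary bit, so mmc_vddrange_to_ocrmask(3300,
 * 3400) yields MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */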

#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * Parse the "voltage-ranges" DT property, returning zero if it is not
 * found, negative errno if the voltage-range specification is invalid,
 * or one if the voltage-range is specified and successfully parsed.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges) {
		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
		return 0;
	}
	if (!num_ranges) {
		pr_err("%s: voltage-ranges empty\n", np->full_name);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
				np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
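
/*
 * Illustrative device-tree snippet (hypothetical node) that this parser
 * accepts; each cell pair is a <min max> voltage range in millivolts:
 *
 *	mmc@80005000 {
 *		voltage-ranges = <3300 3400>;
 *	};
 */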

#endif /* CONFIG_OF */

static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

#ifdef CONFIG_REGULATOR

/**
 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
 * @vdd_bit:	OCR bit number
 * @min_uV:	minimum voltage value (uV)
 * @max_uV:	maximum voltage value (uV)
 *
 * This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible a negative errno value is
 * returned.
 */
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
{
	int		tmp;

	if (!vdd_bit)
		return -EINVAL;

	/*
	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
	 * bits this regulator doesn't quite support ... don't
	 * be too picky, most cards and regulators are OK with
	 * a 0.1V range goof (it's a small error percentage).
	 */
	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
	if (tmp == 0) {
		*min_uV = 1650 * 1000;
		*max_uV = 1950 * 1000;
	} else {
		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
		*max_uV = *min_uV + 100 * 1000;
	}

	return 0;
}

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int			result = 0;
	int			count;
	int			i;
	int			vdd_uV;
	int			vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;

	if (vdd_bit) {
		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);

		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
						  int min_uV, int target_uV,
						  int max_uV)
{
	/*
	 * Check if supported first to avoid errors since we may try several
	 * signal levels during power up and don't want to show errors.
	 */
	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
		return -EINVAL;

	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
					     max_uV);
}

/**
 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
 *
 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
 * That will match the behavior of old boards where VQMMC and VMMC were supplied
 * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
 * SD card spec also define VQMMC in terms of VMMC.
 * If this is not possible we'll try the full 2.7-3.6V of the spec.
 *
 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
 * requested voltage.  This is definitely a good idea for UHS where there's a
 * separate regulator on the card that's trying to make 1.8V and it's best if
 * we match.
 *
 * This function is expected to be used by a controller's
 * start_signal_voltage_switch() function.
 */
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	int ret, volt, min_uV, max_uV;

	/* If no vqmmc supply then we can't change the voltage */
	if (IS_ERR(mmc->supply.vqmmc))
		return -EINVAL;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_120:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1100000, 1200000, 1300000);
	case MMC_SIGNAL_VOLTAGE_180:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1700000, 1800000, 1950000);
	case MMC_SIGNAL_VOLTAGE_330:
		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
			__func__, volt, max_uV);

		min_uV = max(volt - 300000, 2700000);
		max_uV = min(max_uV + 200000, 3600000);

		/*
		 * Due to a limitation in the current implementation of
		 * regulator_set_voltage_triplet() which is taking the lowest
		 * voltage possible if below the target, search for a suitable
		 * voltage in two steps and try to stay close to vmmc
		 * with a 0.3V tolerance at first.
		 */
		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						min_uV, volt, max_uV))
			return 0;

		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						2700000, volt, 3600000);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);

#endif /* CONFIG_REGULATOR */

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
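
/*
 * Illustrative sketch (not from this file): a host driver's probe path
 * typically calls mmc_regulator_get_supply() early, so that a not-yet-ready
 * regulator defers the probe:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;	(ret is -EPROBE_DEFER here)
 */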

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}

int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	u32 clock;

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design.  Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1809

P
Pierre Ossman 已提交
1810 1811 1812 1813
	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
1814
	mmc_delay(10);
L
Linus Torvalds 已提交
1815

1816 1817
	mmc_pwrseq_post_power_on(host);

H
Hein Tibosch 已提交
1818
	host->ios.clock = host->f_init;
1819

L
Linus Torvalds 已提交
1820
	host->ios.power_mode = MMC_POWER_ON;
1821
	mmc_set_ios(host);
L
Linus Torvalds 已提交
1822

P
Pierre Ossman 已提交
1823 1824 1825 1826
	/*
	 * This delay must be at least 74 clock sizes, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
1827
	mmc_delay(10);
L
Linus Torvalds 已提交
1828 1829
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	WARN_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->bus_ops);
	WARN_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/*
	 * If the device is configured as wakeup, prevent a new sleep for
	 * 5 s to give user space a chance to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
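
/*
 * Example (illustrative sketch): a host driver with a card-detect
 * interrupt typically debounces the line by passing a small delay; the
 * handler name and debounce period below are hypothetical.
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */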

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.
	 * For MMC, have a stab at a good value and for modern cards it will
	 * end up being 4MiB. Note that if the value is too small, it can end
	 * up taking longer to erase. Also note, erase_size is already set to
	 * High Capacity Erase Size if available when this function is called.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->erase_size) {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}
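
/*
 * Worked example (assuming a card without an SSR-provided AU size): for a
 * 4 GiB SD card, 'sz' above is the card size in MiB (~4096), so
 * 'pref_erase' is set to 4 MiB in 512-byte sectors (8192), then rounded
 * up to a multiple of 'erase_size' if the two are misaligned.
 */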

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
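
/*
 * Worked example for the CSD path above, using the numbers from the
 * overflow comment: tacc_ns = 80000000 and mult = 1280 (r2w_factor = 7)
 * give timeout_us = (80000000 / 1000) * 1280 = 102400000, i.e. an
 * erase_timeout of roughly 102 seconds per erase group, before the qty
 * and secure multipliers are applied.
 */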

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp = false;
	unsigned long timeout;
	int err;

	mmc_retune_hold(card->host);

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	busy_timeout = mmc_erase_timeout(card, arg, qty);
	/*
	 * If the host controller supports busy signalling and the timeout for
	 * the erase operation does not exceed the max_busy_timeout, we should
	 * use an R1B response. Otherwise, we need to prevent the host from
	 * doing hw busy detection, which is done by converting to an R1
	 * response instead.
	 */
	if (card->host->max_busy_timeout &&
	    busy_timeout > card->host->max_busy_timeout) {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout;
		use_r1b_resp = true;
	}

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	/*
	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, polling must be
	 * avoided.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;

	timeout = jiffies + msecs_to_jiffies(busy_timeout);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	mmc_retune_release(card->host);
	return err;
}

static unsigned int mmc_align_erase_size(struct mmc_card *card,
					 unsigned int *from,
					 unsigned int *to,
					 unsigned int nr)
{
	unsigned int from_new = *from, nr_new = nr, rem;

	/*
	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
	 * to align the erase size efficiently.
	 */
	if (is_power_of_2(card->erase_size)) {
		unsigned int temp = from_new;

		from_new = round_up(temp, card->erase_size);
		rem = from_new - temp;

		if (nr_new > rem)
			nr_new -= rem;
		else
			return 0;

		nr_new = round_down(nr_new, card->erase_size);
	} else {
		rem = from_new % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from_new += rem;
			if (nr_new > rem)
				nr_new -= rem;
			else
				return 0;
		}

		rem = nr_new % card->erase_size;
		if (rem)
			nr_new -= rem;
	}

	if (nr_new == 0)
		return 0;

	*to = from_new + nr_new;
	*from = from_new;

	return nr_new;
}
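
/*
 * Worked example: with erase_size = 1024 sectors (a power of 2), a
 * request of *from = 1000, nr = 5000 becomes from_new = 1024 (rem = 24),
 * nr_new = 4976 rounded down to 4096, so the aligned range covers
 * sectors 1024..5119 and the caller handles the trimmed head and tail.
 */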

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;
2323
	int err;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG)
		nr = mmc_align_erase_size(card, &from, &to, nr);

	if (nr == 0)
		return 0;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	/*
	 * Special case where only one erase-group fits in the timeout budget:
	 * If the region crosses an erase-group boundary in this particular
	 * case, we will be trimming more than one erase-group which does not
	 * fit in the timeout budget of the controller, so we need to split it
	 * and call mmc_do_erase() twice if necessary. This special case is
	 * identified by the card->eg_boundary flag.
	 */
	rem = card->erase_size - (from % card->erase_size);
	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
		err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
	}

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
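
/*
 * Example (illustrative sketch): a discard path checks the card's
 * capabilities and claims the host around the erase; 'from', 'nr' and
 * 'err' are hypothetical.
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_can_trim(card))
 *		err = mmc_erase(card, from, nr, MMC_TRIM_ARG);
 *	else
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 */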

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
	unsigned int last_timeout = 0;
	unsigned int max_busy_timeout = host->max_busy_timeout ?
			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

	if (card->erase_shift) {
		max_qty = UINT_MAX >> card->erase_shift;
		min_qty = card->pref_erase >> card->erase_shift;
	} else if (mmc_card_sd(card)) {
		max_qty = UINT_MAX;
		min_qty = card->pref_erase;
	} else {
		max_qty = UINT_MAX / card->erase_size;
		min_qty = card->pref_erase / card->erase_size;
	}

	/*
	 * We should not only use 'host->max_busy_timeout' as the limitation
	 * when deciding the max discard sectors. We should set a balance value
	 * to improve the erase speed, while avoiding an overly long timeout.
	 *
	 * Here we set 'card->pref_erase' as the minimal discard sectors no
	 * matter what size of 'host->max_busy_timeout', but if the
	 * 'host->max_busy_timeout' is large enough for more discard sectors,
	 * then we can continue to increase the max discard sectors until we
	 * get a balance value. In cases when the 'host->max_busy_timeout'
	 * isn't specified, use the default max erase timeout.
	 */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);

			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to care that erase-group boundaries are not crossed by a
	 * single trim operation. We flag that special case with "eg_boundary".
	 * In all other cases we can just decrement qty and pretend that we
	 * always touch (qty + 1) erase-groups as a simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequence which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		mmc_hostname(host), max_discard, host->max_busy_timeout ?
		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
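
/*
 * Example (illustrative sketch): a block layer consumer caps discard
 * requests with the result so a single erase fits the host's busy
 * timeout; 'q' is a hypothetical request queue.
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(q, max_discard);
 */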

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
	    mmc_card_hs400(card) || mmc_card_hs400es(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
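
/*
 * Example (illustrative sketch): callers issue this once before
 * byte-addressed transfers. On block-addressed and HS400-class cards the
 * helper returns 0 without sending CMD16, as the block length is fixed.
 *
 *	err = mmc_set_blocklen(card, 512);
 */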

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
			bool is_rel_write)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
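
/*
 * Example (illustrative sketch): a pre-defined multi-block transfer
 * sends CMD23 immediately before the data command; 'nr_blocks' and the
 * transfer helper are hypothetical.
 *
 *	err = mmc_set_blockcount(card, nr_blocks, false);
 *	if (!err)
 *		err = foo_do_multiblock_transfer(card, nr_blocks);
 */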

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	host->ops->hw_reset(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->reset(host);
	mmc_bus_put(host);

	if (ret)
		pr_warn("%s: tried to reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);
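
/*
 * Example (illustrative sketch): an upper layer that has exhausted
 * command retries can attempt a full power cycle and re-initialization
 * of the card; the error handling below is hypothetical.
 *
 *	err = mmc_hw_reset(host);
 *	if (err && err != -EOPNOTSUPP)
 *		pr_warn("%s: reset failed\n", mmc_hostname(host));
 */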

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 * Skip it if we already know that we do not support SDIO commands.
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	mmc_claim_host(host);
	if (mmc_card_is_removable(host) && host->ops->get_cd &&
			host->ops->get_cd(host) == 0) {
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;

	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
		mmc_claim_host(host);
		mmc_power_up(host, host->ocr_avail);
		mmc_release_host(host);
	}

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	if (host->slot.cd_irq >= 0)
		disable_irq(host->slot.cd_irq);

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
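
/*
 * Example (illustrative sketch): suspend paths flush the volatile cache
 * before powering the card down, so previously acknowledged writes reach
 * non-volatile storage.
 *
 *	err = mmc_flush_cache(card);
 *	if (err)
 *		pr_warn("%s: cache flush failed\n", mmc_hostname(card->host));
 */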

#ifdef CONFIG_PM_SLEEP
/*
 * Do the card removal on suspend if card is assumed removable. Do that in
 * pm notifier while userspace isn't yet frozen, so we will be able to sync
 * the card.
 */
static int mmc_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);

	}

	return 0;
}

void mmc_register_pm_notifier(struct mmc_host *host)
{
	host->pm_notify.notifier_call = mmc_pm_notify;
	register_pm_notifier(&host->pm_notify);
}

void mmc_unregister_pm_notifier(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement asynchronous
 * request mechanism, used by mmc core, host driver and mmc requests
 * supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

static int __init mmc_init(void)
{
	int ret;

	ret = mmc_register_bus();
	if (ret)
		return ret;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");