/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
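
/*
 * Note (added for clarity): a command "uses the data lines" either because
 * it carries a data transfer or because it signals busy on DAT0
 * (MMC_RSP_BUSY, i.e. an R1b-type response), which is what the helper
 * below checks.
 */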

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
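
/*
 * Illustrative usage: a soft reset of only the CMD and DAT circuits, as
 * issued from sdhci_init(), is sdhci_do_reset(host, SDHCI_RESET_CMD |
 * SDHCI_RESET_DATA), while SDHCI_RESET_ALL also clears clock and power
 * state, which is why sdhci_reset() zeroes host->clock in that case.
 */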

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
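
/*
 * Illustration: SDHCI_BUFFER is a single 32-bit data port, so the PIO
 * helpers above/below move data through a 4-byte 'scratch' word -- reads
 * unpack each readl() lowest byte first, and writes pack bytes the same
 * way before each writel(), flushing a partial word at the end of the
 * block.
 */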

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
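
/*
 * Note: the cookie records who mapped the buffers -- COOKIE_PRE_MAPPED
 * when mapping was done ahead of time via the host's ->pre_req() hook,
 * COOKIE_MAPPED when it was done here on demand -- so the completion path
 * knows whether it is responsible for unmapping.
 */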

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
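
/*
 * For reference (per the SDHCI spec): an ADMA2 descriptor as written above
 * is little-endian, with bits [15:0] the attribute/command word (e.g.
 * ADMA2_TRAN_VALID), [31:16] the length, [63:32] the low 32 address bits,
 * and -- for the 64-bit descriptor variant -- the high 32 address bits
 * following.
 */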

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

680 681
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
682 683 684
{
	struct scatterlist *sg;
	unsigned long flags;
685 686 687 688
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;
689 690 691 692 693 694

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

695
	host->sg_count = sg_count;
696

697
	desc = host->adma_table;
698 699 700 701 702 703 704 705 706
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
707 708 709
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
710 711
		 * alignment.
		 */
712 713
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
714 715 716 717 718 719 720
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

B
Ben Dooks 已提交
721
			/* tran, valid */
722 723
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);
724 725 726

			BUG_ON(offset > 65536);

727 728
			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;
729 730 731 732 733 734 735

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

736 737 738 739
		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);
740 741 742 743 744

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
745
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
746 747
	}

748
	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
749
		/* Mark the last descriptor as the terminating descriptor */
750
		if (desc != host->adma_table) {
751
			desc -= host->desc_sz;
752
			sdhci_adma_mark_end(desc);
753 754
		}
	} else {
755
		/* Add a terminating entry - nop, end, valid */
756
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
757
	}
758 759 760 761 762 763 764
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode) {
		sdhci_writel(host, addr, SDHCI_ADMA_ADDRESS);
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			sdhci_writel(host, (u64)addr >> 32, SDHCI_ADMA_ADDRESS_HI);
	} else {
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
	}
}
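
/*
 * Note: in Host Version 4 mode the SDMA system address is programmed
 * through the ADMA system-address register(s) rather than the legacy
 * SDHCI_DMA_ADDRESS register, which is what the v4_mode branch above
 * implements.
 */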

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}
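
/*
 * Worked example (illustrative): with data->timeout_ns = 100000000 (100 ms),
 * data->timeout_clks = 1000 and host->clock = 50 MHz, this returns
 * 100000 us + 1000000 * 1000 / 50000000 us = 100020 us.
 */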

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}
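
/*
 * Example (illustrative): a 512-byte block on a 4-bit bus is
 * 512 * 8 / 4 = 1024 clocks, i.e. ~20.5 us at 50 MHz; the code above
 * doubles that per-block figure to allow for unknowns before adding the
 * card's own timeout.
 */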

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}
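
/*
 * Worked example (illustrative): with host->timeout_clk = 1000 (kHz), the
 * base timeout is (1 << 13) * 1000 / 1000 = 8192 us, so a 100000 us target
 * needs count = 4 (8192 -> 131072 us), i.e. a hardware timeout of
 * 2^(13 + 4) timeout-clock cycles.
 */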

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		bool too_big = false;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
			sdhci_calc_sw_timeout(host, cmd);
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			sdhci_set_data_timeout_irq(host, true);
		}

		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	host->data_timeout = 0;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);

	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}
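
/*
 * Note: the miter direction above is relative to the sg list -- a card
 * read fills the buffers (SG_MITER_TO_SG) while a card write drains them
 * (SG_MITER_FROM_SG).
 */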

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'.
	 */
	if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is the upper
		 * layer's responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}
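
/*
 * Example (illustrative): for a 136-bit R2 response (CSD/CID) the
 * controller strips the CRC byte, so each 32-bit word above is shifted
 * left by 8 and topped up with the high byte of the next word to recover
 * the layout the MMC core expects.
 */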

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
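
/*
 * Worked example (illustrative): with a 200 MHz base clock, no clk_mul and
 * a 50 MHz request on a v3.00 host, the loop above picks real_div = 4,
 * encodes div = 4 >> 1 = 2 into the divider field, and reports
 * *actual_clock = 200 MHz / 4 = 50 MHz.
 */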

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		     ios->timing == MMC_TIMING_MMC_HS ||
		     ios->timing == MMC_TIMING_MMC_HS400 ||
		     ios->timing == MMC_TIMING_MMC_HS200 ||
		     ios->timing == MMC_TIMING_MMC_DDR52 ||
		     ios->timing == MMC_TIMING_UHS_SDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR104 ||
		     ios->timing == MMC_TIMING_UHS_DDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers misbehave on some ios operations,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

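/*
 * For hosts with an unstable write-protect signal
 * (SDHCI_QUIRK_UNSTABLE_RO_DETECT), the line is sampled SAMPLE_COUNT
 * times, 30 ms apart, and the result decided by majority vote.
 */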
#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);

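/*
 * The ->start_signal_voltage_switch() callback: programs the 1.8V
 * Signal Enable bit in Host Control 2 (and the vqmmc regulator, when
 * one is available), then checks that the requested voltage took hold.
 */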
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

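/*
 * The tuning helpers below are exported individually, which lets drivers
 * with a platform-specific tuning sequence still reuse the standard
 * register manipulation steps.
 */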
void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_start_tuning);

void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_end_tuning);

void sdhci_reset_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_reset_tuning);

static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_do_reset(host, SDHCI_RESET_CMD);
	sdhci_do_reset(host, SDHCI_RESET_DATA);

	sdhci_end_tuning(host);

	mmc_abort_tuning(host->mmc, opcode);
}

/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different from other commands and there is no timeout
 * interrupt so special handling is needed.
 */
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
	unsigned long flags;
	u32 b = host->sdma_boundary;

	spin_lock_irqsave(&host->lock, flags);

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here.
	 */
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_send_command(host, &cmd);

	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));
}
EXPORT_SYMBOL_GPL(sdhci_send_tuning);

static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int i;

	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

		if (!host->tuning_done) {
			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
				mmc_hostname(host->mmc));
			sdhci_abort_tuning(host, opcode);
			return -ETIMEDOUT;
		}

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return 0; /* Success! */
			break;
		}

		/* Spec does not require a delay between tuning cycles */
		if (host->tuning_delay > 0)
			mdelay(host->tuning_delay);
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
	return -EAGAIN;
}

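/*
 * The ->execute_tuning() callback: decides from the current timing
 * whether tuning is needed at all, defers to ->platform_execute_tuning()
 * if the driver provides one, and otherwise runs the standard loop in
 * __sdhci_execute_tuning().
 */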
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out;
	}

	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	host->mmc->retune_period = tuning_count;

	if (host->tuning_delay < 0)
		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

	sdhci_start_tuning(host);

	host->tuning_err = __sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

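/*
 * ->pre_req()/->post_req() implement asynchronous request preparation:
 * the next request's sglist can be DMA-mapped (COOKIE_PRE_MAPPED) while
 * the previous request is still in flight, and is unmapped in the post
 * hook once the request has completed.
 */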
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));

	data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	/*
	 * No pre-mapping in the pre hook if we're using the bounce buffer,
	 * for that we would need two bounce buffers since one buffer is
	 * in flight when this is getting called.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}

static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

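/*
 * Request completion runs in tasklet context: sdhci_request_done()
 * retires one finished mrq per call, unmapping DMA buffers and resetting
 * the controller after errors, and returns true once there is nothing
 * more it can do for now.
 */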
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			if (host->bounce_buffer) {
				/*
				 * On reads, copy the bounced data into the
				 * sglist
				 */
				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
					unsigned int length = data->bytes_xfered;

					if (length > host->bounce_buffer_size) {
						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
						       mmc_hostname(host->mmc),
						       host->bounce_buffer_size,
						       data->bytes_xfered);
						/* Cap it down and continue */
						length = host->bounce_buffer_size;
					}
					dma_sync_single_for_cpu(
						host->mmc->parent,
						host->bounce_addr,
						host->bounce_buffer_size,
						DMA_FROM_DEVICE);
					sg_copy_from_buffer(data->sg,
						data->sg_len,
						host->bounce_buffer,
						length);
				} else {
					/* No copying, just switch ownership */
					dma_sync_single_for_cpu(
						host->mmc->parent,
						host->bounce_addr,
						host->bounce_buffer_size,
						mmc_get_dma_dir(data));
				}
			} else {
				/* Unmap the raw data */
				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len,
					     mmc_get_dma_dir(data));
			}
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/*
		 * Spec says we should do both at the same time, but Ricoh
		 * controllers do not like that.
		 */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}

static void sdhci_timeout_timer(struct timer_list *t)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = from_timer(host, t, timer);

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_timeout_data_timer(struct timer_list *t)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = from_timer(host, t, data_timer);

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
{
	/* Handle auto-CMD12 error */
	if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
		struct mmc_request *mrq = host->data_cmd->mrq;
		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
		int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
				   SDHCI_INT_DATA_TIMEOUT :
				   SDHCI_INT_DATA_CRC;

		/* Treat auto-CMD12 error the same as data error */
		if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
			*intmask_p |= data_err_bit;
			return;
		}
	}

	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits.  Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/* Treat data command CRC error the same as data CRC error */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			*intmask_p |= SDHCI_INT_DATA_CRC;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

	/* Handle auto-CMD23 error */
	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
		struct mmc_request *mrq = host->cmd->mrq;
		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
		int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
			  -ETIMEDOUT :
			  -EILSEQ;

		if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mrq->sbc->error = err;
			sdhci_finish_mrq(host, mrq);
			return;
		}
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

static void sdhci_adma_show_error(struct sdhci_host *host)
{
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}

static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->data_cmd = NULL;
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			dma_addr_t dmastart, dmanow;

			dmastart = sdhci_sdma_address(host);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
			    &dmastart, host->data->bytes_xfered, &dmanow);
			sdhci_set_sdma_addr(host, dmanow);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		DBG("IRQ status 0x%08x\n", intmask);

		if (host->ops->irq) {
			intmask = host->ops->irq(host, intmask);
			if (!intmask)
				goto cont;
		}

		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.MX eSDHC: the INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent an interrupt storm which would
			 * freeze the system.  The REMOVE bit suffers from
			 * the same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_RETUNE)
			mmc_retune_needed(host->mmc);

		if ((intmask & SDHCI_INT_CARD_INT) &&
		    (host->ier & SDHCI_INT_CARD_INT)) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}
cont:
		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}

static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
{
	return mmc_card_is_removable(host->mmc) &&
	       !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	       !mmc_can_gpio_cd(host->mmc);
}

/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
		  SDHCI_WAKE_ON_INT;
	u32 irq_val = 0;
	u8 wake_val = 0;
	u8 val;

	if (sdhci_cd_irq_can_wakeup(host)) {
		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
	}

	if (mmc_card_wake_sdio_irq(host->mmc)) {
		wake_val |= SDHCI_WAKE_ON_INT;
		irq_val |= SDHCI_INT_CARD_INT;
	}

	if (!irq_val)
		return false;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	val |= wake_val;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);

	host->irq_wake_enabled = !enable_irq_wake(host->irq);

	return host->irq_wake_enabled;
}

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

	disable_irq_wake(host->irq);

	host->irq_wake_enabled = false;
}

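/*
 * If the device may wake the system and wakeup IRQs can be armed, the
 * controller is left with wakeup events enabled across suspend;
 * otherwise all interrupts are masked and the IRQ handler is released.
 */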
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
	    !sdhci_enable_irq_wakeups(host)) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);

int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	if (host->irq_wake_enabled) {
		sdhci_disable_irq_wakeups(host);
	} else {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	}

	sdhci_enable_card_detection(host);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);

int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
	    mmc->ios.power_mode != MMC_POWER_OFF) {
		/* Force clock and power re-program */
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
		mmc->ops->set_ios(mmc, &mmc->ios);

		if ((host_flags & SDHCI_PV_ENABLED) &&
		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
			spin_lock_irqsave(&host->lock, flags);
			sdhci_enable_preset_value(host, true);
			spin_unlock_irqrestore(&host->lock, flags);
		}

		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
		    mmc->ops->hs400_enhanced_strobe)
			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Command Queue Engine (CQE) helpers                                        *
 *                                                                           *
\*****************************************************************************/

void sdhci_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		ctrl |= SDHCI_CTRL_ADMA64;
	else
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
		     SDHCI_BLOCK_SIZE);

	/* Set maximum timeout */
	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);

	host->ier = host->cqe_ier;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	host->cqe_on = true;

	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);

void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (recovery) {
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);

bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
		   int *data_error)
{
	u32 mask;

	if (!host->cqe_on)
		return false;

	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
		*cmd_error = -EILSEQ;
	else if (intmask & SDHCI_INT_TIMEOUT)
		*cmd_error = -ETIMEDOUT;
	else
		*cmd_error = 0;

	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
		*data_error = -EILSEQ;
	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
		*data_error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		*data_error = -EIO;
	else
		*data_error = 0;

	/* Clear selected interrupts. */
	mask = intmask & host->cqe_ier;
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

	if (intmask & SDHCI_INT_BUS_POWER)
		pr_err("%s: Card is consuming too much power!\n",
		       mmc_hostname(host->mmc));

	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
	if (intmask) {
		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);
	}

	return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

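/*
 * Typical use by a platform driver (an illustrative sketch only; the
 * probe flow, "my_priv"/"my_sdhci_ops" and the sdhci_add_host() call
 * live outside this excerpt):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->ioaddr = ...;
 *	host->ops = &my_sdhci_ops;
 *	ret = sdhci_add_host(host);
 */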
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	host->cqe_ier     = SDHCI_CQE_INT_MASK;
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	host->tuning_delay = -1;

	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;

	/*
	 * The DMA table descriptor count is calculated as the maximum
	 * number of segments times 2, to allow for an alignment
	 * descriptor for each segment, plus 1 for a nop end descriptor.
	 */
	host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3496

3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable  of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}

void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps-mask", &dt_caps_mask);
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);

static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned int max_blocks;
	unsigned int bounce_size;
	int ret;

	/*
	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
	 * has diminishing returns, this is probably because SD/MMC
	 * cards are usually optimized to handle this size of requests.
	 */
	bounce_size = SZ_64K;
	/*
	 * Adjust downwards to maximum request size if this is less
	 * than our segment size, else hammer down the maximum
	 * request size to the maximum buffer size.
	 */
	if (mmc->max_req_size < bounce_size)
		bounce_size = mmc->max_req_size;
	max_blocks = bounce_size / 512;

	/*
	 * When we just support one segment, we can get significant
	 * speedups by the help of a bounce buffer to group scattered
	 * reads/writes together.
	 */
	host->bounce_buffer = devm_kmalloc(mmc->parent,
					   bounce_size,
					   GFP_KERNEL);
	if (!host->bounce_buffer) {
		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
		       mmc_hostname(mmc),
		       bounce_size);
		/*
		 * Exiting with zero here makes sure we proceed with
		 * mmc->max_segs == 1.
		 */
		return;
	}

	host->bounce_addr = dma_map_single(mmc->parent,
					   host->bounce_buffer,
					   bounce_size,
					   DMA_BIDIRECTIONAL);
	ret = dma_mapping_error(mmc->parent, host->bounce_addr);
	if (ret)
		/* Again fall back to max_segs == 1 */
		return;
	host->bounce_buffer_size = bounce_size;

	/* Lie about this since we're bouncing */
	mmc->max_segs = max_blocks;
	mmc->max_seg_size = bounce_size;
	mmc->max_req_size = bounce_size;

	pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
		mmc_hostname(mmc), max_blocks, bounce_size);
}

static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
{
	/*
	 * According to the SD Host Controller spec v4.10, bit[27] of the
	 * Capabilities Register (added in version 4.10) indicates 64-bit
	 * System Address support for V4 mode.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
		return host->caps & SDHCI_CAN_64BIT_V4;

	return host->caps & SDHCI_CAN_64BIT;
}

int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so that
	 * the host can take the appropriate action if regulators are not
	 * available.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;

	DBG("Version:   0x%08x | Present:  0x%08x\n",
	    sdhci_readw(host, SDHCI_HOST_VERSION),
	    sdhci_readl(host, SDHCI_PRESENT_STATE));
	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
	    sdhci_readl(host, SDHCI_CAPABILITIES),
	    sdhci_readl(host, SDHCI_CAPABILITIES_1));

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_420) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
	 * that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (sdhci_can_64bit_dma(host))
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA unless v4 mode is set */
	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = host->adma_table_cnt *
					      SDHCI_ADMA2_64_DESC_SZ(host);
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
		} else {
			host->adma_table_sz = host->adma_table_cnt *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}

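		/*
		 * Reserve one SDHCI_ADMA2_ALIGN-sized slot per segment for
		 * bouncing the unaligned portions of buffers that do not
		 * meet the ADMA alignment requirement.
		 */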
		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		/*
		 * Use zalloc to zero the reserved high 32-bits of 128-bit
		 * descriptors so that they never need to be written.
		 */
		buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * For Host Controller v3.00 and later, find out whether the
	 * clock multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * If the Clock Multiplier field is 0, programmable clock mode is
	 * not supported. Otherwise the actual clock multiplier is one
	 * more than the value of the Clock Multiplier field in the
	 * Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;
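	/* e.g. a Clock Multiplier field of 9 gives an effective multiplier of 10 */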

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

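	/*
	 * Minimum clock: the base clock divided by the largest divisor
	 * the spec version allows (2046 for v3.00, 256 for v2.00); in
	 * programmable clock mode, (base * multiplier) / 1024.
	 */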
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;

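		/* The UNIT bit means the field is in MHz; keep timeout_clk in kHz */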
		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
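		/* Convert from timeout-clock cycles to ms (timeout_clk is in kHz) */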
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
	    !host->ops->get_max_timeout_count)
		mmc->max_busy_timeout = 0;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/*
	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
	 * For v4 mode, SDMA may use Auto-CMD23 as well.
	 */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In the eMMC case, vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using the mmc-hs200-1_8v/mmc-hs400-1_8v dt
		 * properties), but if the board is wired such that the IO
		 * lines are not connected to 1.8V then HS200/HS400 cannot
		 * be supported. Disable HS200/HS400 in that case; the same
		 * applies to the other 1.8V modes.
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
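	/* e.g. a register value of 11 gives 2^10 = 1024 seconds between re-tunes */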

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
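			/* e.g. a 1A limit -> 1000mA -> register value 250 (4mA units) */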

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum request size in bytes. Limited by the SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit,
	 * but 512KiB is below that anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
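		/*
		 * A software IOTLB (swiotlb) bounce buffer cannot map more
		 * than IO_TLB_SEGSIZE slots of 1 << IO_TLB_SHIFT bytes each
		 * in one request, so cap the request size accordingly.
		 */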
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
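	/* i.e. 0 -> 512, 1 -> 1024, 2 -> 2048 bytes */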

	/*
	 * Maximum block count. The Block Count register is 16 bits wide,
	 * so the limit is 65535 blocks unless multi-block transfers are
	 * broken on this controller.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init the request-completion tasklet.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED,	mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	mmiowb();

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");