/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
	       mmc_hostname(host->mmc));

	pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
	       sdhci_readl(host, SDHCI_DMA_ADDRESS),
	       sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
	       sdhci_readw(host, SDHCI_BLOCK_SIZE),
	       sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
	       sdhci_readl(host, SDHCI_ARGUMENT),
	       sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_err(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
	       sdhci_readl(host, SDHCI_PRESENT_STATE),
	       sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_err(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
	       sdhci_readb(host, SDHCI_POWER_CONTROL),
	       sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_err(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
	       sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
	       sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_err(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
	       sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
	       sdhci_readl(host, SDHCI_INT_STATUS));
	pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
	       sdhci_readl(host, SDHCI_INT_ENABLE),
	       sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
	       sdhci_readw(host, SDHCI_ACMD12_ERR),
	       sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_err(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
	       sdhci_readl(host, SDHCI_CAPABILITIES),
	       sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_err(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
	       sdhci_readw(host, SDHCI_COMMAND),
	       sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_err(DRIVER_NAME ": Resp[0]:  0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	pr_err(DRIVER_NAME ": Resp[2]:  0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));

	pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
	       sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
			       sdhci_readl(host, SDHCI_ADMA_ERROR),
			       sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
			       sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		else
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
			       sdhci_readl(host, SDHCI_ADMA_ERROR),
			       sdhci_readl(host, SDHCI_ADMA_ADDRESS));
	}

	pr_err(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

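/*
 * PIO transfers move data through the 32-bit SDHCI_BUFFER register one
 * word at a time. A word may straddle scatterlist entries, so the
 * readers/writers below track how many bytes of the current word are
 * still pending in "chunk" while sg_miter walks the list.
 */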
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				data->flags & MMC_DATA_WRITE ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

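/*
 * An ADMA2 descriptor, as laid out by struct sdhci_adma2_64_desc, is a
 * 16-bit attribute/command word, a 16-bit length and a 32-bit address;
 * 64-bit descriptors append a high address word. Both layouts share the
 * leading fields, which is what the helper below relies on.
 */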
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
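		/*
		 * For example (illustrative numbers only): a 100-byte
		 * entry at DMA address 0x1002 yields offset = 2 below, so
		 * two bytes go through the bounce buffer and the remaining
		 * 98 start 32-bit aligned at 0x1004.
		 */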
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
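	/*
	 * For example, with a 1 MHz timeout clock (timeout_clk = 1000),
	 * count 0 corresponds to 2^13 clocks (~8.2 ms) and every increment
	 * doubles that, so the maximum count 0xE gives 2^27 clocks (~134 s).
	 */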
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

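	/*
	 * The block size register also carries the SDMA buffer boundary
	 * (bits 14:12); SDHCI_DEFAULT_BOUNDARY_ARG selects the largest
	 * boundary (512 KiB) defined by the SDHCI specification.
	 */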
	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

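/*
 * Queue a completed request for the finish tasklet. Up to SDHCI_MAX_MRQS
 * requests may be outstanding per host; they are tracked in the
 * mrqs_done[] slots checked below.
 */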
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	int i;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
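			/*
			 * A 136-bit (R2) response arrives spread across the
			 * four RESPONSE registers with its CRC byte already
			 * stripped by the controller, hence the byte-wise
			 * reassembly with an 8-bit shift below.
			 */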
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

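/*
 * The preset value registers (one per UHS-I timing mode) encode, among
 * other fields, the SDCLK frequency divisor and the driver strength;
 * sdhci_calc_clk() and sdhci_set_ios() consume those fields.
 */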
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

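/*
 * Worked example (illustrative numbers only): on a v3.00 host with a
 * 200 MHz base clock, no clock multiplier and a 50 MHz request, the
 * divided-clock loop below picks div = 4; the register field written by
 * the caller is div >> 1 = 2 and the actual clock is 200 MHz / 4 = 50 MHz.
 */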
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS ||
	     ios->timing == MMC_TIMING_MMC_HS400 ||
	     ios->timing == MMC_TIMING_MMC_HS200 ||
	     ios->timing == MMC_TIMING_MMC_DDR52 ||
	     ios->timing == MMC_TIMING_UHS_SDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR104 ||
	     ios->timing == MMC_TIMING_UHS_DDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR25)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;
1859

1860 1861
	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

1862
	switch (ios->signal_voltage) {
1863
	case MMC_SIGNAL_VOLTAGE_330:
1864 1865
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
1866 1867 1868
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1869

1870
		if (!IS_ERR(mmc->supply.vqmmc)) {
1871
			ret = mmc_regulator_set_vqmmc(mmc, ios);
1872
			if (ret) {
J
Joe Perches 已提交
1873 1874
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
1875 1876 1877 1878 1879
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);
1880

1881 1882 1883 1884
		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;
1885

J
Joe Perches 已提交
1886 1887
		pr_warn("%s: 3.3V regulator output did not became stable\n",
			mmc_hostname(mmc));
1888 1889 1890

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
1891 1892
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
1893
		if (!IS_ERR(mmc->supply.vqmmc)) {
1894
			ret = mmc_regulator_set_vqmmc(mmc, ios);
1895
			if (ret) {
J
Joe Perches 已提交
1896 1897
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
1898 1899 1900
				return -EIO;
			}
		}
1901 1902 1903 1904 1905

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
1906 1907
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1908

1909 1910 1911 1912
		/* Some controller need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

1913 1914 1915 1916
		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;
1917

J
Joe Perches 已提交
1918 1919
		pr_warn("%s: 1.8V regulator output did not became stable\n",
			mmc_hostname(mmc));
1920

1921 1922
		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
1923 1924
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
1925
		if (!IS_ERR(mmc->supply.vqmmc)) {
1926
			ret = mmc_regulator_set_vqmmc(mmc, ios);
1927
			if (ret) {
J
Joe Perches 已提交
1928 1929
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
1930
				return -EIO;
1931 1932
			}
		}
1933
		return 0;
1934
	default:
1935 1936
		/* No signal voltage switch required */
		return 0;
1937
	}
1938 1939
}
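
/*
 * Condensed shape of the 1.8V branch above, as a sketch only (the real
 * code also handles a missing vqmmc regulator and the capability
 * checks):
 *
 *	mmc_regulator_set_vqmmc(mmc, ios);
 *	ctrl |= SDHCI_CTRL_VDD_180;
 *	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
 *	if (host->ops->voltage_switch)
 *		host->ops->voltage_switch(host);
 *	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
 *	return (ctrl & SDHCI_CTRL_VDD_180) ? 0 : -EAGAIN;
 */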

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_reset_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}

static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_do_reset(host, SDHCI_RESET_CMD);
	sdhci_do_reset(host, SDHCI_RESET_DATA);

	sdhci_end_tuning(host);

	mmc_abort_tuning(host->mmc, opcode);
}

/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different to other commands and there is no timeout
 * interrupt so special handling is needed.
 */
static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here.
	 */
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_send_command(host, &cmd);

	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));
}
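
/*
 * Worked example for the block-size programming above: for CMD21 on an
 * 8-bit bus the card returns a 128-byte tuning block, otherwise (CMD19,
 * or CMD21 on a 4-bit bus) it returns 64 bytes.  SDHCI_MAKE_BLKSZ(7, n)
 * encodes block size n with the SDMA buffer-boundary field set to 7,
 * i.e. the largest (512 KiB) boundary, which is irrelevant here since
 * the tuning transfer does not use DMA.
 */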

static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int i;

	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

		if (!host->tuning_done) {
			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
				mmc_hostname(host->mmc));
			sdhci_abort_tuning(host, opcode);
			return;
		}

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return; /* Success! */
			break;
		}

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
}

int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out;
	}

	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	host->mmc->retune_period = tuning_count;

	sdhci_start_tuning(host);

	__sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
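
/*
 * A glue driver can take over the whole sequence above by providing
 * ->platform_execute_tuning in its sdhci_ops; sdhci_execute_tuning()
 * then calls that instead of the generic loop.  A hedged sketch for a
 * hypothetical foo-sdhci driver:
 *
 *	static int foo_execute_tuning(struct sdhci_host *host, u32 opcode)
 *	{
 *		// sweep vendor delay taps, send the tuning command for
 *		// each, and latch the best-performing tap
 *		return 0;
 *	}
 *
 *	static const struct sdhci_ops foo_sdhci_ops = {
 *		.platform_execute_tuning = foo_execute_tuning,
 *	};
 */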

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			       DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
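
/*
 * ->pre_req and ->post_req let the core map the next request's
 * scatterlist for DMA while the current transfer is still in flight.
 * Roughly, the core's pipeline looks like this (sketch, not the exact
 * core code):
 *
 *	mmc->ops->pre_req(mmc, next_mrq);	// map the next mrq early
 *	mmc->ops->request(mmc, cur_mrq);	// current transfer runs
 *	... current request completes ...
 *	mmc->ops->post_req(mmc, cur_mrq, err);	// unmap when done
 */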

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}

static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/*
		 * The spec says we should do both at the same time, but
		 * Ricoh controllers do not like that.
		 */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits.  Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error.  We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif

static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->data_cmd = NULL;
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;

			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    dmastart, host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("IRQ status 0x%08x\n", intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc.  INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system.  And the REMOVE gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_RETUNE)
			mmc_retune_needed(host->mmc);

		if ((intmask & SDHCI_INT_CARD_INT) &&
		    (host->ier & SDHCI_INT_CARD_INT)) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}

static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;
	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
		      SDHCI_INT_CARD_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
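
/*
 * Whether wakeups are armed at suspend time is decided by
 * device_may_wakeup() in sdhci_suspend_host() below, which userspace
 * can toggle through the standard sysfs attribute, e.g.:
 *
 *	echo enabled > /sys/devices/.../power/wakeup
 */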

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}

int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);

int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);

int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
		/* Force clock and power re-program */
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
		mmc->ops->set_ios(mmc, &mmc->ios);

		if ((host_flags & SDHCI_PV_ENABLED) &&
		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
			spin_lock_irqsave(&host->lock, flags);
			sdhci_enable_preset_value(host, true);
			spin_unlock_irqrestore(&host->lock, flags);
		}

		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
		    mmc->ops->hs400_enhanced_strobe)
			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
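
/*
 * Typical use from a glue driver's probe path, sketched with a
 * hypothetical foo platform device (error handling trimmed; real
 * drivers also map registers and set host->ioaddr):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "foo";
 *	host->ops = &foo_sdhci_ops;
 *	host->irq = platform_get_irq(pdev, 0);
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 */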

static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}

void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps-mask", &dt_caps_mask);
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
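
/*
 * The sdhci-caps-mask/sdhci-caps properties read above allow a device
 * tree to first clear, then set, individual capability bits before the
 * driver interprets them; the upper 32 bits apply to CAPABILITIES_1 and
 * the lower 32 bits to CAPABILITIES.  A hedged devicetree sketch
 * (values are illustrative only):
 *
 *	mmc@5000 {
 *		...
 *		sdhci-caps-mask = <0x00000000 0x00200000>;
 *		sdhci-caps = <0x00000000 0x00000000>;
 *	};
 */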

int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so that
	 * the host can take the appropriate action if regulators are not
	 * available.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		return ret;

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
	 * that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}
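
	/*
	 * Worked example for the sizing above, assuming SDHCI_MAX_SEGS is
	 * 128 and the 12-byte 64-bit ADMA2 descriptor: the table needs
	 * (128 * 2 + 1) * 12 = 3084 bytes, plus a 128 * SDHCI_ADMA2_ALIGN
	 * alignment buffer allocated in front of it.
	 */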

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}
		}

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
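
	/*
	 * Worked example for the clock setup above (illustrative values):
	 * with host->max_clk = 200 MHz and a Clock Multiplier field of 4,
	 * host->clk_mul becomes 5, so programmable clock mode gives
	 * max_clk = 200 MHz * 5 = 1 GHz and mmc->f_min = 1 GHz / 1024,
	 * roughly 976 kHz.
	 */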

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

3573 3574 3575 3576 3577 3578
	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

3579
	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3580 3581
	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
		    (unsigned long)host);
3582

3583
	init_waitqueue_head(&host->buf_ready_int);
3584

3585 3586
	sdhci_init(host, 0);

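	/*
	 * Time-critical interrupt handling runs in sdhci_irq(); card insert
	 * and remove events are deferred to the threaded handler, which may
	 * sleep.
	 */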
	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED,	mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	mmiowb();

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

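	/* Error unwind: undo the setup above in reverse order. */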
unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	return __sdhci_add_host(host);
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
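
/*
 * Illustrative sketch of typical use from a platform glue driver; the
 * foo_* names below are hypothetical, not part of this file:
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "foo";
 *	host->ops = &foo_sdhci_ops;
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *
 * A driver that needs to adjust capabilities between hardware probing and
 * registration can instead call sdhci_setup_host(), tweak host->mmc, and
 * then call __sdhci_add_host() itself.
 */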

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

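	/* Only reset a live controller; a dead one may no longer respond. */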
	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");