/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
	       mmc_hostname(host->mmc));

	pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
	       sdhci_readl(host, SDHCI_DMA_ADDRESS),
	       sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
	       sdhci_readw(host, SDHCI_BLOCK_SIZE),
	       sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
	       sdhci_readl(host, SDHCI_ARGUMENT),
	       sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_err(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
	       sdhci_readl(host, SDHCI_PRESENT_STATE),
	       sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_err(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
	       sdhci_readb(host, SDHCI_POWER_CONTROL),
	       sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_err(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
	       sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
	       sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_err(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
	       sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
	       sdhci_readl(host, SDHCI_INT_STATUS));
	pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
	       sdhci_readl(host, SDHCI_INT_ENABLE),
	       sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
	       sdhci_readw(host, SDHCI_ACMD12_ERR),
	       sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_err(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
	       sdhci_readl(host, SDHCI_CAPABILITIES),
	       sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_err(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
	       sdhci_readw(host, SDHCI_COMMAND),
	       sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
	       sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_err(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

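		/*
		 * Listen for the transition we have not seen yet: card
		 * removal if a card is currently present, card insertion
		 * if the slot is empty.
		 */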
		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
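
/*
 * A note on the two PIO helpers above: SDHCI_BUFFER is a 32-bit data
 * port, so reads unpack each word least-significant byte first and
 * writes pack bytes the same way.  For example, a word read as
 * 0xDDCCBBAA is stored into the buffer as 0xAA, 0xBB, 0xCC, 0xDD.
 */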

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				data->flags & MMC_DATA_WRITE ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
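
/*
 * The cookie above implements pre-mapped DMA: when the mmc core maps a
 * request ahead of time (COOKIE_PRE_MAPPED, set from the host's pre_req
 * hook), the existing dma_map_sg() result is reused instead of mapping
 * the scatterlist again on the issue path.
 */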

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
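
/*
 * Each ADMA2 descriptor written above is {attributes, length, address}.
 * For example, a 512-byte transfer at DMA address 0x80001000 becomes
 * cmd = ADMA2_TRAN_VALID, len = 512, addr_lo = 0x80001000 (all little
 * endian), with addr_hi written only when 64-bit DMA is in use.
 */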

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
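
/*
 * Alignment example for the bounce handling above: a write segment of
 * 512 bytes at DMA address 0x1002 gives offset = (4 - (0x1002 & 3)) & 3
 * = 2, so the first two bytes are staged in the align buffer with their
 * own descriptor, and the remaining 510 bytes at 0x1004 get a second,
 * now 32-bit aligned, descriptor.
 */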

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000 * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
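
/*
 * Worked example for sdhci_calc_timeout(): with timeout_clk = 50000
 * (i.e. 50 MHz, the field is in kHz) the base timeout is
 * (1 << 13) * 1000 / 50000 = 163 us.  A 100 ms target timeout needs
 * that doubled ten times (163 us * 2^10 ~= 167 ms), so count = 10,
 * which the hardware interprets as a timeout of 2^(13 + 10) clocks.
 */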

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
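
/*
 * Auto-CMD selection above: a multi-block transfer issued with a CMD23
 * set-block-count (cmd->mrq->sbc) uses Auto-CMD23 and the block count
 * is written to SDHCI_ARGUMENT2, while an open-ended transfer without
 * sbc falls back to Auto-CMD12 so the controller issues the STOP
 * command itself.
 */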

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	data = host->data;
	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	int i;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}
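
/*
 * On the 136-bit response path above the controller strips the CRC
 * byte, so the response sits in SDHCI_RESPONSE shifted right by 8
 * bits: each 32-bit word is shifted left by 8 and the top byte of the
 * word below it is OR-ed in to rebuild the original R2 layout.
 */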

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
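
/*
 * Divided-clock example for sdhci_calc_clk(): with max_clk = 200 MHz
 * and a 50 MHz request on a v3.00 host without a clock multiplier, the
 * even-divisor scan stops at div = 4 (200 MHz / 4 = 50 MHz), so
 * real_div = 4 and the value programmed into the divider field is
 * div >> 1 = 2.
 */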

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	spin_unlock_irq(&host->lock);
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	spin_lock_irq(&host->lock);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			      unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	if (host->ops->set_power)
		host->ops->set_power(host, mode, vdd);
	else if (!IS_ERR(mmc->supply.vmmc))
		sdhci_set_power_reg(host, mode, vdd);
	else
		sdhci_set_power(host, mode, vdd);
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1533 1534
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1535 1536 1537 1538
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	__sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot GPIO detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
1899 1900
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}

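/*
 * Called by the mmc core before switching to HS400: flag that the upcoming
 * tuning pass (performed in HS200 mode) is on behalf of HS400.
 */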
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly until Execute Tuning is set to 0 or the
	 * number of loops reaches MAX_TUNING_LOOP (40).
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.mrq = &mrq;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	if (tuning_count) {
		/*
		 * In case tuning fails, host controllers which support
		 * re-tuning can try tuning again at a later time, when the
		 * re-tuning timer expires.  So for these controllers, we
		 * return 0. Since there might be other controllers who do not
		 * have this capability, we return error for them.
		 */
		err = 0;
	}

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	return err;
}

static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * Only touch the Preset Value Enable bit when the requested state
	 * differs from the current one; otherwise bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

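/*
 * ->pre_req() lets the mmc core map a request's scatterlist ahead of time so
 * the DMA mapping cost overlaps with the previous transfer; ->post_req()
 * unmaps it once the request has completed.
 */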
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			       DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}

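/* The two helpers below are called with host->lock held. */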
static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}

static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

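/*
 * Complete one finished request, if any; returns true when no completed
 * request was pending so the caller can stop iterating.
 */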
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq) {
			host->mrqs_done[i] = NULL;
			break;
		}
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

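/* Called from sdhci_irq() with host->lock held. */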
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits.  Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error.  We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
	    host->cmd->opcode == MMC_STOP_TRANSMISSION)
		*mask &= ~SDHCI_INT_DATA_END;

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif

static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		if (data_cmd)
			host->data_cmd = NULL;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;

			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * On i.MX eSDHC the INSERT bit is set again
			 * immediately after being cleared while a card is
			 * inserted, and REMOVE behaves the same way.  Mask
			 * the IRQ to prevent an interrupt storm that would
			 * freeze the system.
			 *
			 * More testing is needed to ensure this works on
			 * other platforms, though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}

static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;
	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
		      SDHCI_INT_CARD_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}

int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);

int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);

int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
	mmc->ops->set_ios(mmc, &mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);

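/*
 * Typical call sequence from a platform driver, as a rough sketch only
 * (resource lookup and field values are driver-specific assumptions):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->ioaddr = ioremap(res->start, resource_size(res));
 *	host->irq = platform_get_irq(pdev, 0);
 *	ret = sdhci_add_host(host);
 */
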
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}

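/*
 * Read the host version and capability registers once per host; callers may
 * pass non-NULL overrides for controllers whose registers are unreliable.
 */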
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version < SDHCI_SPEC_300)
		return;

	host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1);
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);


int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
	 * that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}
		}

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto undma;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

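/* Second half of probing: sdhci_setup_host() must have succeeded first. */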
int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
		    (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED,	mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	mmiowb();

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	return __sdhci_add_host(host);
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
				mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");