/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

#ifdef CONFIG_PM_RUNTIME
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA)
		pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
		       readl(host->ioaddr + SDHCI_ADMA_ERROR),
		       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));

	pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	if (host->ops->platform_reset_enter)
		host->ops->platform_reset_enter(host, mask);

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	if (host->ops->platform_reset_exit)
		host->ops->platform_reset_exit(host, mask);

	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) {
		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
			host->ops->enable_dma(host);
	}
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	/*
	 * Retuning is affected by the particular card inserted and is only
	 * applicable to UHS-I cards, so reset these fields to their initial
	 * values when the card is removed.
	 */
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		host->flags &= ~SDHCI_USING_RETUNING_TIMER;

		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
		host->mmc->max_blk_count =
			(host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
	}
	sdhci_enable_card_detection(host);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
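/*
 * Note: SDHCI_BUFFER is the controller's 32-bit buffer data port, so a
 * standard 512-byte block is drained with 128 register reads; the loop
 * above unpacks each word least-significant byte first into the
 * scatterlist through the sg_miter iterator.
 */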

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
{
	__le32 *dataddr = (__le32 __force *)(desc + 4);
	__le16 *cmdlen = (__le16 __force *)desc;

	/* SDHCI specification says ADMA descriptors should be 4 byte
	 * aligned, so using 16 or 32bit operations should be safe. */

	cmdlen[0] = cpu_to_le16(cmd);
	cmdlen[1] = cpu_to_le16(len);

	dataddr[0] = cpu_to_le32(addr);
}
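/*
 * Layout note: each 32-bit ADMA2 descriptor written above is 8 bytes,
 * i.e. a 16-bit attribute word (valid/end/int plus the action bits), a
 * 16-bit length and a 32-bit data address.  The callers below pass 0x21
 * ("tran" action | valid) for data descriptors and 0x3 (end | valid)
 * for the terminating nop entry.
 */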

static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/*
	 * The ADMA descriptor table is mapped further down as we
	 * need to fill it with data first.
	 */

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, 128 * 4, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & 0x3);

	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
		data->sg, data->sg_len, direction);
	if (host->sg_count == 0)
		goto unmap_align;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_set_adma_desc(desc, align_addr, offset, 0x21);

			BUG_ON(offset > 65536);

			align += 4;
			align_addr += 4;

			desc += 8;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_set_adma_desc(desc, addr, len, 0x21);
		desc += 8;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		* Mark the last descriptor as the terminating descriptor
		*/
		if (desc != host->adma_desc) {
			desc -= 8;
			desc[0] |= 0x2; /* end */
		}
	} else {
		/*
		* Add a terminating entry.
		*/

		/* nop, end, valid */
		sdhci_set_adma_desc(desc, 0, 0, 0x3);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, 128 * 4, direction);
	}

	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
		goto unmap_entries;
	BUG_ON(host->adma_addr & 0x3);

	return 0;

unmap_entries:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);
fail:
	return -EINVAL;
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
		(128 * 2 + 1) * 4, DMA_TO_DEVICE);

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);

	if (data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = data->timeout_ns / 1000;
		if (host->clock)
			target_timeout += data->timeout_clks / host->clock;
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
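/*
 * Worked example (illustrative values, assuming timeout_clk is in kHz and
 * target_timeout in microseconds as above): with timeout_clk = 50000 and a
 * 100 ms target, the base timeout is 2^13 * 1000 / 50000 ~= 163 us, so ten
 * doublings are needed and count = 10, i.e. a 2^(13 + 10) clock timeout.
 */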

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY)) {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		/* clear Auto CMD settings for no data CMDs */
		mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
		sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		return;
	}

	WARN_ON(!host->data);

	mode = SDHCI_TRNS_BLK_CNT_EN;
	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode |= SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
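/*
 * In short: for multi-block transfers the controller can close the
 * transaction itself, either with Auto-CMD12 for open-ended requests or
 * with Auto-CMD23 when the core supplied a SET_BLOCK_COUNT (sbc) command,
 * whose argument is written to SDHCI_ARGUMENT2 above.
 */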

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				data->sg_len, (data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}
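/*
 * The 8-bit shift above compensates for the stripped CRC: the controller
 * stores bits [127:8] of a 136-bit R2 response in its 120-bit RESPONSE
 * field, so each 32-bit word is shifted up and topped up with one byte
 * read from the register below it.
 */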

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 ctrl, preset = 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ctrl & SDHCI_CTRL_UHS_MASK) {
	case SDHCI_CTRL_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case SDHCI_CTRL_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case SDHCI_CTRL_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case SDHCI_CTRL_UHS_SDR104:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case SDHCI_CTRL_UHS_DDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;

	if (clock && clock == host->clock)
		return;

	host->mmc->actual_clock = 0;

	if (host->ops->set_clock) {
		host->ops->set_clock(host, clock);
		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
			return;
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	if (host->version >= SDHCI_SPEC_300) {
		if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
			SDHCI_CTRL_PRESET_VAL_ENABLE) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			real_div = div;
			clk_mul = host->clk_mul;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;

	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
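/*
 * Divider example (illustrative values) for a v3.00 host without
 * programmable clock mode: with max_clk = 200 MHz and a requested 400 kHz,
 * the loop above settles on div = 500, so actual_clock becomes 400 kHz and
 * the value programmed into the divider fields is div >> 1 = 250.
 */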

static inline void sdhci_update_clock(struct sdhci_host *host)
{
	unsigned int clock;

	clock = host->clock;
	host->clock = 0;
	sdhci_set_clock(host, clock);
}

static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
		return -1;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
		return 0;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

	/*
	 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and turn on power at the same time, so set the voltage first.
	 */
	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

	if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
		sdhci_runtime_pm_bus_on(host);

	/*
	 * Some controllers need an extra 10ms delay before they
	 * can apply clock after applying power
	 */
	if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
		mdelay(10);

	return power;
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;
	u32 tuning_opcode;

	host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	/*
	 * Firstly check card presence from cd-gpio.  The return could
	 * be one of the following possibilities:
	 *     negative: cd-gpio is not available
	 *     zero: cd-gpio is used, and card is removed
	 *     one: cd-gpio is used, and card is present
	 */
	present = mmc_gpio_get_cd(host->mmc);
	if (present < 0) {
		/* If polling, assume that the card is always present. */
		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
			present = 1;
		else
			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
					SDHCI_CARD_PRESENT;
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		u32 present_state;

		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
		/*
		 * Check if the re-tuning timer has already expired and there
		 * is no on-going data transfer. If so, we need to execute
		 * tuning procedure before sending command.
		 */
		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
			if (mmc->card) {
				/* eMMC uses cmd21 but sd and sdio use cmd19 */
				tuning_opcode =
					mmc->card->type == MMC_TYPE_MMC ?
					MMC_SEND_TUNING_BLOCK_HS200 :
					MMC_SEND_TUNING_BLOCK;

				/* Here we need to set the host->mrq to NULL,
				 * in case the pending finish_tasklet
				 * finishes it incorrectly.
				 */
				host->mrq = NULL;

				spin_unlock_irqrestore(&host->lock, flags);
				sdhci_execute_tuning(mmc, tuning_opcode);
				spin_lock_irqsave(&host->lock, flags);

				/* Restore original mmc_request structure */
				host->mrq = mrq;
			}
		}

		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	int vdd_bit = -1;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		vdd_bit = sdhci_set_power(host, -1);
	else
		vdd_bit = sdhci_set_power(host, ios->vdd);

	if (host->vmmc && vdd_bit != -1) {
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
		spin_lock_irqsave(&host->lock, flags);
	}

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	/*
	 * If your platform has 8-bit width support but is not a v3 controller,
	 * or if it requires special setup code, you should implement that in
	 * platform_bus_width().
	 */
	if (host->ops->platform_bus_width) {
		host->ops->platform_bus_width(host, ios->bus_width);
	} else {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		if (ios->bus_width == MMC_BUS_WIDTH_8) {
			ctrl &= ~SDHCI_CTRL_4BITBUS;
			if (host->version >= SDHCI_SPEC_300)
				ctrl |= SDHCI_CTRL_8BITBUS;
		} else {
			if (host->version >= SDHCI_SPEC_300)
				ctrl &= ~SDHCI_CTRL_8BITBUS;
			if (ios->bus_width == MMC_BUS_WIDTH_4)
				ctrl |= SDHCI_CTRL_4BITBUS;
			else
				ctrl &= ~SDHCI_CTRL_4BITBUS;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock gliches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			sdhci_update_clock(host);
		}


		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		if (host->ops->set_uhs_signaling)
			host->ops->set_uhs_signaling(host, ios->timing);
		else {
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			/* Select Bus Speed Mode for host */
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
			if ((ios->timing == MMC_TIMING_MMC_HS200) ||
			    (ios->timing == MMC_TIMING_UHS_SDR104))
				ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
			else if (ios->timing == MMC_TIMING_UHS_SDR12)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
			else if (ios->timing == MMC_TIMING_UHS_SDR25)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
			else if (ios->timing == MMC_TIMING_UHS_SDR50)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
			else if ((ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))
				ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		}

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		sdhci_update_clock(host);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}

static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio_cd = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If polling/nonremovable, assume that the card is always present. */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return 1;

	/* Try slot gpio detect */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);
	return ret;
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);
	return ret;
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	sdhci_runtime_pm_get(host);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_runtime_pm_put(host);
}

static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (host->vqmmc) {
			ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
			if (ret) {
				pr_warning("%s: Switching to 3.3V signalling voltage failed\n",
						mmc_hostname(host->mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warning("%s: 3.3V regulator output did not become stable\n",
				mmc_hostname(host->mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (host->vqmmc) {
			ret = regulator_set_voltage(host->vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warning("%s: Switching to 1.8V signalling voltage failed\n",
						mmc_hostname(host->mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warning("%s: 1.8V regulator output did not become stable\n",
				mmc_hostname(host->mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (host->vqmmc) {
			ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000);
			if (ret) {
				pr_warning("%s: Switching to 1.2V signalling voltage failed\n",
						mmc_hostname(host->mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
	struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	if (host->version < SDHCI_SPEC_300)
		return 0;
	sdhci_runtime_pm_get(host);
	err = sdhci_do_start_signal_voltage_switch(host, ios);
	sdhci_runtime_pm_put(host);
	return err;
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	sdhci_runtime_pm_get(host);
	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
	sdhci_runtime_pm_put(host);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}

1845
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1846 1847 1848 1849 1850 1851
{
	struct sdhci_host *host;
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	unsigned long timeout;
	int err = 0;
1852
	bool requires_tuning_nonuhs = false;
1853
	unsigned long flags;
1854 1855 1856

	host = mmc_priv(mmc);

1857
	sdhci_runtime_pm_get(host);
1858
	spin_lock_irqsave(&host->lock, flags);
1859 1860 1861 1862

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	/*
1863 1864
	 * The Host Controller needs tuning only in case of SDR104 mode
	 * and for SDR50 mode when Use Tuning for SDR50 is set in the
1865
	 * Capabilities register.
1866 1867
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
1868
	 */
1869 1870
	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
	    (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
1871
	     host->flags & SDHCI_SDR104_NEEDS_TUNING))
1872 1873
		requires_tuning_nonuhs = true;

1874
	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
1875
	    requires_tuning_nonuhs)
1876 1877
		ctrl |= SDHCI_CTRL_EXEC_TUNING;
	else {
1878
		spin_unlock_irqrestore(&host->lock, flags);
1879
		sdhci_runtime_pm_put(host);
1880 1881 1882
		return 0;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		sdhci_runtime_pm_put(host);
		return err;
	}

	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	timeout = 150;
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		if (!tuning_loop_counter && !timeout)
			break;

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.error = 0;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for "
				"Buffer Read Ready interrupt during tuning "
				"procedure, falling back to fixed sampling "
				"clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		tuning_loop_counter--;
		timeout--;

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (!tuning_loop_counter || !timeout) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
		err = -EIO;
	} else {
		if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
			pr_info(DRIVER_NAME ": Tuning procedure"
				" failed, falling back to fixed sampling"
				" clock\n");
			err = -EIO;
		}
	}

out:
	/*
	 * If this is the very first time we are here, we start the retuning
	 * timer. The SDHCI_NEEDS_RETUNING flag is only clear on that first
	 * pass, so we check it before actually starting the timer.
	 */
	if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
	    (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
		host->flags |= SDHCI_USING_RETUNING_TIMER;
		mod_timer(&host->tuning_timer, jiffies +
			host->tuning_count * HZ);
		/* Tuning mode 1 limits the maximum data length to 4MB */
		mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
	} else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		host->flags &= ~SDHCI_NEEDS_RETUNING;
		/* Reload the new initial value for timer */
		mod_timer(&host->tuning_timer, jiffies +
			  host->tuning_count * HZ);
	}

	/*
	 * In case tuning fails, host controllers which support re-tuning can
	 * try tuning again at a later time, when the re-tuning timer expires.
	 * So for these controllers, we return 0. Since there might be other
	 * controllers who do not have this capability, we return error for
	 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
	 * a retuning timer to do the retuning for the card.
	 */
	if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
		err = 0;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);
	sdhci_runtime_pm_put(host);

	return err;
}

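/*
 * Flip the v3.00 Preset Value Enable bit in Host Control2 and mirror the
 * state in host->flags so it can be restored on runtime resume.
 */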
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	u16 ctrl;

	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
		ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
		host->flags |= SDHCI_PV_ENABLED;
	} else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
		ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
		host->flags &= ~SDHCI_PV_ENABLED;
	}
}

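/*
 * .card_event callback: let the platform driver react first through
 * ops->card_event; then, if a request was still in flight when the card
 * went away, reset the CMD/DATA state machines and fail the request with
 * -ENOMEDIUM via the finish tasklet.
 */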
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !sdhci_do_get_cd(host)) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

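/*
 * Bottom half that completes a request: stops the timeout timer, resets
 * the CMD/DATA state machines if the request ended in error (or a quirk
 * demands it), clears host->mrq/cmd/data and hands the request back to
 * the core through mmc_request_done().
 */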
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
		 (mrq->data && (mrq->data->error ||
		  (mrq->data->stop && mrq->data->stop->error))) ||
		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			sdhci_update_clock(host);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_tuning_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	host->flags |= SDHCI_NEEDS_RETUNING;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
			return;

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

#ifdef CONFIG_MMC_DEBUG
static void sdhci_show_adma_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	u8 *desc = host->adma_desc;
	__le32 *dma;
	__le16 *len;
	u8 attr;

	sdhci_dumpregs(host);

	while (true) {
		dma = (__le32 *)(desc + 4);
		len = (__le16 *)(desc + 2);
		attr = *desc;

		DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
		    name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);

		desc += 8;

		if (attr & 2)
			break;
	}
}
#else
static void sdhci_show_adma_error(struct sdhci_host *host) { }
#endif

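/*
 * Data interrupt handler.  Buffer Read Ready during tuning is special
 * cased first; a "data complete" with no data transfer in flight is
 * treated as end-of-busy for the current command; everything else either
 * records an error in host->data->error or advances the PIO/DMA transfer.
 */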
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;
	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_END) {
				sdhci_finish_command(host);
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_show_adma_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
				" next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

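/*
 * Hard interrupt handler.  Runs under host->lock, acknowledges and
 * dispatches command/data/bus-power sources directly, and defers card
 * insert/remove and SDIO card interrupts to sdhci_thread_irq() by
 * returning IRQ_WAKE_THREAD.
 */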
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc.  INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system.  And the REMOVE gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}

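/*
 * Threaded half of the interrupt handler: reports card insert/remove to
 * the core with a debounce delay and runs SDIO interrupts, re-enabling
 * the card interrupt afterwards if it is still wanted.
 */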
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
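/*
 * Arm the Wakeup Control register so card insert/remove and SDIO card
 * interrupts can wake the system from suspend; hosts with broken card
 * detection keep only the SDIO wakeup.  sdhci_suspend_host() pairs this
 * with enable_irq_wake() on the host interrupt.
 */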
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask ;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);

void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);

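/*
 * System suspend helper for glue drivers: stop re-tuning, then either
 * mask all interrupts and free the IRQ, or arm the wakeup sources above
 * and flag the IRQ as a wake source.
 */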
int sdhci_suspend_host(struct sdhci_host *host)
{
	if (host->ops->platform_suspend)
		host->ops->platform_suspend(host);

	sdhci_disable_card_detection(host);

	/* Disable tuning since we are suspending */
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);

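/*
 * System resume counterpart: re-enable DMA, re-request the IRQ (or disarm
 * the wakeups), reinitialise the controller (fully, or preserving card
 * power for MMC_PM_KEEP_POWER hosts) and mark re-tuning as needed when a
 * re-tuning timer is in use.
 */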
int sdhci_resume_host(struct sdhci_host *host)
{
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	sdhci_enable_card_detection(host);

	if (host->ops->platform_resume)
		host->ops->platform_resume(host);

	/* Set the re-tuning expiration flag */
	if (host->flags & SDHCI_USING_RETUNING_TIMER)
		host->flags |= SDHCI_NEEDS_RETUNING;

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return pm_runtime_get_sync(host->mmc->parent);
}

static int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	pm_runtime_mark_last_busy(host->mmc->parent);
	return pm_runtime_put_autosuspend(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->runtime_suspended || host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (host->runtime_suspended || !host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

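/*
 * Runtime PM suspend: stop the tuning timer, mask every interrupt source
 * except the SDIO card interrupt, wait out a possibly running handler via
 * synchronize_hardirq() and mark the host runtime-suspended so the IRQ
 * handler bails out early.  sdhci_runtime_resume_host() undoes all of it.
 */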
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;
	int ret = 0;

	/* Disable tuning since we are suspending */
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	int ret = 0, host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_set_ios(host, &host->mmc->ios);

	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	if ((host_flags & SDHCI_PV_ENABLED) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	/* Set the re-tuning expiration flag */
	if (host->flags & SDHCI_USING_RETUNING_TIMER)
		host->flags |= SDHCI_NEEDS_RETUNING;

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

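/*
 * Allocate an mmc_host with an sdhci_host (plus priv_size bytes of glue
 * private data) embedded in it.  As a rough, illustrative sketch only
 * (the names below are made up, not part of this file), a glue driver's
 * probe path usually looks like:
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_glue_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	... set up resources, quirks and host->ops ...
 *	ret = sdhci_add_host(host);
 *
 * with sdhci_remove_host() and sdhci_free_host() on the way out.
 */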
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);

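/*
 * Second half of host registration: read the capability registers, choose
 * between SDMA/ADMA and PIO, derive clock, timeout and voltage limits, set
 * up the tasklet, timers and interrupt handler, and finally register the
 * mmc_host with the core.  The glue driver is expected to have set up its
 * resources, quirks and ops before calling this.
 */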
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 caps[2] = {0, 0};
	u32 max_current_caps;
	unsigned int ocr_avail;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	if (debug_quirks)
		host->quirks = debug_quirks;
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),
			host->version);
	}

	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				pr_warning("%s: No suitable DMA "
					"available. Falling back to PIO.\n",
					mmc_hostname(mmc));
				host->flags &=
					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
			}
		}
	}

	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * We need to allocate descriptors for all sg entries
		 * (128) and potentially one alignment transfer for
		 * each of those entries.
		 */
		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
		if (!host->adma_desc || !host->align_buffer) {
			kfree(host->adma_desc);
			kfree(host->align_buffer);
			pr_warning("%s: Unable to allocate ADMA "
				"buffers. Falling back to standard DMA.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_max = host->max_clk;
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			mmc->f_max = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	host->timeout_clk =
		(caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		if (host->ops->get_timeout_clock) {
			host->timeout_clk = host->ops->get_timeout_clock(host);
		} else if (!(host->quirks &
				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
			pr_err("%s: Hardware doesn't specify timeout clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
	}
	if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
		host->timeout_clk = mmc->f_max / 1000;

	mmc->max_busy_timeout = (1 << 27) / host->timeout_clk;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA))) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (caps[0] & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    !(host->mmc->caps & MMC_CAP_NONREMOVABLE))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	host->vqmmc = regulator_get_optional(mmc_dev(mmc), "vqmmc");
	if (IS_ERR_OR_NULL(host->vqmmc)) {
		if (PTR_ERR(host->vqmmc) < 0) {
			pr_info("%s: no vqmmc regulator found\n",
				mmc_hostname(mmc));
			host->vqmmc = NULL;
		}
	} else {
		ret = regulator_enable(host->vqmmc);
		if (!regulator_is_supported_voltage(host->vqmmc, 1700000,
			1950000))
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
					SDHCI_SUPPORT_SDR50 |
					SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			host->vqmmc = NULL;
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50);

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
		mmc->caps |= MMC_CAP_UHS_SDR50;

	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
		!(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Does the host need tuning for SDR104 / HS200? */
	if (mmc->caps2 & MMC_CAP2_HS200)
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	host->vmmc = regulator_get_optional(mmc_dev(mmc), "vmmc");
	if (IS_ERR_OR_NULL(host->vmmc)) {
		if (PTR_ERR(host->vmmc) < 0) {
			pr_info("%s: no vmmc regulator found\n",
				mmc_hostname(mmc));
			host->vmmc = NULL;
		}
	}

#ifdef CONFIG_REGULATOR
	/*
	 * Voltage range check makes sense only if regulator reports
	 * any voltage value.
	 */
	if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) {
		ret = regulator_is_supported_voltage(host->vmmc, 2700000,
			3600000);
		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
			caps[0] &= ~SDHCI_CAN_VDD_330;
		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300)))
			caps[0] &= ~SDHCI_CAN_VDD_300;
		ret = regulator_is_supported_voltage(host->vmmc, 1700000,
			1950000);
		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180)))
			caps[0] &= ~SDHCI_CAN_VDD_180;
	}
#endif /* CONFIG_REGULATOR */

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && host->vmmc) {
		u32 curr = regulator_get_current_limit(host->vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (caps[0] & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any "
			"support voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = 128;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = 128;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warning("%s: Invalid maximum block size, "
				"assuming 512 bytes\n", mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	if (host->version >= SDHCI_SPEC_300) {
		init_waitqueue_head(&host->buf_ready_int);

		/* Initialize re-tuning timer */
		init_timer(&host->tuning_timer);
		host->tuning_timer.data = (unsigned long)host;
		host->tuning_timer.function = sdhci_tuning_timer;
	}

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED,	mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif

	mmiowb();

	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);

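/*
 * Unregister a host.  With @dead set the hardware is assumed unreachable
 * (e.g. surprise removal): any request still in flight is failed with
 * -ENOMEDIUM and the final reset is skipped.  Otherwise the controller is
 * reset and the IRQ, timers, regulators and ADMA buffers are released.
 */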
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during "
				" transfer!\n", mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(host->mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}

	if (host->vqmmc) {
		regulator_disable(host->vqmmc);
		regulator_put(host->vqmmc);
	}

	kfree(host->adma_desc);
	kfree(host->align_buffer);

	host->adma_desc = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");