/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
					struct mmc_data *data,
					struct sdhci_host_next *next);

60
#ifdef CONFIG_PM
/* Real implementations live later in this file when runtime PM is built in. */
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
/* !CONFIG_PM: no-op stubs so callers never need their own #ifdefs. */
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif

82 83
/* Dump the controller's register file to the debug log (diagnostics only). */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));

	/* ADMA registers are bypass-read with readl() as they are not part of
	 * the standard sdhci_read* accessor set for all layouts. */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

144 145
/*
 * Enable or disable card insert/remove interrupts.  When enabling, only the
 * "opposite" event of the current card state is armed (remove if a card is
 * present, insert otherwise) to avoid a spurious immediate interrupt.
 */
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	/* Nothing to do when detection is broken or the card can't be removed. */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	/* Mirror the cached mask into both enable registers. */
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

/* Convenience wrapper: arm card-detect interrupts. */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

/* Convenience wrapper: disarm card-detect interrupts. */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

176
/*
 * Issue a software reset (mask selects CMD/DATA/ALL lines) and busy-wait up
 * to 100 ms for the controller to clear the bit.  Exported as the default
 * ->reset implementation for platform drivers.
 */
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		/* Clock config is lost; force reprogramming on next use. */
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

/*
 * Reset via the host's ->reset op, honouring the no-card-no-reset quirk,
 * and re-establish DMA / preset state after a full reset.
 */
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		/* Some controllers hang on reset when no card is inserted. */
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		/* A full reset drops platform DMA setup; redo it. */
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

227 228 229
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

/*
 * (Re-)initialise the controller.  soft != 0 resets only CMD/DATA state
 * machines and then reapplies the current ios; soft == 0 does a full reset.
 */
static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	/* Baseline interrupt mask: command/data completion and error events. */
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}
251

252 253
/* Full re-init after e.g. a card change: hard reset, drop retuning state,
 * and re-arm card detection. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	/*
	 * Retuning stuffs are affected by different cards inserted and only
	 * applicable to UHS-I cards. So reset these fields to their initial
	 * value when card is removed.
	 */
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		host->flags &= ~SDHCI_USING_RETUNING_TIMER;

		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
	}
	sdhci_enable_card_detection(host);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

273
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
274
	ctrl |= SDHCI_CTRL_LED;
275
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
276 277 278 279 280 281
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

282
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
283
	ctrl &= ~SDHCI_CTRL_LED;
284
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
285 286
}

287
#ifdef SDHCI_USE_LEDS_CLASS
288 289 290 291 292 293 294 295
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

296 297 298
	if (host->runtime_suspended)
		goto out;

299 300 301 302
	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
303
out:
304 305 306 307
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif

308 309 310 311 312 313
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

P
Pierre Ossman 已提交
314
/*
 * Read one data block from the controller's PIO buffer register into the
 * request's scatterlist.  The 32-bit FIFO word is unpacked one byte at a
 * time ('chunk' counts bytes remaining in 'scratch'), so a word may span
 * scatterlist entries.  Runs with local IRQs off for the sg_miter mapping.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				/* Refill: pull the next 32-bit word. */
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
358

P
Pierre Ossman 已提交
359 360
/*
 * Write one data block from the scatterlist into the controller's PIO
 * buffer register.  Bytes are packed little-endian into 'scratch' and the
 * word is flushed when full — or at the very end of the block, so a
 * partial trailing word is still written.  Runs with local IRQs off.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Flush on a full word or at end of the block. */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

/*
 * Pump as many blocks as the controller currently has buffer space/data
 * for, polling the present-state register between blocks.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

445 446 447
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
448
	return kmap_atomic(sg_page(sg)) + sg->offset;
449 450 451 452
}

/* Undo sdhci_kmap_atomic(): unmap first, then restore the saved IRQ state. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

457 458
/*
 * Fill in one ADMA2 descriptor (little-endian).  The 32-bit and 64-bit
 * descriptor layouts share cmd/len/addr_lo offsets; addr_hi is written
 * only when the host runs 64-bit DMA.
 */
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *d = desc;

	d->cmd = cpu_to_le16(cmd);
	d->len = cpu_to_le16(len);
	d->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		d->addr_hi = cpu_to_le32((u64)addr >> 32);
}

471 472
static void sdhci_adma_mark_end(void *desc)
{
473
	struct sdhci_adma2_64_desc *dma_desc = desc;
474

475
	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
476
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
477 478
}

479
/*
 * Build the ADMA2 descriptor table for a request.  Unaligned leading bytes
 * of each sg entry are routed through the pre-allocated bounce ("align")
 * buffer so every descriptor address is 32-bit aligned, as the SDHCI spec
 * requires.  Returns 0 on success, -EINVAL if the sg list could not be
 * mapped.
 */
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	void *desc;
	void *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, host->align_buffer_sz, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & host->align_mask);

	host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
	if (host->sg_count < 0)
		goto unmap_align;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (host->align_sz - (addr & host->align_mask)) &
			 host->align_mask;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				/* Copy the misaligned head into the bounce buffer. */
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += host->align_sz;
			align_addr += host->align_sz;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
		desc += host->desc_sz;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		* Mark the last descriptor as the terminating descriptor
		*/
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/*
		* Add a terminating entry.
		*/

		/* nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, host->align_buffer_sz, direction);
	}

	return 0;

unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);
fail:
	return -EINVAL;
}

/*
 * Tear down after an ADMA transfer: unmap the align (bounce) buffer, copy
 * any bounced bytes back into the scatterlist on reads, then unmap the sg
 * list (unless a pre-mapped host_cookie owns the mapping).
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;
	bool has_unaligned;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);

	/* Do a quick scan of the SG list for any unaligned mappings */
	has_unaligned = false;
	for_each_sg(data->sg, sg, host->sg_count, i)
		if (sg_dma_address(sg) & host->align_mask) {
			has_unaligned = true;
			break;
		}

	if (has_unaligned && data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & host->align_mask) {
				size = host->align_sz -
				       (sg_dma_address(sg) & host->align_mask);

				/* Copy the bounced head bytes back to the sg entry. */
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += host->align_sz;
			}
		}
	}

	if (!data->host_cookie)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);
}

655
/*
 * Compute the 4-bit value for the timeout-control register: the smallest
 * power-of-two multiple of the base timeout clock that covers the
 * command's (or data's) required timeout.  Falls back to the maximum
 * (0xE) when the controller's timeout information is unreliable or the
 * requested timeout is unspecified/too large.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = data->timeout_ns / 1000;
		if (host->clock)
			target_timeout += data->timeout_clks / host->clock;
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

711 712 713 714 715 716
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
717
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
718
	else
719 720 721 722
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
723 724
}

725
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
726 727
{
	u8 count;
728 729 730 731 732 733 734 735 736 737 738

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

/*
 * Prepare the controller for a command's data phase: program the timeout,
 * validate the request, choose DMA vs PIO (falling back to PIO on
 * size/alignment quirks or mapping failure), program the DMA/ADMA address
 * or start a PIO sg iterator, select transfer IRQs, and set block
 * size/count.  Safe to call for commands without data.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Controllers with 32-bit-size quirks can't DMA odd lengths. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
				if (host->flags & SDHCI_USE_64_BIT_DMA)
					sdhci_writel(host,
						     (u64)host->adma_addr >> 32,
						     SDHCI_ADMA_ADDRESS_HI);
			}
		} else {
			int sg_cnt;

			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				/* SDMA can only handle a single contiguous segment. */
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

/*
 * Program the transfer-mode register for a command: multi-block,
 * Auto-CMD12/CMD23, read/write direction and DMA enable.  For commands
 * without data the Auto-CMD bits are cleared (or the whole register, per
 * quirk) so stale settings can't leak into the next command.
 */
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
		/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	mode = SDHCI_TRNS_BLK_CNT_EN;
	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode |= SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

/*
 * Complete the data phase: undo DMA mappings, account transferred bytes,
 * and either issue the stop command (resetting the state machines first on
 * error) or schedule request completion.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			/* host_cookie means the mapping is owned by pre_req. */
			if (!data->host_cookie)
				dma_unmap_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

1004
/*
 * Issue a command to the controller: wait for the CMD (and, when needed,
 * DATA) inhibit bits to clear, arm the software watchdog timer, prepare
 * the data phase, then write argument, transfer mode and finally the
 * command register (which starts execution).
 */
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Watchdog: long busy commands get their own margin over 10 s. */
	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093

static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
1094
				host->cmd->resp[i] = sdhci_readl(host,
1095 1096 1097
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
1098
						sdhci_readb(host,
1099 1100 1101
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
1102
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1103 1104 1105
		}
	}

P
Pierre Ossman 已提交
1106
	host->cmd->error = 0;
1107

1108 1109 1110 1111 1112
	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {
1113

1114 1115 1116
		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);
1117

1118 1119 1120 1121 1122
		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
1123 1124
}

1125 1126
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
1127
	u16 preset = 0;
1128

1129 1130
	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
1131 1132
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
1133
	case MMC_TIMING_UHS_SDR25:
1134 1135
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
1136
	case MMC_TIMING_UHS_SDR50:
1137 1138
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
1139 1140
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
1141 1142
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
1143
	case MMC_TIMING_UHS_DDR50:
1144 1145
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
1146 1147 1148
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
1149 1150 1151 1152 1153 1154 1155 1156 1157
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

1158
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1159
{
1160
	int div = 0; /* Initialized for compiler warning */
1161
	int real_div = div, clk_mul = 1;
1162
	u16 clk = 0;
1163
	unsigned long timeout;
1164

1165 1166
	host->mmc->actual_clock = 0;

1167
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1168 1169

	if (clock == 0)
1170
		return;
1171

1172
	if (host->version >= SDHCI_SPEC_300) {
1173
		if (host->preset_enabled) {
1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

1191 1192 1193 1194 1195
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
1196 1197 1198 1199 1200
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
1201
			/*
1202 1203
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
1204
			 */
1205 1206 1207 1208
			clk = SDHCI_PROG_CLOCK_MODE;
			real_div = div;
			clk_mul = host->clk_mul;
			div--;
1209 1210 1211 1212 1213 1214 1215 1216 1217 1218
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
1219
			}
1220
			real_div = div;
1221
			div >>= 1;
1222 1223 1224
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
1225
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1226 1227 1228
			if ((host->max_clk / div) <= clock)
				break;
		}
1229
		real_div = div;
1230
		div >>= 1;
1231 1232
	}

1233
clock_set:
1234
	if (real_div)
1235
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
1236
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1237 1238
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
1239
	clk |= SDHCI_CLOCK_INT_EN;
1240
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1241

1242 1243
	/* Wait max 20 ms */
	timeout = 20;
1244
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1245 1246
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
1247
			pr_err("%s: Internal clock never "
P
Pierre Ossman 已提交
1248
				"stabilised.\n", mmc_hostname(host->mmc));
1249 1250 1251
			sdhci_dumpregs(host);
			return;
		}
1252 1253 1254
		timeout--;
		mdelay(1);
	}
1255 1256

	clk |= SDHCI_CLOCK_CARD_EN;
1257
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1258
}
1259
EXPORT_SYMBOL_GPL(sdhci_set_clock);
1260

1261 1262
static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			    unsigned short vdd)
1263
{
1264
	struct mmc_host *mmc = host->mmc;
1265
	u8 pwr = 0;
1266

1267 1268
	if (!IS_ERR(mmc->supply.vmmc)) {
		spin_unlock_irq(&host->lock);
1269
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1270
		spin_lock_irq(&host->lock);
1271 1272 1273 1274 1275 1276

		if (mode != MMC_POWER_OFF)
			sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
		else
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

1277 1278 1279
		return;
	}

1280 1281
	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
1299
		return;
1300

1301 1302 1303
	host->pwr = pwr;

	if (pwr == 0) {
1304
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1305 1306
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
1307
		vdd = 0;
1308 1309 1310 1311 1312 1313 1314
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1315

1316 1317 1318 1319 1320 1321 1322
		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1323

1324
		pwr |= SDHCI_POWER_ON;
1325

1326
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1327

1328 1329
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);
1330

1331 1332 1333 1334 1335 1336 1337
		/*
		 * Some controllers need an extra 10ms delay of 10ms before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
1338 1339
}

1340 1341 1342 1343 1344 1345 1346 1347 1348
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
1349
	int present;
1350
	unsigned long flags;
1351
	u32 tuning_opcode;
1352 1353 1354

	host = mmc_priv(mmc);

1355 1356
	sdhci_runtime_pm_get(host);

1357 1358
	present = mmc_gpio_get_cd(host->mmc);

1359 1360 1361 1362
	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

1363
#ifndef SDHCI_USE_LEDS_CLASS
1364
	sdhci_activate_led(host);
1365
#endif
1366 1367 1368 1369 1370 1371

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1372 1373 1374 1375 1376
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}
1377 1378 1379

	host->mrq = mrq;

1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393
	/*
	 * Firstly check card presence from cd-gpio.  The return could
	 * be one of the following possibilities:
	 *     negative: cd-gpio is not available
	 *     zero: cd-gpio is used, and card is removed
	 *     one: cd-gpio is used, and card is present
	 */
	if (present < 0) {
		/* If polling, assume that the card is always present. */
		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
			present = 1;
		else
			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
					SDHCI_CARD_PRESENT;
1394 1395
	}

1396
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
P
Pierre Ossman 已提交
1397
		host->mrq->cmd->error = -ENOMEDIUM;
1398
		tasklet_schedule(&host->finish_tasklet);
1399 1400 1401 1402 1403 1404
	} else {
		u32 present_state;

		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
		/*
		 * Check if the re-tuning timer has already expired and there
1405 1406
		 * is no on-going data transfer and DAT0 is not busy. If so,
		 * we need to execute tuning procedure before sending command.
1407 1408
		 */
		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
1409 1410
		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
		    (present_state & SDHCI_DATA_0_LVL_MASK)) {
1411 1412 1413 1414 1415 1416
			if (mmc->card) {
				/* eMMC uses cmd21 but sd and sdio use cmd19 */
				tuning_opcode =
					mmc->card->type == MMC_TYPE_MMC ?
					MMC_SEND_TUNING_BLOCK_HS200 :
					MMC_SEND_TUNING_BLOCK;
1417 1418 1419 1420 1421 1422 1423

				/* Here we need to set the host->mrq to NULL,
				 * in case the pending finish_tasklet
				 * finishes it incorrectly.
				 */
				host->mrq = NULL;

1424 1425 1426 1427 1428 1429 1430
				spin_unlock_irqrestore(&host->lock, flags);
				sdhci_execute_tuning(mmc, tuning_opcode);
				spin_lock_irqsave(&host->lock, flags);

				/* Restore original mmc_request structure */
				host->mrq = mrq;
			}
1431 1432
		}

1433
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1434 1435 1436
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
1437
	}
1438

1439
	mmiowb();
1440 1441 1442
	spin_unlock_irqrestore(&host->lock, flags);
}

1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463
/*
 * Program the host control register for the requested bus width.
 * The 8-bit flag only exists on SDHCI v3.00+ controllers.
 */
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (width == MMC_BUS_WIDTH_8) {
		/* 8-bit mode: make sure the 4-bit flag is clear */
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		/* clear first, then set only for 4-bit mode */
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1483 1484
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1485 1486 1487 1488
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

1489
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1490 1491 1492
{
	unsigned long flags;
	u8 ctrl;
1493
	struct mmc_host *mmc = host->mmc;
1494 1495 1496

	spin_lock_irqsave(&host->lock, flags);

A
Adrian Hunter 已提交
1497 1498
	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
1499 1500
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
1501
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
A
Adrian Hunter 已提交
1502 1503
		return;
	}
P
Pierre Ossman 已提交
1504

1505 1506 1507 1508 1509
	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
1510
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1511
		sdhci_reinit(host);
1512 1513
	}

1514
	if (host->version >= SDHCI_SPEC_300 &&
1515 1516
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1517 1518
		sdhci_enable_preset_value(host, false);

1519
	if (!ios->clock || ios->clock != host->clock) {
1520
		host->ops->set_clock(host, ios->clock);
1521
		host->clock = ios->clock;
1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
1534
	}
1535

1536
	sdhci_set_power(host, ios->power_mode, ios->vdd);
1537

1538 1539 1540
	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

1541
	host->ops->set_bus_width(host, ios->bus_width);
1542

1543
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1544

1545 1546 1547
	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1548 1549 1550 1551
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

1552
	if (host->version >= SDHCI_SPEC_300) {
1553 1554 1555
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
1556 1557
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
1558
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
1559
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
1560 1561
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
1562
		    (ios->timing == MMC_TIMING_UHS_SDR25))
1563
			ctrl |= SDHCI_CTRL_HISPD;
1564

1565
		if (!host->preset_enabled) {
1566
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1567 1568 1569 1570
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
1571
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1572 1573 1574 1575 1576 1577 1578
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock gliches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
1595
			host->ops->set_clock(host, host->clock);
1596
		}
1597 1598 1599 1600 1601 1602

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

1603
		host->ops->set_uhs_signaling(host, ios->timing);
1604
		host->timing = ios->timing;
1605

1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619
		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

1620
		/* Re-enable SD Clock */
1621
		host->ops->set_clock(host, host->clock);
1622 1623
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1624

1625 1626 1627 1628 1629
	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
1630
	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1631
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1632

1633
	mmiowb();
1634 1635 1636
	spin_unlock_irqrestore(&host->lock, flags);
}

1637 1638 1639 1640 1641 1642 1643 1644 1645
/*
 * mmc_host_ops .set_ios callback: thin wrapper that brackets
 * sdhci_do_set_ios() with a runtime PM get/put pair.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}

1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676
/*
 * Determine card presence: 1 if a card is present, 0 otherwise.
 * Checked in priority order: dead device, polling/non-removable quirks,
 * cd-gpio, and finally the controller's native present-state bit.
 */
static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* Polling or non-removable slots report the card as always there. */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return 1;

	/* A valid cd-gpio reading takes precedence over the controller. */
	if (!IS_ERR_VALUE(gpio))
		return !!gpio;

	/* Fall back to the controller's own card-detect state. */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

/*
 * mmc_host_ops .get_cd callback: thin wrapper that brackets
 * sdhci_do_get_cd() with a runtime PM get/put pair.
 */
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);
	return ret;
}

1677
static int sdhci_check_ro(struct sdhci_host *host)
1678 1679
{
	unsigned long flags;
1680
	int is_readonly;
1681 1682 1683

	spin_lock_irqsave(&host->lock, flags);

P
Pierre Ossman 已提交
1684
	if (host->flags & SDHCI_DEVICE_DEAD)
1685 1686 1687
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
P
Pierre Ossman 已提交
1688
	else
1689 1690
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);
1691 1692 1693

	spin_unlock_irqrestore(&host->lock, flags);

1694 1695 1696
	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
1697 1698
}

1699 1700
#define SAMPLE_COUNT	5

1701
static int sdhci_do_get_ro(struct sdhci_host *host)
1702 1703 1704 1705
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1706
		return sdhci_check_ro(host);
1707 1708 1709

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
1710
		if (sdhci_check_ro(host)) {
1711 1712 1713 1714 1715 1716 1717 1718
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

1719 1720 1721 1722 1723 1724 1725 1726
/*
 * mmc_host_ops .hw_reset callback: delegate to the platform hw_reset
 * hook when one is provided; otherwise a no-op.
 */
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

1727
/*
 * mmc_host_ops .get_ro callback: thin wrapper that brackets
 * sdhci_do_get_ro() with a runtime PM get/put pair.
 * Defect fixed: interleaved blame/line-number artifacts corrupted the body.
 */
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
P
Pierre Ossman 已提交
1737

1738 1739
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
1740
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1741
		if (enable)
1742
			host->ier |= SDHCI_INT_CARD_INT;
1743
		else
1744 1745 1746 1747
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1748 1749
		mmiowb();
	}
1750 1751 1752 1753 1754 1755
}

/*
 * mmc_host_ops .enable_sdio_irq callback: record the requested state in
 * host->flags and apply it under host->lock, with runtime PM held.
 * Defect fixed: interleaved blame/line-number artifacts corrupted the body.
 */
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	sdhci_runtime_pm_get(host);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_runtime_pm_put(host);
}

1771
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1772
						struct mmc_ios *ios)
1773
{
1774
	struct mmc_host *mmc = host->mmc;
1775
	u16 ctrl;
1776
	int ret;
1777

1778 1779 1780 1781 1782 1783
	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;
1784

1785 1786
	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

1787
	switch (ios->signal_voltage) {
1788 1789 1790 1791
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1792

1793 1794 1795
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
1796
			if (ret) {
J
Joe Perches 已提交
1797 1798
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
1799 1800 1801 1802 1803
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);
1804

1805 1806 1807 1808
		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;
1809

J
Joe Perches 已提交
1810 1811
		pr_warn("%s: 3.3V regulator output did not became stable\n",
			mmc_hostname(mmc));
1812 1813 1814

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
1815 1816
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
1817 1818
					1700000, 1950000);
			if (ret) {
J
Joe Perches 已提交
1819 1820
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
1821 1822 1823
				return -EIO;
			}
		}
1824 1825 1826 1827 1828

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
1829 1830
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1831

1832 1833 1834 1835
		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;
1836

J
Joe Perches 已提交
1837 1838
		pr_warn("%s: 1.8V regulator output did not became stable\n",
			mmc_hostname(mmc));
1839

1840 1841
		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
1842 1843 1844
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
1845
			if (ret) {
J
Joe Perches 已提交
1846 1847
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
1848
				return -EIO;
1849 1850
			}
		}
1851
		return 0;
1852
	default:
1853 1854
		/* No signal voltage switch required */
		return 0;
1855
	}
1856 1857
}

1858
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1859
	struct mmc_ios *ios)
1860 1861 1862 1863 1864 1865 1866
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	if (host->version < SDHCI_SPEC_300)
		return 0;
	sdhci_runtime_pm_get(host);
1867
	err = sdhci_do_start_signal_voltage_switch(host, ios);
1868 1869 1870 1871
	sdhci_runtime_pm_put(host);
	return err;
}

1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884
/*
 * mmc_host_ops .card_busy callback: the card signals busy by holding
 * DAT[3:0] low, so report busy when the data-level bits read as 0000.
 */
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 state;

	sdhci_runtime_pm_get(host);
	state = sdhci_readl(host, SDHCI_PRESENT_STATE);
	sdhci_runtime_pm_put(host);

	/* Busy when DAT[3:0] are all low */
	return !(state & SDHCI_DATA_LVL_MASK);
}

1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896
/*
 * mmc_host_ops .prepare_hs400_tuning callback: flag that the upcoming
 * tuning cycle is for HS400; sdhci_execute_tuning() consumes and clears
 * this flag.  Always succeeds.
 */
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

1897
/*
 * Execute the SDHCI v3.00 tuning procedure for the current timing:
 * decide whether tuning applies (HS200/SDR104, conditionally SDR50;
 * HS400 itself is rejected - its tuning runs in HS200 mode), delegate to
 * the platform hook when present, otherwise repeatedly issue the tuning
 * command (CMD19/CMD21) until the controller clears Execute Tuning or
 * MAX_TUNING_LOOP attempts are spent.  Arms the periodic re-tuning timer
 * when the controller advertises a tuning count.
 * Defect fixed: interleaved blame/line-number artifacts corrupted the
 * body; split log strings are merged onto single lines.
 */
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	sdhci_runtime_pm_get(host);
	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning only in case of SDR104 mode
	 * and for SDR50 mode when Use Tuning for SDR50 is set in the
	 * Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
		    host->flags & SDHCI_SDR104_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		sdhci_runtime_pm_put(host);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	host->flags &= ~SDHCI_NEEDS_RETUNING;

	if (tuning_count) {
		host->flags |= SDHCI_USING_RETUNING_TIMER;
		mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
	}

	/*
	 * In case tuning fails, host controllers which support re-tuning can
	 * try tuning again at a later time, when the re-tuning timer expires.
	 * So for these controllers, we return 0. Since there might be other
	 * controllers who do not have this capability, we return error for
	 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
	 * a retuning timer to do the retuning for the card.
	 */
	if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
		err = 0;

	/* Restore the normal interrupt mask clobbered for tuning */
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	sdhci_runtime_pm_put(host);

	return err;
}

2099 2100

/*
 * Enable or disable the controller's Preset Value registers.
 *
 * No-op on controllers older than v3.00, which do not define preset
 * value registers.  Writes SDHCI_HOST_CONTROL2 and updates
 * host->preset_enabled / SDHCI_PV_ENABLED to mirror the hardware state.
 */
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
					 data->flags & MMC_DATA_WRITE ?
					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mrq->data->host_cookie = 0;
	}
}

/*
 * Map (or reuse a pre-mapped) scatterlist for a DMA transfer.
 *
 * When @next is non-NULL this is a pre-mapping for a future request and a
 * cookie is stored in data->host_cookie; when @next is NULL the mapping is
 * for the current request, reusing the cached one if the cookie matches.
 * Returns the scatterlist entry count, or -EINVAL if mapping failed.
 */
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				       struct mmc_data *data,
				       struct sdhci_host_next *next)
{
	int sg_count;

	/* Drop a stale cookie left over from a previous request. */
	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
			__func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!next && data->host_cookie == host->next_data.cookie) {
		/* The next job was already mapped by sdhci_pre_req(). */
		sg_count = host->next_data.sg_count;
		host->next_data.sg_count = 0;
	} else {
		sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
				      data->sg_len,
				      data->flags & MMC_DATA_WRITE ?
				      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	if (sg_count == 0)
		return -EINVAL;

	if (next) {
		next->sg_count = sg_count;
		/* Cookie 0 means "unmapped", so skip it when wrapping. */
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else {
		host->sg_count = sg_count;
	}

	return sg_count;
}

/*
 * mmc_host_ops .pre_req hook: pre-map the next request's data for DMA so
 * the mapping cost overlaps with the current transfer.
 */
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	/* A non-zero cookie here means the request was never unmapped. */
	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA))
		return;

	if (sdhci_pre_dma_transfer(host, data, &host->next_data) < 0)
		data->host_cookie = 0;
}

2200
static void sdhci_card_event(struct mmc_host *mmc)
2201
{
2202
	struct sdhci_host *host = mmc_priv(mmc);
2203
	unsigned long flags;
2204
	int present;
2205

2206 2207 2208 2209
	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

2210 2211
	present = sdhci_do_get_cd(host);

2212 2213
	spin_lock_irqsave(&host->lock, flags);

2214
	/* Check host->mrq first in case we are runtime suspended */
2215
	if (host->mrq && !present) {
2216
		pr_err("%s: Card removed during transfer!\n",
2217
			mmc_hostname(host->mmc));
2218
		pr_err("%s: Resetting controller.\n",
2219
			mmc_hostname(host->mmc));
2220

2221 2222
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
2223

2224 2225
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
2226 2227 2228
	}

	spin_unlock_irqrestore(&host->lock, flags);
2229 2230 2231 2232
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
2233 2234
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
2235
	.set_ios	= sdhci_set_ios,
2236
	.get_cd		= sdhci_get_cd,
2237 2238 2239 2240
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2241
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2242 2243
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
2244
	.card_busy	= sdhci_card_busy,
2245 2246 2247 2248 2249 2250 2251 2252
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

2253 2254 2255 2256 2257 2258 2259 2260
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

2261 2262
	spin_lock_irqsave(&host->lock, flags);

2263 2264 2265 2266
        /*
         * If this tasklet gets rescheduled while running, it will
         * be run again afterwards but without any active request.
         */
2267 2268
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
2269
		return;
2270
	}
2271 2272 2273 2274 2275 2276 2277 2278 2279

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
P
Pierre Ossman 已提交
2280
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
2281
	    ((mrq->cmd && mrq->cmd->error) ||
2282 2283 2284 2285
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2286 2287

		/* Some controllers need this kick or reset won't work here */
2288
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2289
			/* This is to force an update */
2290
			host->ops->set_clock(host, host->clock);
2291 2292 2293

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
2294 2295
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
2296 2297 2298 2299 2300 2301
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

2302
#ifndef SDHCI_USE_LEDS_CLASS
2303
	sdhci_deactivate_led(host);
2304
#endif
2305

2306
	mmiowb();
2307 2308 2309
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
2310
	sdhci_runtime_pm_put(host);
2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
2323
		pr_err("%s: Timeout waiting for hardware "
P
Pierre Ossman 已提交
2324
			"interrupt.\n", mmc_hostname(host->mmc));
2325 2326 2327
		sdhci_dumpregs(host);

		if (host->data) {
P
Pierre Ossman 已提交
2328
			host->data->error = -ETIMEDOUT;
2329 2330 2331
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
P
Pierre Ossman 已提交
2332
				host->cmd->error = -ETIMEDOUT;
2333
			else
P
Pierre Ossman 已提交
2334
				host->mrq->cmd->error = -ETIMEDOUT;
2335 2336 2337 2338 2339

			tasklet_schedule(&host->finish_tasklet);
		}
	}

2340
	mmiowb();
2341 2342 2343
	spin_unlock_irqrestore(&host->lock, flags);
}

2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357
/*
 * Re-tuning timer expiry: flag that the host needs re-tuning before the
 * next request.  The flag is consumed elsewhere under host->lock.
 */
static void sdhci_tuning_timer(unsigned long data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	unsigned long irq_flags;

	spin_lock_irqsave(&host->lock, irq_flags);
	host->flags |= SDHCI_NEEDS_RETUNING;
	spin_unlock_irqrestore(&host->lock, irq_flags);
}

2358 2359 2360 2361 2362 2363
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

2364
/*
 * Handle command-related interrupt bits.  On error, schedules request
 * completion; otherwise deals with the busy-wait overloading of the
 * "data complete" interrupt before finishing the command.
 * @mask may be modified to suppress SDHCI_INT_DATA_END handling.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

2420
#ifdef CONFIG_MMC_DEBUG
/*
 * Debug helper: dump the controller registers and walk the ADMA
 * descriptor table, printing each descriptor until the END descriptor.
 */
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif

2453 2454
/*
 * Handle data-related interrupt bits: tuning completion, busy-end
 * (overloaded on "data complete"), data errors, PIO transfers, DMA
 * boundary restarts and final data completion.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
				" next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

2567
static irqreturn_t sdhci_irq(int irq, void *dev_id)
2568
{
2569
	irqreturn_t result = IRQ_NONE;
2570
	struct sdhci_host *host = dev_id;
2571
	u32 intmask, mask, unexpected = 0;
2572
	int max_loops = 16;
2573 2574 2575

	spin_lock(&host->lock);

2576
	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2577
		spin_unlock(&host->lock);
2578
		return IRQ_NONE;
2579 2580
	}

2581
	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2582
	if (!intmask || intmask == 0xffffffff) {
2583 2584 2585 2586
		result = IRQ_NONE;
		goto out;
	}

2587 2588 2589 2590 2591
	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2592

2593 2594
		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);
2595

2596 2597 2598
		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;
2599

2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610
			/*
			 * There is a observation on i.mx esdhc.  INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system.  And the REMOVE gets the
			 * same situation.
			 *
			 * More testing are needed here to ensure it works
			 * for other platforms though.
			 */
2611 2612 2613 2614 2615 2616
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2617 2618 2619

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2620 2621 2622 2623

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
2624
		}
2625

2626
		if (intmask & SDHCI_INT_CMD_MASK)
2627 2628
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);
2629

2630 2631
		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2632

2633 2634 2635
		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));
2636

2637 2638 2639 2640 2641
		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}
P
Pierre Ossman 已提交
2642

2643 2644 2645 2646
		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);
P
Pierre Ossman 已提交
2647

2648 2649 2650 2651
		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}
2652

2653 2654
		if (result == IRQ_NONE)
			result = IRQ_HANDLED;
2655

2656 2657
		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
2658 2659 2660
out:
	spin_unlock(&host->lock);

2661 2662 2663 2664 2665
	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}
P
Pierre Ossman 已提交
2666

2667 2668 2669
	return result;
}

2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

2681 2682 2683 2684 2685
	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697
	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

2698 2699 2700 2701 2702 2703 2704
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
K
Kevin Liu 已提交
2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719
/*
 * Arm the controller's wakeup sources (insert/remove/SDIO interrupt)
 * in SDHCI_WAKE_UP_CONTROL before suspending.
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;
	u8 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL) | mask;

	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);

2720
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
K
Kevin Liu 已提交
2721 2722 2723 2724 2725 2726 2727 2728 2729
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
2730

2731
int sdhci_suspend_host(struct sdhci_host *host)
2732
{
2733 2734
	sdhci_disable_card_detection(host);

2735
	/* Disable tuning since we are suspending */
2736
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
2737
		del_timer_sync(&host->tuning_timer);
2738 2739 2740
		host->flags &= ~SDHCI_NEEDS_RETUNING;
	}

K
Kevin Liu 已提交
2741
	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2742 2743 2744
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
K
Kevin Liu 已提交
2745 2746 2747 2748 2749
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
2750
	return 0;
2751 2752
}

2753
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2754

2755 2756
int sdhci_resume_host(struct sdhci_host *host)
{
2757
	int ret = 0;
2758

2759
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2760 2761 2762
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}
2763

K
Kevin Liu 已提交
2764
	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2765 2766 2767
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
K
Kevin Liu 已提交
2768 2769 2770 2771 2772 2773
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}
2774

2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785
	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}
2786

2787 2788
	sdhci_enable_card_detection(host);

2789
	/* Set the re-tuning expiration flag */
2790
	if (host->flags & SDHCI_USING_RETUNING_TIMER)
2791 2792
		host->flags |= SDHCI_NEEDS_RETUNING;

2793
	return ret;
2794 2795
}

2796
EXPORT_SYMBOL_GPL(sdhci_resume_host);
2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808

/* Take a runtime-PM reference on the host's parent device. */
static int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	struct device *dev = host->mmc->parent;

	return pm_runtime_get_sync(dev);
}

/* Drop a runtime-PM reference, deferring suspend via autosuspend. */
static int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	struct device *dev = host->mmc->parent;

	pm_runtime_mark_last_busy(dev);
	return pm_runtime_put_autosuspend(dev);
}

2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824
/* Pin the host active while bus power is on (no-op if already pinned). */
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (!host->runtime_suspended && !host->bus_on) {
		host->bus_on = true;
		pm_runtime_get_noresume(host->mmc->parent);
	}
}

/* Release the bus-power pin taken by sdhci_runtime_pm_bus_on(). */
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->runtime_suspended && host->bus_on) {
		host->bus_on = false;
		pm_runtime_put_noidle(host->mmc->parent);
	}
}

2825 2826 2827 2828 2829
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	/* Disable tuning since we are suspending */
2830
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
2831 2832 2833 2834 2835
		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
	}

	spin_lock_irqsave(&host->lock, flags);
2836 2837 2838
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2839 2840
	spin_unlock_irqrestore(&host->lock, flags);

2841
	synchronize_hardirq(host->irq);
2842 2843 2844 2845 2846

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

2847
	return 0;
2848 2849 2850 2851 2852 2853
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
2854
	int host_flags = host->flags;
2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_set_ios(host, &host->mmc->ios);

	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2869 2870 2871 2872 2873 2874
	if ((host_flags & SDHCI_PV_ENABLED) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}
2875 2876

	/* Set the re-tuning expiration flag */
2877
	if (host->flags & SDHCI_USING_RETUNING_TIMER)
2878 2879 2880 2881 2882 2883 2884
		host->flags |= SDHCI_NEEDS_RETUNING;

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
2885
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2886 2887 2888 2889 2890 2891 2892
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

2893
	return 0;
2894 2895 2896
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

2897
#endif /* CONFIG_PM */
2898

2899 2900
/*****************************************************************************\
 *                                                                           *
2901
 * Device allocation/registration                                            *
2902 2903 2904
 *                                                                           *
\*****************************************************************************/

2905 2906
/*
 * Allocate an mmc_host with an embedded sdhci_host plus @priv_size bytes
 * of driver-private data.  Returns the sdhci_host, or ERR_PTR(-ENOMEM).
 */
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
2924

2925 2926 2927
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
2928
	u32 caps[2] = {0, 0};
2929 2930
	u32 max_current_caps;
	unsigned int ocr_avail;
2931
	unsigned int override_timeout_clk;
2932
	int ret;
2933

2934 2935 2936
	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;
2937

2938
	mmc = host->mmc;
2939

2940 2941
	if (debug_quirks)
		host->quirks = debug_quirks;
2942 2943
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;
2944

2945 2946
	override_timeout_clk = host->timeout_clk;

2947
	sdhci_do_reset(host, SDHCI_RESET_ALL);
2948

2949
	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
2950 2951
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
2952
	if (host->version > SDHCI_SPEC_300) {
2953
		pr_err("%s: Unknown controller version (%d). "
2954
			"You may experience problems.\n", mmc_hostname(mmc),
2955
			host->version);
2956 2957
	}

2958
	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
2959
		sdhci_readl(host, SDHCI_CAPABILITIES);
2960

2961 2962 2963 2964
	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);
2965

2966
	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
2967
		host->flags |= SDHCI_USE_SDMA;
2968
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
2969
		DBG("Controller doesn't have SDMA capability\n");
2970
	else
2971
		host->flags |= SDHCI_USE_SDMA;
2972

2973
	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
2974
		(host->flags & SDHCI_USE_SDMA)) {
R
Rolf Eike Beer 已提交
2975
		DBG("Disabling DMA as it is marked broken\n");
2976
		host->flags &= ~SDHCI_USE_SDMA;
2977 2978
	}

2979 2980
	if ((host->version >= SDHCI_SPEC_200) &&
		(caps[0] & SDHCI_CAN_DO_ADMA2))
2981
		host->flags |= SDHCI_USE_ADMA;
2982 2983 2984 2985 2986 2987 2988

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

2989 2990 2991 2992 2993 2994 2995 2996 2997 2998
	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
	 * that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

2999
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3000 3001
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
J
Joe Perches 已提交
3002
				pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3003
					mmc_hostname(mmc));
3004 3005
				host->flags &=
					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3006
			}
3007 3008 3009
		}
	}

3010 3011 3012 3013
	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

3014 3015
	if (host->flags & SDHCI_USE_ADMA) {
		/*
3016 3017 3018 3019
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multipled by the descriptor size.
3020
		 */
3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->align_buffer_sz = SDHCI_MAX_SEGS *
						SDHCI_ADMA2_64_ALIGN;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
			host->align_sz = SDHCI_ADMA2_64_ALIGN;
			host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->align_buffer_sz = SDHCI_MAX_SEGS *
						SDHCI_ADMA2_32_ALIGN;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
			host->align_sz = SDHCI_ADMA2_32_ALIGN;
			host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
		}
3038
		host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
3039
						      host->adma_table_sz,
3040 3041
						      &host->adma_addr,
						      GFP_KERNEL);
3042
		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
3043
		if (!host->adma_table || !host->align_buffer) {
3044
			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
3045
					  host->adma_table, host->adma_addr);
3046
			kfree(host->align_buffer);
J
Joe Perches 已提交
3047
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3048 3049
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
3050
			host->adma_table = NULL;
3051
			host->align_buffer = NULL;
3052
		} else if (host->adma_addr & host->align_mask) {
J
Joe Perches 已提交
3053 3054
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
3055
			host->flags &= ~SDHCI_USE_ADMA;
3056
			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
3057
					  host->adma_table, host->adma_addr);
3058
			kfree(host->align_buffer);
3059
			host->adma_table = NULL;
3060
			host->align_buffer = NULL;
3061 3062 3063
		}
	}

3064 3065 3066 3067 3068
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
3069
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3070
		host->dma_mask = DMA_BIT_MASK(64);
3071
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
3072
	}
3073

3074
	if (host->version >= SDHCI_SPEC_300)
3075
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
3076 3077
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
3078
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
3079 3080
			>> SDHCI_CLOCK_BASE_SHIFT;

3081
	host->max_clk *= 1000000;
3082 3083
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3084
		if (!host->ops->get_max_clock) {
3085
			pr_err("%s: Hardware doesn't specify base clock "
3086 3087 3088 3089
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
3090
	}
3091

3092
	host->next_data.cookie = 1;
3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108
	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

3109 3110 3111 3112
	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
3113
	mmc->f_max = host->max_clk;
3114
	if (host->ops->get_min_clock)
3115
		mmc->f_min = host->ops->get_min_clock(host);
3116 3117 3118 3119 3120 3121 3122
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			mmc->f_max = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
3123
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3124

3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				return -ENODEV;
			}
3137 3138
		}

3139 3140
		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;
3141

3142
		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3143
			host->ops->get_max_timeout_count(host) : 1 << 27;
3144 3145
		mmc->max_busy_timeout /= host->timeout_clk;
	}
3146

3147 3148 3149
	if (override_timeout_clk)
		host->timeout_clk = override_timeout_clk;

3150
	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3151
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3152 3153 3154

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;
3155

3156
	/* Auto-CMD23 stuff only works in ADMA or PIO. */
A
Andrei Warkentin 已提交
3157
	if ((host->version >= SDHCI_SPEC_300) &&
3158
	    ((host->flags & SDHCI_USE_ADMA) ||
A
Andrei Warkentin 已提交
3159
	     !(host->flags & SDHCI_USE_SDMA))) {
3160 3161 3162 3163 3164 3165
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}

3166 3167 3168 3169 3170 3171 3172
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
3173
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3174
		mmc->caps |= MMC_CAP_4_BIT_DATA;
3175

3176 3177 3178
	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

3179
	if (caps[0] & SDHCI_CAN_DO_HISPD)
3180
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3181

3182
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3183
	    !(mmc->caps & MMC_CAP_NONREMOVABLE))
3184 3185
		mmc->caps |= MMC_CAP_NEEDS_POLL;

3186 3187 3188 3189
	/* If there are external regulators, get them */
	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

3190
	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3191 3192 3193 3194
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
3195 3196 3197
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
					SDHCI_SUPPORT_SDR50 |
					SDHCI_SUPPORT_DDR50);
3198 3199 3200
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
3201
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3202
		}
3203
	}
3204

3205 3206 3207 3208
	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50);

3209 3210 3211
	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
3212 3213 3214
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 supports also implies SDR50 support */
3215
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
3216
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3217 3218 3219
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
3220
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3221
			mmc->caps2 |= MMC_CAP2_HS200;
3222
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
3223 3224
		mmc->caps |= MMC_CAP_UHS_SDR50;

3225 3226 3227 3228
	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (caps[1] & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

3229 3230 3231 3232 3233 3234
	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

3235 3236
	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
		!(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3237 3238
		mmc->caps |= MMC_CAP_UHS_DDR50;

3239
	/* Does the host need tuning for SDR50? */
3240 3241 3242
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

3243
	/* Does the host need tuning for SDR104 / HS200? */
3244
	if (mmc->caps2 & MMC_CAP2_HS200)
3245
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;
3246

3247 3248 3249 3250 3251 3252 3253 3254
	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269
	/* Initial value for re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

3270
	ocr_avail = 0;
3271

3272 3273 3274 3275 3276 3277 3278 3279
	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3280
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3281
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
3295 3296

	if (caps[0] & SDHCI_CAN_VDD_330) {
3297
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3298

A
Aaron Lu 已提交
3299
		mmc->max_current_330 = ((max_current_caps &
3300 3301 3302 3303 3304
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
3305
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3306

A
Aaron Lu 已提交
3307
		mmc->max_current_300 = ((max_current_caps &
3308 3309 3310 3311 3312
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
3313 3314
		ocr_avail |= MMC_VDD_165_195;

A
Aaron Lu 已提交
3315
		mmc->max_current_180 = ((max_current_caps &
3316 3317 3318 3319 3320
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

3321
	/* If OCR set by external regulators, use it instead */
3322
	if (mmc->ocr_avail)
3323
		ocr_avail = mmc->ocr_avail;
3324

3325
	if (host->ocr_mask)
3326
		ocr_avail &= host->ocr_mask;
3327

3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339
	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3340 3341

	if (mmc->ocr_avail == 0) {
3342
		pr_err("%s: Hardware doesn't report any "
3343
			"support voltages.\n", mmc_hostname(mmc));
3344
		return -ENODEV;
3345 3346
	}

3347 3348 3349
	spin_lock_init(&host->lock);

	/*
3350 3351
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
3352
	 */
3353
	if (host->flags & SDHCI_USE_ADMA)
3354
		mmc->max_segs = SDHCI_MAX_SEGS;
3355
	else if (host->flags & SDHCI_USE_SDMA)
3356
		mmc->max_segs = 1;
3357
	else /* PIO */
3358
		mmc->max_segs = SDHCI_MAX_SEGS;
3359 3360

	/*
3361 3362 3363
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
3364
	 */
3365
	mmc->max_req_size = 524288;
3366 3367 3368

	/*
	 * Maximum segment size. Could be one segment with the maximum number
3369 3370
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
3371
	 */
3372 3373 3374 3375 3376 3377
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
3378
		mmc->max_seg_size = mmc->max_req_size;
3379
	}
3380

3381 3382 3383 3384
	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
3385 3386 3387
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
3388
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
3389 3390
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
J
Joe Perches 已提交
3391 3392
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
3393 3394 3395 3396 3397
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
3398

3399 3400 3401
	/*
	 * Maximum block count.
	 */
3402
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3403

3404 3405 3406 3407 3408 3409
	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

3410
	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3411

3412
	if (host->version >= SDHCI_SPEC_300) {
3413 3414
		init_waitqueue_head(&host->buf_ready_int);

3415 3416 3417 3418 3419 3420
		/* Initialize re-tuning timer */
		init_timer(&host->tuning_timer);
		host->tuning_timer.data = (unsigned long)host;
		host->tuning_timer.function = sdhci_tuning_timer;
	}

3421 3422
	sdhci_init(host, 0);

3423 3424
	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED,	mmc_hostname(mmc), host);
3425 3426 3427
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
3428
		goto untasklet;
3429
	}
3430 3431 3432 3433 3434

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

3435
#ifdef SDHCI_USE_LEDS_CLASS
H
Helmut Schaa 已提交
3436 3437 3438
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
3439 3440 3441 3442
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

3443
	ret = led_classdev_register(mmc_dev(mmc), &host->led);
3444 3445 3446
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
3447
		goto reset;
3448
	}
3449 3450
#endif

3451 3452
	mmiowb();

3453 3454
	mmc_add_host(mmc);

3455
	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3456
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3457 3458
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3459
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3460

3461 3462
	sdhci_enable_card_detection(host);

3463 3464
	return 0;

3465
#ifdef SDHCI_USE_LEDS_CLASS
3466
reset:
3467
	sdhci_do_reset(host, SDHCI_RESET_ALL);
3468 3469
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3470 3471
	free_irq(host->irq, host);
#endif
3472
untasklet:
3473 3474 3475 3476 3477
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

3478
EXPORT_SYMBOL_GPL(sdhci_add_host);
3479

P
Pierre Ossman 已提交
3480
void sdhci_remove_host(struct sdhci_host *host, int dead)
3481
{
3482
	struct mmc_host *mmc = host->mmc;
P
Pierre Ossman 已提交
3483 3484 3485 3486 3487 3488 3489 3490
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
3491
			pr_err("%s: Controller removed during "
3492
				" transfer!\n", mmc_hostname(mmc));
P
Pierre Ossman 已提交
3493 3494 3495 3496 3497 3498 3499 3500

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

3501 3502
	sdhci_disable_card_detection(host);

3503
	mmc_remove_host(mmc);
3504

3505
#ifdef SDHCI_USE_LEDS_CLASS
3506 3507 3508
	led_classdev_unregister(&host->led);
#endif

P
Pierre Ossman 已提交
3509
	if (!dead)
3510
		sdhci_do_reset(host, SDHCI_RESET_ALL);
3511

3512 3513
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3514 3515 3516 3517 3518
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);
3519

3520 3521
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
3522

3523
	if (host->adma_table)
3524
		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
3525
				  host->adma_table, host->adma_addr);
3526 3527
	kfree(host->align_buffer);

3528
	host->adma_table = NULL;
3529
	host->align_buffer = NULL;
3530 3531
}

3532
EXPORT_SYMBOL_GPL(sdhci_remove_host);
3533

3534
void sdhci_free_host(struct sdhci_host *host)
3535
{
3536
	mmc_free_host(host->mmc);
3537 3538
}

3539
EXPORT_SYMBOL_GPL(sdhci_free_host);
3540 3541 3542 3543 3544 3545 3546 3547 3548

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

/* Module init: just announce the core driver; hosts register separately. */
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

/* Module exit: nothing to do; individual hosts unregister themselves. */
static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

3563
module_param(debug_quirks, uint, 0444);
3564
module_param(debug_quirks2, uint, 0444);
3565

3566
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3567
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3568
MODULE_LICENSE("GPL");
3569

3570
MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3571
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");