sdhci.c 100.3 KB
Newer Older
1
/*
P
Pierre Ossman 已提交
2
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3
 *
4
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 6
 *
 * This program is free software; you can redistribute it and/or modify
7 8 9
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
10 11 12 13
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
14 15 16
 */

#include <linux/delay.h>
A
Adrian Hunter 已提交
17
#include <linux/ktime.h>
18
#include <linux/highmem.h>
19
#include <linux/io.h>
20
#include <linux/module.h>
21
#include <linux/dma-mapping.h>
22
#include <linux/slab.h>
23
#include <linux/scatterlist.h>
M
Marek Szyprowski 已提交
24
#include <linux/regulator/consumer.h>
25
#include <linux/pm_runtime.h>
26
#include <linux/of.h>
27

28 29
#include <linux/leds.h>

30
#include <linux/mmc/mmc.h>
31
#include <linux/mmc/host.h>
32
#include <linux/mmc/card.h>
33
#include <linux/mmc/sdio.h>
34
#include <linux/mmc/slot-gpio.h>
35 36 37 38 39 40

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

/* Debug print helper; relies on a 'host' variable being in scope at the
 * call site (used throughout this file). */
#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

/* Register-dump print helper; also relies on a local 'host'. */
#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

/* Upper bound on tuning iterations (used by the retuning loop; the loop
 * itself is outside this chunk — TODO confirm). */
#define MAX_TUNING_LOOP 40

/* Quirk overrides; presumably set via module parameters declared later
 * in the file — not visible in this chunk, verify. */
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

/* Forward declarations for routines defined further down. */
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
54

55
/*
 * sdhci_dumpregs - dump the controller's register file to the kernel log
 * @host: SDHCI host to dump
 *
 * Reads and prints every standard SDHCI register (plus the ADMA error and
 * pointer registers when ADMA is in use).  Purely diagnostic: the only side
 * effects are the register reads themselves.
 */
void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			/* 64-bit ADMA keeps the pointer in two registers */
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
117 118 119 120 121 122 123

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

124 125 126 127 128
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

129 130
/*
 * Enable or disable the card insert/remove interrupts.
 *
 * Nothing is done when card detection is known-broken or the card is
 * non-removable.  When enabling, only the interrupt for the *opposite*
 * of the current card state is armed.
 */
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (!enable) {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	} else {
		u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
			      SDHCI_CARD_PRESENT;

		/* Card present -> watch for removal; absent -> insertion */
		if (present)
			host->ier |= SDHCI_INT_CARD_REMOVE;
		else
			host->ier |= SDHCI_INT_CARD_INSERT;
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

/* Arm the card insert/remove interrupts (see sdhci_set_card_detection). */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

/* Disarm the card insert/remove interrupts. */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176
/*
 * Take a runtime-PM reference while SD bus power is on, so the parent
 * device cannot runtime-suspend.  Idempotent via the bus_on flag.
 */
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

/*
 * Drop the runtime-PM reference taken by sdhci_runtime_pm_bus_on().
 * Idempotent via the bus_on flag.
 */
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

177
/*
 * sdhci_reset - issue a software reset and wait for it to complete
 * @host: SDHCI host
 * @mask: SDHCI_RESET_* bits to write to the software-reset register
 *
 * Busy-waits (up to 100 ms, polling every 10 us) for the hardware to clear
 * the reset bit(s).  On timeout, logs an error and dumps the registers
 * instead of looping forever.
 */
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		/* Clock state is lost across a full reset */
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

/*
 * Perform a reset via the host's ->reset() op, with quirk handling.
 *
 * With SDHCI_QUIRK_NO_CARD_NO_RESET the reset is skipped entirely when no
 * card is present.  After a full reset, DMA is re-enabled (a reset clears
 * the controller's DMA setup) and cached preset state is invalidated.
 */
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

228
/*
 * Program the baseline interrupt mask: command/data completion plus the
 * error conditions the driver handles.  Re-tune interrupts are added only
 * for tuning modes 2 and 3, where the controller requests retuning itself.
 */
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	/* Mirror the mask into both the status-enable and signal-enable regs */
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

/*
 * (Re-)initialise the controller.
 * @soft: non-zero for a "soft" init that resets only the CMD and DATA
 *        circuits and then re-applies the current ios; zero for a full
 *        RESET_ALL.
 */
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}
263

264 265
/* Full re-initialisation: hard reset, then re-arm card detection. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

270
static void __sdhci_led_activate(struct sdhci_host *host)
271 272 273
{
	u8 ctrl;

274
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
275
	ctrl |= SDHCI_CTRL_LED;
276
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
277 278
}

279
static void __sdhci_led_deactivate(struct sdhci_host *host)
280 281 282
{
	u8 ctrl;

283
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
284
	ctrl &= ~SDHCI_CTRL_LED;
285
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
286 287
}

288
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
289
/*
 * LED-class brightness callback: map LED_OFF/on to the controller LED bit.
 * Skips register access while the host is runtime suspended, under the
 * host lock to serialise with the rest of the driver.
 */
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	/* Registers are not accessible while runtime suspended */
	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356

/*
 * Register an LED-class device for the host's activity LED.
 * Returns the led_classdev_register() result (0 on success).
 */
static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	/* Blink on mmc activity by default */
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

/* Unregister the LED-class device created by sdhci_led_register(). */
static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

/* With LEDS_CLASS the LED is driven via the trigger/brightness callback,
 * so the direct activate/deactivate hooks are no-ops. */
static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

/* Without LEDS_CLASS there is no LED device to register ... */
static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

/* ... and the LED is toggled directly around each request instead. */
static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

357 358
#endif

359 360 361 362 363 364
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

P
Pierre Ossman 已提交
365
static void sdhci_read_block_pio(struct sdhci_host *host)
366
{
367 368
	unsigned long flags;
	size_t blksize, len, chunk;
369
	u32 uninitialized_var(scratch);
370
	u8 *buf;
371

P
Pierre Ossman 已提交
372
	DBG("PIO reading\n");
373

P
Pierre Ossman 已提交
374
	blksize = host->data->blksz;
375
	chunk = 0;
376

377
	local_irq_save(flags);
378

P
Pierre Ossman 已提交
379
	while (blksize) {
F
Fabio Estevam 已提交
380
		BUG_ON(!sg_miter_next(&host->sg_miter));
381

382
		len = min(host->sg_miter.length, blksize);
383

384 385
		blksize -= len;
		host->sg_miter.consumed = len;
386

387
		buf = host->sg_miter.addr;
388

389 390
		while (len) {
			if (chunk == 0) {
391
				scratch = sdhci_readl(host, SDHCI_BUFFER);
392
				chunk = 4;
P
Pierre Ossman 已提交
393
			}
394 395 396 397 398 399 400

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
401
		}
P
Pierre Ossman 已提交
402
	}
403 404 405 406

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
P
Pierre Ossman 已提交
407
}
408

P
Pierre Ossman 已提交
409 410
/*
 * Write one block to the controller's buffer register in PIO mode.
 *
 * Bytes from the scatterlist are packed little-endian into a 32-bit
 * scratch word which is flushed to SDHCI_BUFFER when full, or when the
 * final (possibly partial) word of the block has been gathered.  Runs
 * with local interrupts disabled for the atomic sg_miter mapping.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Flush a full word, or the last partial word */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

/*
 * Move as many blocks as the controller currently has buffer space/data
 * for, in PIO mode.  Polls SDHCI_PRESENT_STATE for buffer availability and
 * transfers block-by-block until host->blocks is exhausted or the
 * controller stops asserting availability.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

492
/*
 * DMA-map the request's scatterlist (unless already pre-mapped by the
 * pre_req path) and record the mapping in the mmc_data cookie fields.
 *
 * Returns the number of mapped sg entries, or -ENOSPC when dma_map_sg()
 * maps nothing.
 */
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				data->flags & MMC_DATA_WRITE ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

517 518 519
/*
 * Atomically map an sg entry's page and return a pointer at its offset.
 * Disables local interrupts (restored by sdhci_kunmap_atomic()).
 */
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

/* Undo sdhci_kmap_atomic(): unmap and restore the saved irq state. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

529 530
/*
 * Fill in one ADMA2 descriptor (command/attributes, length, address).
 * The high address word is written only when 64-bit DMA is in use.
 */
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

543 544
/* Set the END attribute on a descriptor, marking it last in the table. */
static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

551 552
/*
 * Build the ADMA2 descriptor table for an already DMA-mapped request.
 *
 * Unaligned leading bytes of each sg entry (up to SDHCI_ADMA2_MASK worth)
 * are routed through the pre-allocated bounce (align) buffer, since ADMA
 * requires 32-bit-aligned addresses.  For writes, those bytes are copied
 * into the bounce buffer here; for reads they are copied back later in
 * sdhci_adma_table_post().
 */
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				/* Stage the unaligned bytes for the write */
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

/*
 * Post-transfer ADMA fixup: for reads, copy any bytes that were received
 * into the bounce (align) buffer back into the unaligned heads of the sg
 * entries.  Writes need no post-processing.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			/* Make device-written data visible to the CPU */
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

676
/*
 * Compute the 4-bit value for the timeout-control register for @cmd.
 *
 * The hardware timeout doubles with each count step, so this finds the
 * smallest count whose timeout covers the target (from data->timeout_ns/
 * timeout_clks or cmd->busy_timeout).  Returns 0xE (a large fallback)
 * when the host's timeout clock is unreliable, the timeout is
 * unspecified, or the target exceeds the hardware's range.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

743 744 745 746 747 748
/*
 * Select data-transfer interrupts for the current request: DMA-completion
 * interrupts when DMA is used, buffer-ready interrupts for PIO.  The two
 * sets are mutually exclusive in host->ier.
 */
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

757
/*
 * Program the data timeout for @cmd, delegating to the host's
 * ->set_timeout() op when one is provided.
 */
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

/*
 * Prepare the controller for @cmd's data phase (if any): program the data
 * timeout, decide between DMA and PIO (falling back to PIO on alignment or
 * size quirks), set up the ADMA table or SDMA address (or the sg mapping
 * iterator for PIO), select transfer interrupts, and program block size
 * and count.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		/* Fall back to PIO if any sg entry violates the quirks */
		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			/* SDMA can only handle a single contiguous region */
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		/* Set up the sg iterator the PIO block routines consume */
		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

906 907 908
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
909 910
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
911 912
}

913
/*
 * Program the transfer-mode register for @cmd: multi/single block, block
 * count enable, Auto-CMD12/23, read direction and DMA enable.  For
 * non-data commands the Auto-CMD bits are cleared (or the whole register,
 * per quirk) and nothing else is touched.
 */
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
		/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			/* CMD23's argument (block count) goes in ARGUMENT2 */
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

960 961 962 963 964 965 966 967 968 969
/*
 * Does the controller need a reset after @mrq?  True when any stage of a
 * live (non-dead) host's request errored, or the host is quirked to reset
 * after every request.
 */
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992
/*
 * Queue @mrq into the first free mrqs_done slot and schedule the finish
 * tasklet to complete it.  Warns (and bails) on a double-completion, and
 * warns if all slots are unexpectedly occupied.
 */
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

993 994
/*
 * Finish @mrq: drop any host state pointers that still reference it,
 * flag a pending reset if the request's outcome requires one, and hand
 * the request to the finish tasklet.
 */
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

1010 1011
/*
 * Complete the data phase of the current request: undo the ADMA bounce
 * copies, account transferred bytes (all-or-nothing, see comment below),
 * reset the controller on error, and either send the stop command or
 * finish the request.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

/* Cancel the software timeout for @mrq; counterpart of sdhci_mod_timer(). */
static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

1087
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1088 1089
{
	int flags;
1090
	u32 mask;
1091
	unsigned long timeout;
1092 1093 1094

	WARN_ON(host->cmd);

1095 1096 1097
	/* Initially, a command has no error */
	cmd->error = 0;

1098 1099 1100 1101
	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

1102
	/* Wait max 10 ms */
1103
	timeout = 10;
1104 1105

	mask = SDHCI_CMD_INHIBIT;
1106
	if (sdhci_data_line_cmd(cmd))
1107 1108 1109 1110
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
1111
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1112 1113
		mask &= ~SDHCI_DATA_INHIBIT;

1114
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1115
		if (timeout == 0) {
1116 1117
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
1118
			sdhci_dumpregs(host);
P
Pierre Ossman 已提交
1119
			cmd->error = -EIO;
1120
			sdhci_finish_mrq(host, cmd->mrq);
1121 1122
			return;
		}
1123 1124 1125
		timeout--;
		mdelay(1);
	}
1126

1127
	timeout = jiffies;
1128 1129
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1130 1131
	else
		timeout += 10 * HZ;
1132
	sdhci_mod_timer(host, cmd->mrq, timeout);
1133 1134

	host->cmd = cmd;
1135
	if (sdhci_data_line_cmd(cmd)) {
1136 1137 1138
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}
1139

1140
	sdhci_prepare_data(host, cmd);
1141

1142
	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1143

1144
	sdhci_set_transfer_mode(host, cmd);
1145

1146
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1147
		pr_err("%s: Unsupported response type!\n",
1148
			mmc_hostname(host->mmc));
P
Pierre Ossman 已提交
1149
		cmd->error = -EINVAL;
1150
		sdhci_finish_mrq(host, cmd->mrq);
1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
1167 1168

	/* CMD19 is special in that the Data Present Select should be set */
1169 1170
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1171 1172
		flags |= SDHCI_CMD_DATA;

1173
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1174
}
1175
EXPORT_SYMBOL_GPL(sdhci_send_command);
1176 1177 1178

static void sdhci_finish_command(struct sdhci_host *host)
{
1179
	struct mmc_command *cmd = host->cmd;
1180 1181
	int i;

1182 1183 1184 1185
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
1186 1187
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
1188
				cmd->resp[i] = sdhci_readl(host,
1189 1190
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
1191
					cmd->resp[i] |=
1192
						sdhci_readb(host,
1193 1194 1195
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
1196
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1197 1198 1199
		}
	}

1200 1201 1202
	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

1203 1204 1205 1206 1207 1208 1209 1210 1211 1212
	/*
	 * The host can send and interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
1213 1214
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
1215 1216
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1217 1218
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
1219 1220 1221 1222
			return;
		}
	}

1223
	/* Finished CMD23, now send actual command. */
1224 1225
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
1226
	} else {
1227

1228 1229 1230
		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);
1231

1232
		if (!cmd->data)
1233
			sdhci_finish_mrq(host, cmd->mrq);
1234
	}
1235 1236
}

1237 1238
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
1239
	u16 preset = 0;
1240

1241 1242
	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
1243 1244
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
1245
	case MMC_TIMING_UHS_SDR25:
1246 1247
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
1248
	case MMC_TIMING_UHS_SDR50:
1249 1250
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
1251 1252
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
1253 1254
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
1255
	case MMC_TIMING_UHS_DDR50:
1256
	case MMC_TIMING_MMC_DDR52:
1257 1258
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
1259 1260 1261
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
1262 1263 1264 1265 1266 1267 1268 1269 1270
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

1271 1272
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
1273
{
1274
	int div = 0; /* Initialized for compiler warning */
1275
	int real_div = div, clk_mul = 1;
1276
	u16 clk = 0;
1277
	bool switch_base_clk = false;
1278

1279
	if (host->version >= SDHCI_SPEC_300) {
1280
		if (host->preset_enabled) {
1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

1298 1299 1300 1301 1302
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
1303 1304 1305 1306 1307
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
1327 1328 1329 1330 1331 1332 1333 1334 1335
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
1336
			}
1337
			real_div = div;
1338
			div >>= 1;
1339 1340 1341
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
1342 1343 1344
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
1345
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1346 1347 1348
			if ((host->max_clk / div) <= clock)
				break;
		}
1349
		real_div = div;
1350
		div >>= 1;
1351 1352
	}

1353
clock_set:
1354
	if (real_div)
1355
		*actual_clock = (host->max_clk * clk_mul) / real_div;
1356
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1357 1358
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
1359 1360 1361 1362 1363

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

1364
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1365
{
A
Adrian Hunter 已提交
1366
	ktime_t timeout;
1367

1368
	clk |= SDHCI_CLOCK_INT_EN;
1369
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1370

1371
	/* Wait max 20 ms */
A
Adrian Hunter 已提交
1372
	timeout = ktime_add_ms(ktime_get(), 20);
1373
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1374
		& SDHCI_CLOCK_INT_STABLE)) {
A
Adrian Hunter 已提交
1375
		if (ktime_after(ktime_get(), timeout)) {
1376 1377
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
1378 1379 1380
			sdhci_dumpregs(host);
			return;
		}
A
Adrian Hunter 已提交
1381
		udelay(10);
1382
	}
1383 1384

	clk |= SDHCI_CLOCK_CARD_EN;
1385
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1386
}
1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

/*
 * Default ->set_clock implementation: stop the clock, and, unless the
 * request is "clock off" (@clock == 0), compute a new divider and
 * re-enable it.  mmc->actual_clock is updated with the achieved rate.
 */
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
1404

1405 1406
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
1407
{
1408
	struct mmc_host *mmc = host->mmc;
1409 1410 1411 1412 1413 1414 1415 1416 1417

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

1418 1419
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
1420
{
1421
	u8 pwr = 0;
1422

1423 1424
	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
1437 1438 1439
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
1440 1441 1442 1443
		}
	}

	if (host->pwr == pwr)
1444
		return;
1445

1446 1447 1448
	host->pwr = pwr;

	if (pwr == 0) {
1449
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1450 1451
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
1452 1453 1454 1455 1456 1457 1458
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1459

1460 1461 1462 1463 1464 1465 1466
		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1467

1468
		pwr |= SDHCI_POWER_ON;
1469

1470
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1471

1472 1473
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);
1474

1475 1476 1477 1478 1479 1480 1481
		/*
		 * Some controllers need an extra 10ms delay of 10ms before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
1482
}
1483
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1484

1485 1486
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
1487
{
1488 1489
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
1490
	else
1491
		sdhci_set_power_reg(host, mode, vdd);
1492
}
1493
EXPORT_SYMBOL_GPL(sdhci_set_power);
1494

1495 1496 1497 1498 1499 1500 1501 1502 1503
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
1504
	int present;
1505 1506 1507 1508
	unsigned long flags;

	host = mmc_priv(mmc);

1509
	/* Firstly check card presence */
1510
	present = mmc->ops->get_cd(mmc);
1511

1512 1513
	spin_lock_irqsave(&host->lock, flags);

1514
	sdhci_led_activate(host);
1515 1516 1517 1518 1519

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
1520
	if (sdhci_auto_cmd12(host, mrq)) {
1521 1522 1523 1524 1525
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}
1526

1527
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1528
		mrq->cmd->error = -ENOMEDIUM;
1529
		sdhci_finish_mrq(host, mrq);
1530
	} else {
1531
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1532 1533 1534
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
1535
	}
1536

1537
	mmiowb();
1538 1539 1540
	spin_unlock_irqrestore(&host->lock, flags);
}

1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1581 1582
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1583 1584 1585 1586
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

1587
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1588
{
1589
	struct sdhci_host *host = mmc_priv(mmc);
1590 1591
	u8 ctrl;

1592 1593 1594
	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

A
Adrian Hunter 已提交
1595
	if (host->flags & SDHCI_DEVICE_DEAD) {
1596 1597
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
1598
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
A
Adrian Hunter 已提交
1599 1600
		return;
	}
P
Pierre Ossman 已提交
1601

1602 1603 1604 1605 1606
	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
1607
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1608
		sdhci_reinit(host);
1609 1610
	}

1611
	if (host->version >= SDHCI_SPEC_300 &&
1612 1613
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1614 1615
		sdhci_enable_preset_value(host, false);

1616
	if (!ios->clock || ios->clock != host->clock) {
1617
		host->ops->set_clock(host, ios->clock);
1618
		host->clock = ios->clock;
1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
1631
	}
1632

1633 1634 1635 1636
	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);
1637

1638 1639 1640
	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

1641
	host->ops->set_bus_width(host, ios->bus_width);
1642

1643
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1644

1645
	if ((ios->timing == MMC_TIMING_SD_HS ||
1646 1647 1648 1649 1650 1651 1652 1653
	     ios->timing == MMC_TIMING_MMC_HS ||
	     ios->timing == MMC_TIMING_MMC_HS400 ||
	     ios->timing == MMC_TIMING_MMC_HS200 ||
	     ios->timing == MMC_TIMING_MMC_DDR52 ||
	     ios->timing == MMC_TIMING_UHS_SDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR104 ||
	     ios->timing == MMC_TIMING_UHS_DDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR25)
1654
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1655 1656 1657 1658
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

1659
	if (host->version >= SDHCI_SPEC_300) {
1660 1661
		u16 clk, ctrl_2;

1662
		if (!host->preset_enabled) {
1663
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1664 1665 1666 1667
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
1668
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1669 1670 1671
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1672 1673
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1674 1675
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1676 1677 1678
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
1679 1680
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
1681 1682
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}
1683 1684

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock gliches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
1701
			host->ops->set_clock(host, host->clock);
1702
		}
1703 1704 1705 1706 1707 1708

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

1709
		host->ops->set_uhs_signaling(host, ios->timing);
1710
		host->timing = ios->timing;
1711

1712 1713 1714 1715 1716
		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1717 1718
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1719 1720 1721 1722 1723 1724 1725 1726
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

1727
		/* Re-enable SD Clock */
1728
		host->ops->set_clock(host, host->clock);
1729 1730
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1731

1732 1733 1734 1735 1736
	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
1737
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1738
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1739

1740
	mmiowb();
1741 1742
}

1743
static int sdhci_get_cd(struct mmc_host *mmc)
1744 1745
{
	struct sdhci_host *host = mmc_priv(mmc);
1746
	int gpio_cd = mmc_gpio_get_cd(mmc);
1747 1748 1749 1750

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

1751
	/* If nonremovable, assume that the card is always present. */
1752
	if (!mmc_card_is_removable(host->mmc))
1753 1754
		return 1;

1755 1756 1757 1758
	/*
	 * Try slot gpio detect, if defined it take precedence
	 * over build in controller functionality
	 */
1759
	if (gpio_cd >= 0)
1760 1761
		return !!gpio_cd;

1762 1763 1764 1765
	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

1766 1767 1768 1769
	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

1770
static int sdhci_check_ro(struct sdhci_host *host)
1771 1772
{
	unsigned long flags;
1773
	int is_readonly;
1774 1775 1776

	spin_lock_irqsave(&host->lock, flags);

P
Pierre Ossman 已提交
1777
	if (host->flags & SDHCI_DEVICE_DEAD)
1778 1779 1780
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
P
Pierre Ossman 已提交
1781
	else
1782 1783
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);
1784 1785 1786

	spin_unlock_irqrestore(&host->lock, flags);

1787 1788 1789
	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
1790 1791
}

1792 1793
#define SAMPLE_COUNT	5

1794
static int sdhci_get_ro(struct mmc_host *mmc)
1795
{
1796
	struct sdhci_host *host = mmc_priv(mmc);
1797 1798 1799
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1800
		return sdhci_check_ro(host);
1801 1802 1803

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
1804
		if (sdhci_check_ro(host)) {
1805 1806 1807 1808 1809 1810 1811 1812
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

1813 1814 1815 1816 1817 1818 1819 1820
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

1821 1822
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
1823
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1824
		if (enable)
1825
			host->ier |= SDHCI_INT_CARD_INT;
1826
		else
1827 1828 1829 1830
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1831 1832
		mmiowb();
	}
1833 1834 1835 1836 1837 1838
}

/*
 * mmc_host_ops ->enable_sdio_irq: toggle SDIO interrupt delivery.  Takes
 * a runtime-PM reference while the IRQ is enabled so the controller
 * stays powered and able to signal.
 */
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}

1856 1857
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
1858
{
1859
	struct sdhci_host *host = mmc_priv(mmc);
1860
	u16 ctrl;
1861
	int ret;
1862

1863 1864 1865 1866 1867 1868
	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;
1869

1870 1871
	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

1872
	switch (ios->signal_voltage) {
1873
	case MMC_SIGNAL_VOLTAGE_330:
1874 1875
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
1876 1877 1878
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1879

1880
		if (!IS_ERR(mmc->supply.vqmmc)) {
1881
			ret = mmc_regulator_set_vqmmc(mmc, ios);
1882
			if (ret) {
J
Joe Perches 已提交
1883 1884
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
1885 1886 1887 1888 1889
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);
1890

1891 1892 1893 1894
		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;
1895

J
Joe Perches 已提交
1896 1897
		pr_warn("%s: 3.3V regulator output did not became stable\n",
			mmc_hostname(mmc));
1898 1899 1900

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
1901 1902
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
1903
		if (!IS_ERR(mmc->supply.vqmmc)) {
1904
			ret = mmc_regulator_set_vqmmc(mmc, ios);
1905
			if (ret) {
J
Joe Perches 已提交
1906 1907
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
1908 1909 1910
				return -EIO;
			}
		}
1911 1912 1913 1914 1915

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
1916 1917
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1918

1919 1920 1921 1922
		/* Some controller need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

1923 1924 1925 1926
		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;
1927

J
Joe Perches 已提交
1928 1929
		pr_warn("%s: 1.8V regulator output did not became stable\n",
			mmc_hostname(mmc));
1930

1931 1932
		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
1933 1934
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
1935
		if (!IS_ERR(mmc->supply.vqmmc)) {
1936
			ret = mmc_regulator_set_vqmmc(mmc, ios);
1937
			if (ret) {
J
Joe Perches 已提交
1938 1939
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
1940
				return -EIO;
1941 1942
			}
		}
1943
		return 0;
1944
	default:
1945 1946
		/* No signal voltage switch required */
		return 0;
1947
	}
1948 1949
}

1950 1951 1952 1953 1954
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

1955
	/* Check whether DAT[0] is 0 */
1956 1957
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

1958
	return !(present_state & SDHCI_DATA_0_LVL_MASK);
1959 1960
}

1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012
static void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}

/* Restore the normal interrupt mask after a tuning sequence. */
static void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

/* Clear Tuned Clock and Execute Tuning, reverting to the fixed clock. */
static void sdhci_reset_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}

2013
static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031
{
	sdhci_reset_tuning(host);

	sdhci_do_reset(host, SDHCI_RESET_CMD);
	sdhci_do_reset(host, SDHCI_RESET_DATA);

	sdhci_end_tuning(host);

	mmc_abort_tuning(host->mmc, opcode);
}

/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different to other commands and there is no timeout
 * interrupt so special handling is needed.
 */
2032
static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2033 2034
{
	struct mmc_host *mmc = host->mmc;
2035 2036
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
2037 2038 2039
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here.
	 */
2051 2052 2053 2054 2055
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_send_command(host, &cmd);

	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

2073
	mmiowb();
2074 2075 2076 2077 2078 2079 2080 2081
	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));

}

2082
static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
A
Adrian Hunter 已提交
2083 2084 2085 2086 2087 2088 2089 2090 2091 2092
{
	int i;

	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		u16 ctrl;

2093
		sdhci_send_tuning(host, opcode);
A
Adrian Hunter 已提交
2094 2095 2096 2097

		if (!host->tuning_done) {
			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
				mmc_hostname(host->mmc));
2098
			sdhci_abort_tuning(host, opcode);
A
Adrian Hunter 已提交
2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118
			return;
		}

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return; /* Success! */
			break;
		}

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
}

2119
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2120
{
2121
	struct sdhci_host *host = mmc_priv(mmc);
2122
	int err = 0;
2123
	unsigned int tuning_count = 0;
2124
	bool hs400_tuning;
2125

2126 2127
	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

2128 2129 2130
	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

2131
	/*
W
Weijun Yang 已提交
2132 2133 2134
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
2135 2136
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
2137
	 */
2138
	switch (host->timing) {
2139
	/* HS400 tuning is done in HS200 mode */
2140
	case MMC_TIMING_MMC_HS400:
2141
		err = -EINVAL;
2142
		goto out;
2143

2144
	case MMC_TIMING_MMC_HS200:
2145 2146 2147 2148 2149 2150 2151 2152
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

2153
	case MMC_TIMING_UHS_SDR104:
W
Weijun Yang 已提交
2154
	case MMC_TIMING_UHS_DDR50:
2155 2156 2157
		break;

	case MMC_TIMING_UHS_SDR50:
2158
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2159 2160 2161 2162
			break;
		/* FALLTHROUGH */

	default:
2163
		goto out;
2164 2165
	}

2166
	if (host->ops->platform_execute_tuning) {
2167
		err = host->ops->platform_execute_tuning(host, opcode);
2168
		goto out;
2169 2170
	}

A
Adrian Hunter 已提交
2171
	host->mmc->retune_period = tuning_count;
2172

A
Adrian Hunter 已提交
2173
	sdhci_start_tuning(host);
2174

2175
	__sdhci_execute_tuning(host, opcode);
2176

2177
	sdhci_end_tuning(host);
2178
out:
2179
	host->flags &= ~SDHCI_HS400_TUNING;
A
Adrian Hunter 已提交
2180

2181 2182
	return err;
}
2183
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2184

2185
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2186 2187 2188 2189 2190 2191 2192 2193 2194
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
2195 2196 2197 2198 2199 2200 2201 2202
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

2203
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2204 2205 2206 2207 2208 2209 2210

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
2211
	}
2212 2213
}

2214 2215 2216 2217 2218 2219
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

2220
	if (data->host_cookie != COOKIE_UNMAPPED)
2221 2222 2223 2224 2225
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			       DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
2226 2227
}

2228
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2229 2230 2231
{
	struct sdhci_host *host = mmc_priv(mmc);

2232
	mrq->data->host_cookie = COOKIE_UNMAPPED;
2233 2234

	if (host->flags & SDHCI_REQ_USE_DMA)
2235
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2236 2237
}

2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255
static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/* Fail any in-flight requests with @err.  Called with host->lock held. */
static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}

2256
static void sdhci_card_event(struct mmc_host *mmc)
2257
{
2258
	struct sdhci_host *host = mmc_priv(mmc);
2259
	unsigned long flags;
2260
	int present;
2261

2262 2263 2264 2265
	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

2266
	present = mmc->ops->get_cd(mmc);
2267

2268 2269
	spin_lock_irqsave(&host->lock, flags);

2270 2271
	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
2272
		pr_err("%s: Card removed during transfer!\n",
2273
			mmc_hostname(host->mmc));
2274
		pr_err("%s: Resetting controller.\n",
2275
			mmc_hostname(host->mmc));
2276

2277 2278
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
2279

2280
		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2281 2282 2283
	}

	spin_unlock_irqrestore(&host->lock, flags);
2284 2285 2286 2287
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
2288 2289
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
2290
	.set_ios	= sdhci_set_ios,
2291
	.get_cd		= sdhci_get_cd,
2292 2293 2294 2295
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2296
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2297 2298
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
2299
	.card_busy	= sdhci_card_busy,
2300 2301 2302 2303 2304 2305 2306 2307
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

2308
static bool sdhci_request_done(struct sdhci_host *host)
2309 2310 2311
{
	unsigned long flags;
	struct mmc_request *mrq;
2312
	int i;
2313

2314 2315
	spin_lock_irqsave(&host->lock, flags);

2316 2317
	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
2318
		if (mrq)
2319
			break;
2320
	}
2321

2322 2323 2324 2325
	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}
2326

2327 2328
	sdhci_del_timer(host, mrq);

2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344
	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

2345 2346 2347 2348
	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
2349
	if (sdhci_needs_reset(host, mrq)) {
2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

2361
		/* Some controllers need this kick or reset won't work here */
2362
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2363
			/* This is to force an update */
2364
			host->ops->set_clock(host, host->clock);
2365 2366 2367

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
2368 2369
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
2370 2371

		host->pending_reset = false;
2372 2373
	}

2374 2375
	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
2376

2377 2378
	host->mrqs_done[i] = NULL;

2379
	mmiowb();
2380 2381 2382
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
2383 2384 2385 2386 2387 2388 2389 2390 2391 2392

	return false;
}

/* Tasklet: drain all completed requests. */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427
	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2428 2429
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
2430 2431 2432
		sdhci_dumpregs(host);

		if (host->data) {
P
Pierre Ossman 已提交
2433
			host->data->error = -ETIMEDOUT;
2434
			sdhci_finish_data(host);
2435 2436 2437
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
2438
		} else {
2439 2440
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
2441 2442 2443
		}
	}

2444
	mmiowb();
2445 2446 2447 2448 2449 2450 2451 2452 2453
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

2454
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2455 2456
{
	if (!host->cmd) {
2457 2458 2459 2460 2461 2462 2463
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits.  Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
2464 2465
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
2466 2467 2468 2469
		sdhci_dumpregs(host);
		return;
	}

2470 2471 2472 2473 2474 2475
	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;
2476

2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493
		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error.  We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

2494
		sdhci_finish_mrq(host, host->cmd->mrq);
2495 2496 2497 2498
		return;
	}

	if (intmask & SDHCI_INT_RESPONSE)
2499
		sdhci_finish_command(host);
2500 2501
}

#ifdef CONFIG_MMC_DEBUG
/* Dump the ADMA descriptor table after an ADMA error (debug builds only). */
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif

2534 2535
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
2536
	u32 command;
2537

2538 2539
	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
2540 2541 2542
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2543 2544 2545 2546 2547 2548
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

2549
	if (!host->data) {
2550 2551
		struct mmc_command *data_cmd = host->data_cmd;

2552
		/*
2553 2554 2555
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
2556
		 */
2557
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2558
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2559
				host->data_cmd = NULL;
2560
				data_cmd->error = -ETIMEDOUT;
2561
				sdhci_finish_mrq(host, data_cmd->mrq);
2562 2563
				return;
			}
2564
			if (intmask & SDHCI_INT_DATA_END) {
2565
				host->data_cmd = NULL;
2566 2567 2568 2569 2570
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
2571 2572 2573
				if (host->cmd == data_cmd)
					return;

2574
				sdhci_finish_mrq(host, data_cmd->mrq);
2575 2576 2577
				return;
			}
		}
2578

2579 2580 2581 2582 2583 2584 2585 2586
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

2587 2588
		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
2589 2590 2591 2592 2593 2594
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
P
Pierre Ossman 已提交
2595
		host->data->error = -ETIMEDOUT;
2596 2597 2598 2599 2600
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
P
Pierre Ossman 已提交
2601
		host->data->error = -EILSEQ;
2602
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2603
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2604
		sdhci_adma_show_error(host);
2605
		host->data->error = -EIO;
2606 2607
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
2608
	}
2609

P
Pierre Ossman 已提交
2610
	if (host->data->error)
2611 2612
		sdhci_finish_data(host);
	else {
P
Pierre Ossman 已提交
2613
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2614 2615
			sdhci_transfer_pio(host);

2616 2617 2618 2619
		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
2620 2621 2622 2623
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
2624
		 */
2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
2636 2637
			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    dmastart, host->data->bytes_xfered, dmanow);
2638 2639
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}
2640

2641
		if (intmask & SDHCI_INT_DATA_END) {
2642
			if (host->cmd == host->data_cmd) {
2643 2644 2645 2646 2647 2648 2649 2650 2651 2652
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
2653 2654 2655
	}
}

2656
static irqreturn_t sdhci_irq(int irq, void *dev_id)
2657
{
2658
	irqreturn_t result = IRQ_NONE;
2659
	struct sdhci_host *host = dev_id;
2660
	u32 intmask, mask, unexpected = 0;
2661
	int max_loops = 16;
2662 2663 2664

	spin_lock(&host->lock);

2665
	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2666
		spin_unlock(&host->lock);
2667
		return IRQ_NONE;
2668 2669
	}

2670
	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2671
	if (!intmask || intmask == 0xffffffff) {
2672 2673 2674 2675
		result = IRQ_NONE;
		goto out;
	}

2676
	do {
A
Adrian Hunter 已提交
2677 2678 2679 2680 2681 2682 2683 2684
		DBG("IRQ status 0x%08x\n", intmask);

		if (host->ops->irq) {
			intmask = host->ops->irq(host, intmask);
			if (!intmask)
				goto cont;
		}

2685 2686 2687 2688
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2689

2690 2691 2692
		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;
2693

2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704
			/*
			 * There is a observation on i.mx esdhc.  INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system.  And the REMOVE gets the
			 * same situation.
			 *
			 * More testing are needed here to ensure it works
			 * for other platforms though.
			 */
2705 2706 2707 2708 2709 2710
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2711 2712 2713

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2714 2715 2716 2717

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
2718
		}
2719

2720
		if (intmask & SDHCI_INT_CMD_MASK)
2721
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2722

2723 2724
		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2725

2726 2727 2728
		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));
2729

2730 2731 2732
		if (intmask & SDHCI_INT_RETUNE)
			mmc_retune_needed(host->mmc);

2733 2734
		if ((intmask & SDHCI_INT_CARD_INT) &&
		    (host->ier & SDHCI_INT_CARD_INT)) {
2735 2736 2737 2738
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}
P
Pierre Ossman 已提交
2739

2740 2741 2742
		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2743
			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
P
Pierre Ossman 已提交
2744

2745 2746 2747 2748
		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}
A
Adrian Hunter 已提交
2749
cont:
2750 2751
		if (result == IRQ_NONE)
			result = IRQ_HANDLED;
2752

2753 2754
		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
2755 2756 2757
out:
	spin_unlock(&host->lock);

2758 2759 2760 2761 2762
	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}
P
Pierre Ossman 已提交
2763

2764 2765 2766
	return result;
}

2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

2778
	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2779 2780 2781 2782
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
2783 2784
	}

2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796
	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

2797 2798 2799 2800 2801 2802 2803
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
2804 2805 2806 2807 2808 2809 2810 2811
/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
K
Kevin Liu 已提交
2812 2813 2814 2815 2816
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;
2817 2818
	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
		      SDHCI_INT_CARD_INT;
K
Kevin Liu 已提交
2819 2820 2821 2822

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask ;
	/* Avoid fake wake up */
2823
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
K
Kevin Liu 已提交
2824
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2825 2826
		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
K
Kevin Liu 已提交
2827
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2828
	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
K
Kevin Liu 已提交
2829 2830 2831
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	/* Clear the same wakeup sources sdhci_enable_irq_wakeups() sets. */
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
2842

2843
int sdhci_suspend_host(struct sdhci_host *host)
2844
{
2845 2846
	sdhci_disable_card_detection(host);

2847
	mmc_retune_timer_stop(host->mmc);
2848

K
Kevin Liu 已提交
2849
	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2850 2851 2852
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
K
Kevin Liu 已提交
2853 2854 2855 2856 2857
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
2858
	return 0;
2859 2860
}

2861
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2862

2863 2864
int sdhci_resume_host(struct sdhci_host *host)
{
2865
	struct mmc_host *mmc = host->mmc;
2866
	int ret = 0;
2867

2868
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2869 2870 2871
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}
2872

2873 2874 2875 2876 2877 2878
	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
2879
		mmc->ops->set_ios(mmc, &mmc->ios);
2880 2881 2882 2883
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}
2884

2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895
	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

2896 2897
	sdhci_enable_card_detection(host);

2898
	return ret;
2899 2900
}

2901
EXPORT_SYMBOL_GPL(sdhci_resume_host);
2902 2903 2904 2905 2906

int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

2907
	mmc_retune_timer_stop(host->mmc);
2908 2909

	spin_lock_irqsave(&host->lock, flags);
2910 2911 2912
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2913 2914
	spin_unlock_irqrestore(&host->lock, flags);

2915
	synchronize_hardirq(host->irq);
2916 2917 2918 2919 2920

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

2921
	return 0;
2922 2923 2924 2925 2926
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

int sdhci_runtime_resume_host(struct sdhci_host *host)
{
2927
	struct mmc_host *mmc = host->mmc;
2928
	unsigned long flags;
2929
	int host_flags = host->flags;
2930 2931 2932 2933 2934 2935 2936 2937

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

2938 2939 2940 2941 2942 2943
	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
		/* Force clock and power re-program */
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
		mmc->ops->set_ios(mmc, &mmc->ios);
2944

2945 2946 2947 2948 2949 2950
		if ((host_flags & SDHCI_PV_ENABLED) &&
		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
			spin_lock_irqsave(&host->lock, flags);
			sdhci_enable_preset_value(host, true);
			spin_unlock_irqrestore(&host->lock, flags);
		}
2951

2952 2953 2954 2955
		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
		    mmc->ops->hs400_enhanced_strobe)
			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
	}
2956

2957 2958 2959 2960 2961
	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
2962
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2963 2964 2965 2966 2967 2968 2969
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

2970
	return 0;
2971 2972 2973
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

2974
#endif /* CONFIG_PM */
2975

A
Adrian Hunter 已提交
2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088
/*****************************************************************************\
 *                                                                           *
 * Command Queue Engine (CQE) helpers                                        *
 *                                                                           *
\*****************************************************************************/

/*
 * Switch the host into CQE mode: select ADMA, program block size and
 * maximum timeout, and install the CQE interrupt mask.  Register writes
 * are order-sensitive; do not reorder.
 */
void sdhci_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	/* CQE uses ADMA; pick 64- or 32-bit descriptors to match the host. */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		ctrl |= SDHCI_CTRL_ADMA64;
	else
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 512),
		     SDHCI_BLOCK_SIZE);

	/* Set maximum timeout */
	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);

	host->ier = host->cqe_ier;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	host->cqe_on = true;

	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);

/*
 * Leave CQE mode: restore the default interrupt mask and, when called for
 * error recovery, reset the command and data circuits.
 */
void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (recovery) {
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);

/*
 * CQE interrupt pre-processing.  Translates interrupt status bits into
 * *cmd_error / *data_error codes and clears the handled bits.  Returns
 * false when the host is not in CQE mode (caller handles the IRQ itself),
 * true when the interrupt was consumed here.
 */
bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
		   int *data_error)
{
	u32 mask;

	if (!host->cqe_on)
		return false;

	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
		*cmd_error = -EILSEQ;
	else if (intmask & SDHCI_INT_TIMEOUT)
		*cmd_error = -ETIMEDOUT;
	else
		*cmd_error = 0;

	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
		*data_error = -EILSEQ;
	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
		*data_error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		*data_error = -EIO;
	else
		*data_error = 0;

	/* Clear selected interrupts. */
	mask = intmask & host->cqe_ier;
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

	if (intmask & SDHCI_INT_BUS_POWER)
		pr_err("%s: Card is consuming too much power!\n",
		       mmc_hostname(host->mmc));

	/* Anything left over was not expected in CQE mode - log and clear. */
	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
	if (intmask) {
		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);
	}

	return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);

3089 3090
/*****************************************************************************\
 *                                                                           *
3091
 * Device allocation/registration                                            *
3092 3093 3094
 *                                                                           *
\*****************************************************************************/

3095 3096
/*
 * Allocate an mmc_host with @priv_size bytes of driver-private space and
 * initialize the sdhci defaults (ops, signalling level, CQE masks).
 * Returns ERR_PTR(-ENOMEM) if the mmc_host cannot be allocated.
 */
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	/* Give the host a private copy of the ops so drivers may override */
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	host->cqe_ier     = SDHCI_CQE_INT_MASK;
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int rc = -EINVAL;

	/* Some controllers advertise 64-bit DMA but cannot really do it */
	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* First choice: a 64-bit mask, when the controller claims support */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (rc) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* Otherwise, or if 64-bit failed, fall back to a 32-bit mask */
	if (rc) {
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (rc)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return rc;
}

3152 3153 3154
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;
3155 3156
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;
3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

3171 3172 3173 3174 3175
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps-mask", &dt_caps_mask);
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps", &dt_caps);

3176 3177 3178 3179 3180 3181
	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

3182 3183 3184 3185 3186 3187 3188
	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}
3189 3190 3191 3192

	if (host->version < SDHCI_SPEC_300)
		return;

3193 3194 3195 3196 3197 3198 3199
	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
3200 3201 3202
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);

3203
int sdhci_setup_host(struct sdhci_host *host)
3204 3205
{
	struct mmc_host *mmc;
3206 3207
	u32 max_current_caps;
	unsigned int ocr_avail;
3208
	unsigned int override_timeout_clk;
3209
	u32 max_clk;
3210
	int ret;
3211

3212 3213 3214
	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;
3215

3216
	mmc = host->mmc;
3217

3218 3219 3220 3221 3222 3223 3224 3225 3226 3227
	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so that
	 * the host can take the appropriate action if regulators are not
	 * available.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		return ret;

3228
	sdhci_read_caps(host);
3229

3230 3231
	override_timeout_clk = host->timeout_clk;

3232
	if (host->version > SDHCI_SPEC_300) {
3233 3234
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
3235 3236
	}

3237
	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3238
		host->flags |= SDHCI_USE_SDMA;
3239
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3240
		DBG("Controller doesn't have SDMA capability\n");
3241
	else
3242
		host->flags |= SDHCI_USE_SDMA;
3243

3244
	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3245
		(host->flags & SDHCI_USE_SDMA)) {
R
Rolf Eike Beer 已提交
3246
		DBG("Disabling DMA as it is marked broken\n");
3247
		host->flags &= ~SDHCI_USE_SDMA;
3248 3249
	}

3250
	if ((host->version >= SDHCI_SPEC_200) &&
3251
		(host->caps & SDHCI_CAN_DO_ADMA2))
3252
		host->flags |= SDHCI_USE_ADMA;
3253 3254 3255 3256 3257 3258 3259

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

3260 3261 3262 3263 3264 3265 3266
	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
	 * that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
3267
	if (host->caps & SDHCI_CAN_64BIT)
3268 3269
		host->flags |= SDHCI_USE_64_BIT_DMA;

3270
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
3282 3283 3284
		}
	}

3285 3286 3287 3288
	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

3289
	if (host->flags & SDHCI_USE_ADMA) {
3290 3291 3292
		dma_addr_t dma;
		void *buf;

3293
		/*
3294 3295 3296 3297
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multipled by the descriptor size.
3298
		 */
3299 3300 3301 3302 3303 3304 3305 3306 3307
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}
3308

3309
		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3310 3311 3312
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
J
Joe Perches 已提交
3313
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3314 3315
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
3316 3317
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
J
Joe Perches 已提交
3318 3319
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
3320
			host->flags &= ~SDHCI_USE_ADMA;
3321 3322 3323 3324 3325
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;
3326

3327 3328 3329
			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
3330 3331
	}

3332 3333 3334 3335 3336
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
3337
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3338
		host->dma_mask = DMA_BIT_MASK(64);
3339
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
3340
	}
3341

3342
	if (host->version >= SDHCI_SPEC_300)
3343
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3344 3345
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
3346
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3347 3348
			>> SDHCI_CLOCK_BASE_SHIFT;

3349
	host->max_clk *= 1000000;
3350 3351
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3352
		if (!host->ops->get_max_clock) {
3353 3354
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
3355 3356
			ret = -ENODEV;
			goto undma;
3357 3358
		}
		host->max_clk = host->ops->get_max_clock(host);
3359
	}
3360

3361 3362 3363 3364
	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
3365
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

3377 3378 3379
	/*
	 * Set host parameters.
	 */
3380 3381
	max_clk = host->max_clk;

3382
	if (host->ops->get_min_clock)
3383
		mmc->f_min = host->ops->get_min_clock(host);
3384 3385 3386
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3387
			max_clk = host->max_clk * host->clk_mul;
3388 3389 3390
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
3391
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3392

3393
	if (!mmc->f_max || mmc->f_max > max_clk)
3394 3395
		mmc->f_max = max_clk;

3396
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3397
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3398 3399 3400 3401 3402 3403 3404 3405
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
3406 3407
				ret = -ENODEV;
				goto undma;
3408
			}
3409 3410
		}

3411
		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3412
			host->timeout_clk *= 1000;
3413

3414 3415 3416
		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

3417
		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3418
			host->ops->get_max_timeout_count(host) : 1 << 27;
3419 3420
		mmc->max_busy_timeout /= host->timeout_clk;
	}
3421

3422
	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3423
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3424 3425 3426

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;
3427

3428
	/* Auto-CMD23 stuff only works in ADMA or PIO. */
A
Andrei Warkentin 已提交
3429
	if ((host->version >= SDHCI_SPEC_300) &&
3430
	    ((host->flags & SDHCI_USE_ADMA) ||
3431 3432
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3433
		host->flags |= SDHCI_AUTO_CMD23;
3434
		DBG("Auto-CMD23 available\n");
3435
	} else {
3436
		DBG("Auto-CMD23 unavailable\n");
3437 3438
	}

3439 3440 3441 3442 3443 3444 3445
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
3446
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3447
		mmc->caps |= MMC_CAP_4_BIT_DATA;
3448

3449 3450 3451
	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

3452
	if (host->caps & SDHCI_CAN_DO_HISPD)
3453
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3454

3455
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3456
	    mmc_card_is_removable(mmc) &&
3457
	    mmc_gpio_get_cd(host->mmc) < 0)
3458 3459
		mmc->caps |= MMC_CAP_NEEDS_POLL;

3460
	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3461 3462 3463 3464
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
3465 3466 3467
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);
3468 3469 3470
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
3471
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3472
		}
3473
	}
3474

3475 3476 3477 3478
	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
	}
3479

3480
	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3481 3482
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
3483 3484 3485
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 supports also implies SDR50 support */
3486
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3487
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3488 3489 3490
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
3491
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3492
			mmc->caps2 |= MMC_CAP2_HS200;
3493
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3494
		mmc->caps |= MMC_CAP_UHS_SDR50;
3495
	}
3496

3497
	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3498
	    (host->caps1 & SDHCI_SUPPORT_HS400))
3499 3500
		mmc->caps2 |= MMC_CAP2_HS400;

3501 3502 3503 3504 3505 3506
	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

3507 3508
	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3509 3510
		mmc->caps |= MMC_CAP_UHS_DDR50;

3511
	/* Does the host need tuning for SDR50? */
3512
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3513 3514
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

3515
	/* Driver Type(s) (A, C, D) supported by the host */
3516
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3517
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3518
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3519
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3520
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3521 3522
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

3523
	/* Initial value for re-tuning timer count */
3524 3525
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3526 3527 3528 3529 3530 3531 3532 3533 3534

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
3535
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3536 3537
			     SDHCI_RETUNING_MODE_SHIFT;

3538
	ocr_avail = 0;
3539

3540 3541 3542 3543 3544 3545 3546 3547
	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3548
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3549
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
3563

3564
	if (host->caps & SDHCI_CAN_VDD_330) {
3565
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3566

A
Aaron Lu 已提交
3567
		mmc->max_current_330 = ((max_current_caps &
3568 3569 3570 3571
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
3572
	if (host->caps & SDHCI_CAN_VDD_300) {
3573
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3574

A
Aaron Lu 已提交
3575
		mmc->max_current_300 = ((max_current_caps &
3576 3577 3578 3579
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
3580
	if (host->caps & SDHCI_CAN_VDD_180) {
3581 3582
		ocr_avail |= MMC_VDD_165_195;

A
Aaron Lu 已提交
3583
		mmc->max_current_180 = ((max_current_caps &
3584 3585 3586 3587 3588
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

3589 3590 3591 3592 3593
	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
3594
	if (mmc->ocr_avail)
3595
		ocr_avail = mmc->ocr_avail;
3596

3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608
	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3609 3610

	if (mmc->ocr_avail == 0) {
3611 3612
		pr_err("%s: Hardware doesn't report any support voltages.\n",
		       mmc_hostname(mmc));
3613 3614
		ret = -ENODEV;
		goto unreg;
3615 3616
	}

3617 3618 3619 3620 3621 3622 3623 3624 3625
	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

3626 3627 3628
	spin_lock_init(&host->lock);

	/*
3629 3630
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
3631
	 */
3632
	if (host->flags & SDHCI_USE_ADMA)
3633
		mmc->max_segs = SDHCI_MAX_SEGS;
3634
	else if (host->flags & SDHCI_USE_SDMA)
3635
		mmc->max_segs = 1;
3636
	else /* PIO */
3637
		mmc->max_segs = SDHCI_MAX_SEGS;
3638 3639

	/*
3640 3641 3642
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
3643
	 */
3644
	mmc->max_req_size = 524288;
3645 3646 3647

	/*
	 * Maximum segment size. Could be one segment with the maximum number
3648 3649
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
3650
	 */
3651 3652 3653 3654 3655 3656
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
3657
		mmc->max_seg_size = mmc->max_req_size;
3658
	}
3659

3660 3661 3662 3663
	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
3664 3665 3666
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
3667
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3668 3669
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
J
Joe Perches 已提交
3670 3671
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
3672 3673 3674 3675 3676
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
3677

3678 3679 3680
	/*
	 * Maximum block count.
	 */
3681
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3682

3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699
	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715
void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

3716 3717 3718 3719 3720
int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

3721 3722 3723 3724 3725 3726
	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

3727
	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3728 3729
	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
		    (unsigned long)host);
3730

3731
	init_waitqueue_head(&host->buf_ready_int);
3732

3733 3734
	sdhci_init(host, 0);

3735 3736
	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED,	mmc_hostname(mmc), host);
3737 3738 3739
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
3740
		goto untasklet;
3741
	}
3742 3743 3744 3745 3746

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

3747
	ret = sdhci_led_register(host);
3748 3749 3750
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
3751
		goto unirq;
3752
	}
3753

3754 3755
	mmiowb();

3756 3757 3758
	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;
3759

3760
	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3761
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3762 3763
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3764
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3765

3766 3767
	sdhci_enable_card_detection(host);

3768 3769
	return 0;

3770
unled:
3771
	sdhci_led_unregister(host);
3772
unirq:
3773
	sdhci_do_reset(host, SDHCI_RESET_ALL);
3774 3775
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3776
	free_irq(host->irq, host);
3777
untasklet:
3778
	tasklet_kill(&host->finish_tasklet);
3779

3780 3781
	return ret;
}
3782 3783 3784 3785 3786 3787 3788 3789 3790
EXPORT_SYMBOL_GPL(__sdhci_add_host);

/*
 * Convenience wrapper: run both registration stages, cleaning up the
 * first stage's resources if the second fails.
 */
int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
3804

P
Pierre Ossman 已提交
3805
void sdhci_remove_host(struct sdhci_host *host, int dead)
3806
{
3807
	struct mmc_host *mmc = host->mmc;
P
Pierre Ossman 已提交
3808 3809 3810 3811 3812 3813 3814
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

3815
		if (sdhci_has_requests(host)) {
3816
			pr_err("%s: Controller removed during "
3817
				" transfer!\n", mmc_hostname(mmc));
3818
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
P
Pierre Ossman 已提交
3819 3820 3821 3822 3823
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

3824 3825
	sdhci_disable_card_detection(host);

3826
	mmc_remove_host(mmc);
3827

3828
	sdhci_led_unregister(host);
3829

P
Pierre Ossman 已提交
3830
	if (!dead)
3831
		sdhci_do_reset(host, SDHCI_RESET_ALL);
3832

3833 3834
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3835 3836 3837
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
3838
	del_timer_sync(&host->data_timer);
3839 3840

	tasklet_kill(&host->finish_tasklet);
3841

3842 3843
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
3844

3845
	if (host->align_buffer)
3846 3847 3848
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
3849

3850
	host->adma_table = NULL;
3851
	host->align_buffer = NULL;
3852 3853
}

3854
EXPORT_SYMBOL_GPL(sdhci_remove_host);
3855

3856
void sdhci_free_host(struct sdhci_host *host)
3857
{
3858
	mmc_free_host(host->mmc);
3859 3860
}

3861
EXPORT_SYMBOL_GPL(sdhci_free_host);
3862 3863 3864 3865 3866 3867 3868 3869 3870

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

/*
 * Module init: nothing is registered globally - individual hosts register
 * themselves via sdhci_add_host() - so only announce the driver.
 */
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

/* Nothing to undo at module exit; all state is per-host. */
static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

3885
module_param(debug_quirks, uint, 0444);
3886
module_param(debug_quirks2, uint, 0444);
3887

3888
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3889
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3890
MODULE_LICENSE("GPL");
3891

3892
MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3893
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");