/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <linux/leds.h>

#include <linux/mmc/host.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

static unsigned int debug_quirks = 0;

static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_MAX_CURRENT));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

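/*
 * Update the interrupt status enable and signal enable registers
 * together, so the controller latches and signals the same set of
 * interrupt sources.
 */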
static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
{
	u32 ier;

	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
	ier &= ~clear;
	ier |= set;
	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, 0, irqs);
}

static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, irqs, 0);
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;

	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return;

	if (enable)
		sdhci_unmask_irqs(host, irqs);
	else
		sdhci_mask_irqs(host, irqs);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

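/*
 * Request a software reset of the blocks selected by @mask and wait up
 * to 100 ms for the controller to clear the bits again.
 */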
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}

static void sdhci_init(struct sdhci_host *host)
{
	sdhci_reset(host, SDHCI_RESET_ALL);

	sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
		SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
		SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host);
	sdhci_enable_card_detection(host);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

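/* Read one data block from the buffer data port, 32 bits at a time. */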
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

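/* Write one data block to the buffer data port, 32 bits at a time. */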
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

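/*
 * Build the ADMA descriptor table for a request: map the scatterlist,
 * bounce any unaligned leading bytes through the align buffer, and
 * terminate the table with a nop/end descriptor.
 */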
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/*
	 * The ADMA descriptor table is mapped further down as we
	 * need to fill it with data first.
	 */

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, 128 * 4, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & 0x3);

	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
		data->sg, data->sg_len, direction);
	if (host->sg_count == 0)
		goto unmap_align;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			desc[7] = (align_addr >> 24) & 0xff;
			desc[6] = (align_addr >> 16) & 0xff;
			desc[5] = (align_addr >> 8) & 0xff;
			desc[4] = (align_addr >> 0) & 0xff;

			BUG_ON(offset > 65536);

			desc[3] = (offset >> 8) & 0xff;
			desc[2] = (offset >> 0) & 0xff;

			desc[1] = 0x00;
			desc[0] = 0x21; /* tran, valid */

			align += 4;
			align_addr += 4;

			desc += 8;

			addr += offset;
			len -= offset;
		}

		desc[7] = (addr >> 24) & 0xff;
		desc[6] = (addr >> 16) & 0xff;
		desc[5] = (addr >> 8) & 0xff;
		desc[4] = (addr >> 0) & 0xff;

		BUG_ON(len > 65536);

		desc[3] = (len >> 8) & 0xff;
		desc[2] = (len >> 0) & 0xff;

		desc[1] = 0x00;
		desc[0] = 0x21; /* tran, valid */

		desc += 8;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
	}

	/*
	 * Add a terminating entry.
	 */
	desc[7] = 0;
	desc[6] = 0;
	desc[5] = 0;
	desc[4] = 0;

	desc[3] = 0;
	desc[2] = 0;

	desc[1] = 0x00;
	desc[0] = 0x03; /* nop, end, valid */

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, 128 * 4, direction);
	}

	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
		goto unmap_entries;
	BUG_ON(host->adma_addr & 0x3);

	return 0;

unmap_entries:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);
fail:
	return -EINVAL;
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
		(128 * 2 + 1) * 4, DMA_TO_DEVICE);

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);

	if (data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}

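/*
 * Translate the request's timeout (nanoseconds plus clocks) into the
 * 4-bit exponent written to the timeout control register.
 */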
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
		return 0xE;

	/* timeout in us */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		printk(KERN_WARNING "%s: Too large timeout requested!\n",
			mmc_hostname(host->mmc));
		count = 0xE;
	}

	return count;
}

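/*
 * Enable the PIO or DMA completion interrupts, whichever matches how
 * this request will be transferred, and mask the other set.
 */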
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
	else
		sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
}

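/*
 * Set up the data path for a request: data timeout, DMA/ADMA or PIO
 * selection, and the block size and block count registers.
 */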
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	u8 ctrl;
	int ret;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	count = sdhci_calc_timeout(host, data);
	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);

	if (host->flags & SDHCI_USE_DMA)
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		sg_miter_start(&host->sg_miter,
			data->sg, data->sg_len, SG_MITER_ATOMIC);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_data *data)
{
	u16 mode;

	if (data == NULL)
		return;

	WARN_ON(!host->data);

	mode = SDHCI_TRNS_BLK_CNT_EN;
	if (data->blocks > 1)
		mode |= SDHCI_TRNS_MULTI;
	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				data->sg_len, (data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}

static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	if (host->ops->set_clock) {
		host->ops->set_clock(host, clock);
		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
			return;
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 10 ms */
	timeout = 10;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}

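/*
 * Set the bus power. @power is a bit number from the OCR mask, or
 * (unsigned short)-1 to switch the bus power off.
 */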
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (host->power == power)
		return;

	if (power == (unsigned short)-1) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		goto out;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;

	switch (1 << power) {
	case MMC_VDD_165_195:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
		pwr |= SDHCI_POWER_330;
		break;
	default:
		BUG();
	}

	/*
	 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and turn on the power at the same time, so set the voltage first.
	 */
	if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
		sdhci_writeb(host, pwr & ~SDHCI_POWER_ON, SDHCI_POWER_CONTROL);

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

out:
	host->power = power;
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	bool present;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	host->mrq = mrq;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		present = true;
	else
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				SDHCI_CARD_PRESENT;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

out:
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host;
	unsigned long flags;
	int present;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		present = 0;
	else
		present = sdhci_readl(host, SDHCI_PRESENT_STATE);

	spin_unlock_irqrestore(&host->lock, flags);

	if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
		return !!(present & SDHCI_WRITE_PROTECT);
	return !(present & SDHCI_WRITE_PROTECT);
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	if (enable)
		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
	else
		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
out:
	mmiowb();

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
		(mrq->cmd->error ||
		 (mrq->data && (mrq->data->error ||
		  (mrq->data->stop && mrq->data->stop->error))) ||
		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
			return;

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_END) {
				sdhci_finish_command(host);
				return;
			}
		}

		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		host->data->error = -EIO;

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
				SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

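/*
 * Top-level interrupt handler: acknowledges and dispatches card-detect,
 * command, data, bus-power and card (SDIO) interrupt sources.
 */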
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host* host = dev_id;
	u32 intmask;
	int cardint = 0;

	spin_lock(&host->lock);

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);

	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n",
		mmc_hostname(host->mmc), intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
			SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
			SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
			SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
	}

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask & SDHCI_INT_CARD_INT)
		cardint = 1;

	intmask &= ~SDHCI_INT_CARD_INT;

	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return result;
}

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
{
	int ret;

	sdhci_disable_card_detection(host);

	ret = mmc_suspend_host(host->mmc, state);
	if (ret)
		return ret;

	free_irq(host->irq, host);

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);

int sdhci_resume_host(struct sdhci_host *host)
{
	int ret;

	if (host->flags & SDHCI_USE_DMA) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
			  mmc_hostname(host->mmc), host);
	if (ret)
		return ret;

	sdhci_init(host);
	mmiowb();

	ret = mmc_resume_host(host->mmc);
	if (ret)
		return ret;

	sdhci_enable_card_detection(host);

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);

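/*
 * Probe the controller's version and capabilities, set up DMA/ADMA,
 * fill in the mmc_host limits and register the host with the MMC core.
 */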
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	unsigned int caps;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	if (debug_quirks)
		host->quirks = debug_quirks;

	sdhci_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_200) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),
			host->version);
	}

	caps = sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
	else
		host->flags |= SDHCI_USE_DMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_DMA;
	}

	if (host->flags & SDHCI_USE_DMA) {
		if ((host->version >= SDHCI_SPEC_200) &&
				(caps & SDHCI_CAN_DO_ADMA2))
			host->flags |= SDHCI_USE_ADMA;
	}

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	if (host->flags & SDHCI_USE_DMA) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				printk(KERN_WARNING "%s: No suitable DMA "
					"available. Falling back to PIO.\n",
					mmc_hostname(mmc));
				host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
			}
		}
	}

	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * We need to allocate descriptors for all sg entries
		 * (128) and potentially one alignment transfer for
		 * each of those entries.
		 */
		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
		if (!host->adma_desc || !host->align_buffer) {
			kfree(host->adma_desc);
			kfree(host->align_buffer);
			printk(KERN_WARNING "%s: Unable to allocate ADMA "
				"buffers. Falling back to standard DMA.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & SDHCI_USE_DMA)) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
	}

	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	host->max_clk *= 1000000;
	if (host->max_clk == 0) {
		if (!host->ops->get_max_clock) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify base clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		if (!host->ops->get_timeout_clock) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify timeout clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->timeout_clk = host->ops->get_timeout_clock(host);
	}
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_hw_segs = 128;
	else if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
	else /* PIO */
		mmc->max_hw_segs = 128;
	mmc->max_phys_segs = 128;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_seg_size = 65536;
	else
		mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (mmc->max_blk_size >= 3) {
		printk(KERN_WARNING "%s: Invalid maximum block size, "
			"assuming 512 bytes\n", mmc_hostname(mmc));
		mmc->max_blk_size = 512;
	} else
		mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		mmc_hostname(mmc), host);
	if (ret)
		goto untasklet;

	sdhci_init(host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret)
		goto reset;
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA)?"A":"",
		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_reset(host, SDHCI_RESET_ALL);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);

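/*
 * Unregister the host. If @dead is set the hardware is assumed to be
 * unreachable, so no reset is issued before the driver state is torn down.
 */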
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			printk(KERN_ERR "%s: Controller removed during "
				"transfer!\n", mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(host->mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	kfree(host->adma_desc);
	kfree(host->align_buffer);

	host->adma_desc = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");