/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

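/*
 * Translate an mmc_command from the core into a CMD register value:
 * flag stop/abort commands, request a response and CRC check when the
 * core asks for them, and set the data/write/stream bits for data
 * commands.  CMD11 (voltage switch) additionally has low-power clock
 * gating disabled here, since the clock must keep running during the
 * UHS voltage-switch sequence.
 */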
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

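/*
 * Prepare the stop/abort command used if a data command fails:
 * CMD12 (STOP_TRANSMISSION) for block read/write and tuning commands,
 * or an I/O abort through SD_IO_RW_DIRECT for SDIO extended transfers.
 * Returns the corresponding CMD register value, or 0 if no stop
 * command is needed.
 */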
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy.  Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

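/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist,
 * using the 64-bit or 32-bit descriptor layout as appropriate, and
 * mark the first and last descriptors of the transfer.
 */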
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u64 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) *
				sizeof(struct idmac_desc_64addr);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u32 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des2 = mem_addr;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;
	}

	wmb();
}

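/*
 * Program the descriptor list for this transfer, switch the controller
 * from PIO to the internal DMAC and kick the IDMAC by writing the poll
 * demand register.
 */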
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
			p->des3 = host->sg_dma + (sizeof(struct idmac_desc) *
								(i + 1));

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = host->sg_dma;
		p->des0 = IDMAC_DES0_ER;
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

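/*
 * Map the scatterlist for DMA.  Transfers that are too short or not
 * 32-bit aligned are rejected so that they fall back to PIO.  When
 * called from the pre_req hook (next != 0), the mapping is cached in
 * data->host_cookie for the later dw_mci_submit_data_dma() call.
 */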
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

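/*
 * Pick the DMA multiple-transaction size (MSIZE) and the RX/TX FIFO
 * watermarks for the current block size and FIFO depth, then program
 * FIFOTH.  Falls back to single-transfer MSIZE when the block size is
 * not a multiple of the FIFO width.
 */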
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx reaches '0' without finding a match,
	 * the initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}

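/*
 * Configure the card read threshold (CDTHRCTL): enable it for HS200,
 * HS400 and SDR104 timings when a whole block fits into the FIFO,
 * otherwise disable it.  Controllers older than 2.40a do not implement
 * the register, so it is left untouched there.
 */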
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A)
		return;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_MMC_HS400 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If the current block size is the same as the previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

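/*
 * Program the card clock for this slot: compute CLKDIV from bus_hz and
 * the requested rate, disable the clock, update the divider and
 * re-enable it (with low-power gating unless the slot is flagged to
 * keep the clock running, e.g. for SDIO interrupts), informing the CIU
 * with an update-clock command after each step.  Also sets the slot's
 * bus width in CTYPE.
 */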
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the requested clock with the clock divider reflected */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		/*
		 * Databook says to fail after 2ms w/ no response; give an
		 * extra jiffy just in case we're about to roll over.
		 */
		mod_timer(&host->cmd11_timer,
			  jiffies + msecs_to_jiffies(2) + 1);
	}

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int min_uv, max_uv;
	int ret;

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		min_uv = 2700000;
		max_uv = 3600000;
		uhs &= ~v18;
	} else {
		min_uv = 1700000;
		max_uv = 1950000;
		uhs |= v18;
	}
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);

		if (ret) {
			dev_dbg(&mmc->class_dev,
					 "Regulator set error %d: %d - %d\n",
					 ret, min_uv, max_uv);
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
			(slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
		read_only = 0;
	else if (!IS_ERR_VALUE(gpio_ro))
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -ENOSYS;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

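/*
 * Finish the current request and hand it back to the core, then either
 * start the next queued slot's request or drop back to the idle (or
 * waiting-for-CMD11) state.  Called with host->lock held; the lock is
 * dropped around mmc_request_done().
 */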
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

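/*
 * Bottom-half state machine, driven by the interrupt handler through
 * host->pending_events: it walks a request through the command, data,
 * busy and stop phases, issues stop/abort commands on errors and
 * finally completes the request back to the MMC core.
 */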
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, a stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

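/*
 * PIO helpers: host->part_buf holds a partial FIFO word so that
 * transfers whose byte count is not a multiple of the FIFO width
 * (16, 32 or 64 bits) can still be pushed to and pulled from the data
 * register in whole words.
 */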
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		 /* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		 /* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
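			/*
			 * Bytes currently available to read: the FIFO fill
			 * level (scaled from words to bytes by data_shift)
			 * plus whatever is still sitting in part_buf.
			 */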
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready, read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
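			/*
			 * Bytes of free space in the FIFO (depth minus fill
			 * level, scaled to bytes), less any bytes already
			 * staged in part_buf.
			 */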
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR, write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static void dw_mci_handle_cd(struct dw_mci *host)
{
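	/*
	 * A card-detect interrupt fired: let the MMC core know for each slot
	 * so it can rescan the card after the configured debounce delay.
	 */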
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;

		if (slot->mmc->ops->card_event)
			slot->mmc->ops->card_event(slot->mmc);
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			del_timer(&host->cmd11_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
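	/* 64-bit IDMAC status is reported in IDSTS64; the 32-bit variant uses IDSTS. */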
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	}
#endif

	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}

static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
			dev_warn(dev, "Slot quirk %s is deprecated\n",
					of_slot_quirks[idx].quirk);
			quirks |= of_slot_quirks[idx].id;
		}

	return quirks;
}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
#endif /* CONFIG_OF */

static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
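	/*
	 * Use min/max frequencies from the "clock-freq-min-max" DT property
	 * when present; otherwise fall back to the driver defaults.
	 */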
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/* if there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	/* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
	addr_config = (mci_readl(host, HCON) >> 27) & 0x01;

	if (addr_config == 1) {
		/* host supports IDMAC in 64-bit address mode */
		host->dma_64bit_address = 1;
		dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
		if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
			dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
	} else {
		/* host supports IDMAC in 32-bit address mode */
		host->dma_64bit_address = 0;
		dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
	}

	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
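	/*
	 * Set the requested reset bits in CTRL and poll (for up to 500ms)
	 * until the controller clears them again.
	 */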
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}

static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;
			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to "
					"clear during reset\n", __func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev, "%s: fifo/dma reset bits didn't "
				"clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers  */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

static void dw_mci_cmd11_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;
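	/*
	 * The card never finished the CMD11 voltage switch; flag a response
	 * timeout so the tasklet can complete the request.
	 */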

	if (host->state != STATE_SENDING_CMD11)
		dev_info(host->dev, "Unexpected CMD11 timeout\n");

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	}, {
		.quirk	= "disable-wp",
		.id	= DW_MCI_QUIRK_NO_WRITE_PROTECT,
	},
};

static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				&pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
				"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

static void dw_mci_enable_cd(struct dw_mci *host)
{
	struct dw_mci_board *brd = host->pdata;
	unsigned long irqflags;
	u32 temp;
	int i;

	/* No need for CD if broken card detection */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		return;

	/* No need for CD if all slots have a non-error GPIO */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
			break;
	}
	if (i == host->num_slots)
		return;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp  |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply num_slots.\n");
		return -ENODEV;
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	setup_timer(&host->cmd11_timer,
		    dw_mci_cmd11_timer, (unsigned long)host);

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev, "DW MMC controller at irq %d, "
		 "%d bit host data width, "
		 "%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev, "attempted to initialize %d slots, "
					"but failed on all\n", host->num_slots);
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);



#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");