/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as the Linux
 * MMC API requires.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half consists of a "hard" IRQ handler, an IRQ handler thread,
 * a DMA completion callback (if DMA is used), a timeout work item, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing is
 * finished. In case processing has to continue, i.e. if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine into the
 * bottom half waiting state.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

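/* What the bottom half is currently waiting for; see the overview above */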
enum mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum mmcif_state state;
	enum mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool card_present;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;

	dev_dbg(&host->pd->dev, "DMA transfer complete\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

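	/* dma_map_sg() returns the number of mapped segments, or 0 on failure */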
	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
			 struct sh_mmcif_plat_data *pdata,
			 enum dma_transfer_direction direction)
{
	struct dma_slave_config cfg = { 0, };
	struct dma_chan *chan;
	unsigned int slave_id;
	struct resource *res;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (pdata)
		slave_id = direction == DMA_MEM_TO_DEV
			 ? pdata->slave_id_tx : pdata->slave_id_rx;
	else
		slave_id = 0;

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				(void *)(unsigned long)slave_id, &host->pd->dev,
				direction == DMA_MEM_TO_DEV ? "tx" : "rx");

	dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
		direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);

	if (!chan)
		return NULL;

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);

	/* In the OF case the driver will get the slave ID from the DT */
	cfg.slave_id = slave_id;
	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	host->dma_active = false;

	if (pdata) {
		if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
			return;
	} else if (!host->pd->dev.of_node) {
		return;
	}

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
	if (!host->chan_tx)
		return;

	host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
	if (!host->chan_rx) {
		dma_release_channel(host->chan_tx);
		host->chan_tx = NULL;
	}
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
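	/*
	 * The divider field written below selects host->clk / 2^(x + 1);
	 * fls() picks the smallest power-of-two divider that does not
	 * overshoot the requested rate.
	 */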
	if (sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
				((fls(DIV_ROUND_UP(host->clk,
						   clk) - 1) - 1) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(&host->pd->dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

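/*
 * Advance the PIO pointer past the block just transferred, stepping to the
 * next scatterlist entry once the current one is exhausted. Returns true
 * while more data remains.
 */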
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
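	/*
	 * The "+ 3" makes the blocksize / 4 word loops round up, so a
	 * partial final word is still transferred (here and in
	 * sh_mmcif_single_write()).
	 */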
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing, if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

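	/* The opcode occupies the CMD_MASK field (bits 29:24) of CE_CMD_SET */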
	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 mask;

	switch (opc) {
	/* response busy check */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		mask = MASK_START_CMD | MASK_MRBSYE;
		break;
	default:
		mask = MASK_START_CMD | MASK_MCRSPE;
		break;
	}

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

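	/*
	 * CE_INT status bits are cleared by writing 0; 0xD80430C0 appears to
	 * set only the reserved bit positions, so this clears every status
	 * flag before the command starts.
	 */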
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO commands */
	case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
			break;
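		/*
		 * A BCR-type command here is really the SD/SDIO variant that
		 * shares the opcode: fall through and reject it below.
		 */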
	case MMC_APP_CMD:
	case SD_IO_RW_DIRECT:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	default:
		break;
	}

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
{
	int ret = clk_prepare_enable(host->hclk);

	if (!ret) {
		host->clk = clk_get_rate(host->hclk);
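		/* The hardware divider spans /2 .. /512 of the source clock */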
		host->mmc->f_max = host->clk / 2;
		host->mmc->f_min = host->clk / 512;
	}

	return ret;
}

static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vmmc))
		/* Errors ignored... */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
		sh_mmcif_set_power(host, ios);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			pm_runtime_put_sync(&host->pd->dev);
			clk_disable_unprepare(host->hclk);
			host->power = false;
			if (ios->power_mode == MMC_POWER_OFF)
				sh_mmcif_set_power(host, ios);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			sh_mmcif_clk_update(host);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	int ret = mmc_gpio_get_cd(mmc);

	if (ret >= 0)
		return ret;

	if (!p || !p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(&host->pd->dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from DMA callback and error, so, have to
	 * reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
	}

	return false;
}

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	bool wait = false;

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true, if processing continues, and false, if the
	 * request has to be completed - successfully or not
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
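	/* Ack the unmasked status bits just read: CE_INT is write-0-to-clear */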
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			mmcif_dma_complete(host);
	} else {
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}

static void mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = container_of(work, struct delayed_work, work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= msecs_to_jiffies(1000);
	host->ccs_enable = !pd || !pd->ccs_unsupported;
	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	host->hclk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
		goto err_pm;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto err_pm;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(&pdev->dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
		if (ret < 0)
			goto err_clk;
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->hclk) / 1000000UL);

	clk_disable_unprepare(host->hclk);
	return ret;

err_clk:
	clk_disable_unprepare(host->hclk);
err_pm:
	pm_runtime_disable(&pdev->dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->hclk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct of_device_id mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, mmcif_of_match);

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
		.owner	= THIS_MODULE,
		.of_match_table = mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");