/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as required by
 * the Linux MMC API.
 *
 * An MMC request consists of up to 3 stages: command, optional data, and
 * optional stop. To achieve asynchronous processing each of these stages
 * is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. For the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. On an error or on successful completion,
 * the MMC core is informed and the request processing is finished. When
 * processing has to continue, i.e. if data has to be read from or written to
 * the card, or if a stop command has to be sent, the next top half is called,
 * which performs the necessary hardware handling and reschedules the timeout
 * work. This returns the driver state machine into the bottom-half waiting
 * state.
 */
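
/*
 * A rough sketch of one successful request with a data stage (function names
 * refer to the code below; the exact sequence depends on PIO vs. DMA and on
 * whether a stop command is needed):
 *
 *   sh_mmcif_request()             top half: start the command, arm timeout
 *     -> sh_mmcif_intr()           hard IRQ: latch and clear status bits
 *       -> sh_mmcif_irqt()         IRQ thread: run the stage handler, e.g.
 *          sh_mmcif_end_cmd(),     then either start the next top half or
 *                                  call mmc_request_done()
 */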

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

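/* Which bottom-half handler the next interrupt or timeout should run */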
enum mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum mmcif_state state;
	enum mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool card_present;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

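/* Read-modify-write helpers for the 32-bit MMCIF registers */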
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;

	dev_dbg(&host->pd->dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	complete(&host->dma_complete);
}

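/*
 * Map the scatterlist and queue a slave-DMA descriptor. On any failure both
 * DMA channels are released and the driver falls back to PIO.
 */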
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
			 struct sh_mmcif_plat_data *pdata,
			 enum dma_transfer_direction direction)
{
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	unsigned int slave_id;
	struct resource *res;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (pdata)
		slave_id = direction == DMA_MEM_TO_DEV
			 ? pdata->slave_id_tx : pdata->slave_id_rx;
	else
		slave_id = 0;

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				(void *)(unsigned long)slave_id, &host->pd->dev,
				direction == DMA_MEM_TO_DEV ? "tx" : "rx");

	dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
		direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);

	if (!chan)
		return NULL;

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);

	/* In the OF case the driver will get the slave ID from the DT */
	cfg.slave_id = slave_id;
	cfg.direction = direction;
	/* The CE_DATA FIFO is the slave address in both directions */
	if (direction == DMA_MEM_TO_DEV) {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr = 0;
	} else {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr = 0;
	}
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	host->dma_active = false;

	if (pdata) {
		if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
			return;
	} else if (!host->pd->dev.of_node) {
		return;
	}

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
	if (!host->chan_tx)
		return;

	host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
	if (!host->chan_rx) {
		dma_release_channel(host->chan_tx);
		host->chan_tx = NULL;
	}
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}
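
/*
 * The CLK divider field in CE_CLK_CTRL appears to encode a divisor of
 * 2^(field + 1), so fls(DIV_ROUND_UP(host->clk, clk) - 1) - 1 below picks
 * the smallest power-of-two divisor whose resulting bus clock does not
 * exceed the requested rate.
 */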

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
476
	bool sup_pclk = p ? p->sup_pclk : false;
Y
Yusuke Goda 已提交
477 478 479 480 481 482

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
483
	if (sup_pclk && clk == host->clk)
Y
Yusuke Goda 已提交
484 485 486
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
S
Simon Horman 已提交
487 488
				((fls(DIV_ROUND_UP(host->clk,
						   clk) - 1) - 1) << 16));
Y
Yusuke Goda 已提交
489 490 491 492 493 494 495 496

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

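/*
 * Inspect CE_HOST_STS1/2 after an error interrupt, forcibly terminating a
 * still-running command sequence first if necessary, and return a -E... code
 * suitable for cmd/data/stop ->error.
 */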
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(&host->pd->dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

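/*
 * Advance the PIO pointer by one block, moving to the next scatterlist
 * entry when the current one is exhausted. Returns true while more blocks
 * remain to be transferred.
 */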
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

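/* Translate an MMC command into a CE_CMD_SET register value */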
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 mask;

	switch (opc) {
	/* response busy check */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		mask = MASK_START_CMD | MASK_MRBSYE;
		break;
	default:
		mask = MASK_START_CMD | MASK_MCRSPE;
		break;
	}

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO commands */
	case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
			break;
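		/* The SD variant (bcr) falls through and is rejected below */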
	case MMC_APP_CMD:
	case SD_IO_RW_DIRECT:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	default:
		break;
	}

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
{
	int ret = clk_prepare_enable(host->hclk);

	if (!ret) {
		host->clk = clk_get_rate(host->hclk);
		host->mmc->f_max = host->clk / 2;
		host->mmc->f_min = host->clk / 512;
	}

	return ret;
}

static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vmmc))
		/* Errors ignored... */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
		sh_mmcif_set_power(host, ios);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			pm_runtime_put_sync(&host->pd->dev);
			clk_disable_unprepare(host->hclk);
			host->power = false;
			if (ios->power_mode == MMC_POWER_OFF)
				sh_mmcif_set_power(host, ios);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			sh_mmcif_clk_update(host);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	int ret = mmc_gpio_get_cd(mmc);

	if (ret >= 0)
		return ret;

	if (!p || !p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

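/*
 * Command-stage bottom half: collect the response and, if there is a data
 * stage, start it via DMA (waiting here in the IRQ thread) or via PIO.
 * Returns true if the request now waits for data-stage interrupts.
 */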
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(&host->pd->dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from the DMA callback and from an
	 * error, so it has to be reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
	}

	return false;
}

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	bool wait = false;

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed - successfully or not
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}

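/*
 * Hard IRQ half: CE_INT bits are cleared by writing 0 to them, hence the
 * ~(state & mask) write below, which leaves the unhandled bits set.
 */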
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			mmcif_dma_complete(host);
	} else {
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}

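/* Last-resort bottom half: a stage failed to complete within host->timeout */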
static void mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = container_of(work, struct delayed_work, work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= msecs_to_jiffies(1000);
	host->ccs_enable = !pd || !pd->ccs_unsupported;
	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	host->hclk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
		goto err_pm;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto err_pm;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(&pdev->dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
		if (ret < 0)
			goto err_clk;
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->hclk) / 1000000UL);

	clk_disable_unprepare(host->hclk);
	return ret;

err_clk:
	clk_disable_unprepare(host->hclk);
err_pm:
	pm_runtime_disable(&pdev->dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->hclk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct of_device_id mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, mmcif_of_match);

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
		.owner	= THIS_MODULE,
		.of_match_table = mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");