/*
 *  linux/drivers/mmc/tmio_mmc.c
 *
 *  Copyright (C) 2004 Ian Molton
 *  Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * This driver draws mainly on scattered spec sheets, Reverse engineering
 * of the toshiba e800  SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better Power management
 *   Handle MMC errors better
 *   double buffer support
 *
 */
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>

#include "tmio_mmc.h"

static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
I
Ian Molton 已提交
41
	u32 clk = 0, clock;
I
Ian Molton 已提交
42 43

	if (new_clock) {
I
Ian Molton 已提交
44 45
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
I
Ian Molton 已提交
46 47 48 49
			clock <<= 1;
		clk |= 0x100;
	}

50 51 52
	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

I
Ian Molton 已提交
53
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
I
Ian Molton 已提交
54 55 56 57
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
P
Philipp Zabel 已提交
58
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
I
Ian Molton 已提交
59
	msleep(10);
P
Philipp Zabel 已提交
60 61
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
I
Ian Molton 已提交
62 63 64 65 66
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
P
Philipp Zabel 已提交
67 68
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
I
Ian Molton 已提交
69
	msleep(10);
P
Philipp Zabel 已提交
70
	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
I
Ian Molton 已提交
71 72 73 74 75 76
	msleep(10);
}

static void reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
P
Philipp Zabel 已提交
77 78
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
I
Ian Molton 已提交
79
	msleep(10);
P
Philipp Zabel 已提交
80 81
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
I
Ian Molton 已提交
82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117
	msleep(10);
}

/*
 * Hand the finished (completed or failed) request back to the MMC core
 * and clear all per-request state on the host first, so a new request
 * can be accepted immediately.
 */
static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *done_mrq = host->mrq;

	host->data = NULL;
	host->cmd = NULL;
	host->mrq = NULL;

	mmc_request_done(host->mmc, done_mrq);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000

static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
P
Philipp Zabel 已提交
118
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
I
Ian Molton 已提交
119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

135 136
/* FIXME - this seems to be ok commented out but the spec suggest this bit
 *         should be set when issuing app commands.
I
Ian Molton 已提交
137 138 139 140 141 142
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
P
Philipp Zabel 已提交
143
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
I
Ian Molton 已提交
144 145 146 147 148 149
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

P
Philipp Zabel 已提交
150
	enable_mmc_irqs(host, TMIO_MASK_CMD);
I
Ian Molton 已提交
151 152

	/* Fire off the command */
P
Philipp Zabel 已提交
153 154
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);
I
Ian Molton 已提交
155 156 157 158

	return 0;
}

159 160
/*
 * This chip always returns (at least?) as much data as you ask for.
I
Ian Molton 已提交
161 162 163
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesnt hose the controller.
 */
164
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
I
Ian Molton 已提交
165 166
{
	struct mmc_data *data = host->data;
167
	void *sg_virt;
I
Ian Molton 已提交
168 169 170 171 172 173 174 175 176
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

177 178
	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);
I
Ian Molton 已提交
179 180 181 182 183 184

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
185
		 count, host->sg_off, data->flags);
I
Ian Molton 已提交
186 187 188

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
P
Philipp Zabel 已提交
189
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
I
Ian Molton 已提交
190
	else
P
Philipp Zabel 已提交
191
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
I
Ian Molton 已提交
192 193 194

	host->sg_off += count;

195
	tmio_mmc_kunmap_atomic(sg_virt, &flags);
I
Ian Molton 已提交
196 197 198 199 200 201 202

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

203
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
I
Ian Molton 已提交
204 205
{
	struct mmc_data *data = host->data;
206
	struct mmc_command *stop;
I
Ian Molton 已提交
207 208 209 210

	host->data = NULL;

	if (!data) {
211
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
I
Ian Molton 已提交
212 213
		return;
	}
214
	stop = data->stop;
I
Ian Molton 已提交
215 216 217 218 219 220 221 222 223

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

224 225
	/*
	 * FIXME: other drivers allow an optional stop command of any given type
I
Ian Molton 已提交
226 227 228 229 230 231 232
	 *        which we dont do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

233 234 235 236 237 238 239 240 241 242 243
	if (data->flags & MMC_DATA_READ) {
		if (!host->chan_rx)
			disable_mmc_irqs(host, TMIO_MASK_READOP);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		if (!host->chan_tx)
			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}
I
Ian Molton 已提交
244 245 246

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
P
Philipp Zabel 已提交
247
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
I
Ian Molton 已提交
248 249 250 251 252 253 254
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}

255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;

	if (!data)
		return;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed,
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set, in this cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
	}
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
I
Ian Molton 已提交
284 285 286
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
P
Philipp Zabel 已提交
287
	int i, addr;
I
Ian Molton 已提交
288 289 290 291 292 293 294 295 296 297 298 299 300

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		return;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

P
Philipp Zabel 已提交
301 302
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);
I
Ian Molton 已提交
303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322

	if (cmd->flags &  MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimatley finish the request in the data_end handler.
	 * If theres no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
323 324 325 326 327 328 329 330 331 332
		if (host->data->flags & MMC_DATA_READ) {
			if (!host->chan_rx)
				enable_mmc_irqs(host, TMIO_MASK_READOP);
		} else {
			struct dma_chan *chan = host->chan_tx;
			if (!chan)
				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
I
Ian Molton 已提交
333 334 335 336 337 338 339 340 341 342 343 344 345 346
	} else {
		tmio_mmc_finish_request(host);
	}

	return;
}

static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, irq_mask, status;

	pr_debug("MMC IRQ begin\n");

P
Philipp Zabel 已提交
347 348
	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
I
Ian Molton 已提交
349 350 351 352 353 354
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
P
Philipp Zabel 已提交
355
		disable_mmc_irqs(host, status & ~irq_mask);
I
Ian Molton 已提交
356

357
		pr_warning("tmio_mmc: Spurious irq, disabling! "
I
Ian Molton 已提交
358 359 360 361 362 363 364 365 366
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
P
Philipp Zabel 已提交
367
			ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
I
Ian Molton 已提交
368
				TMIO_STAT_CARD_REMOVE);
369
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
I
Ian Molton 已提交
370 371 372 373 374 375 376 377 378
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & TMIO_MASK_CMD) {
P
Philipp Zabel 已提交
379
			ack_mmc_irqs(host, TMIO_MASK_CMD);
I
Ian Molton 已提交
380 381 382 383 384
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
P
Philipp Zabel 已提交
385
			ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
I
Ian Molton 已提交
386 387 388 389 390
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
P
Philipp Zabel 已提交
391
			ack_mmc_irqs(host, TMIO_STAT_DATAEND);
I
Ian Molton 已提交
392 393 394 395
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
P
Philipp Zabel 已提交
396 397
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
I
Ian Molton 已提交
398 399 400 401 402 403 404 405 406 407 408
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}

#ifdef CONFIG_TMIO_MMC_DMA
/* Switch the controller's DMA interface on or off. The register at
 * offset 0xd8 is only documented for the SuperH/SH-Mobile variants. */
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}

/*
 * dmaengine completion callback: the descriptor has finished, so
 * re-enable the DATAEND interrupt and let the IRQ path complete the
 * request. Warns if it fires with no data transfer in flight.
 */
static void tmio_dma_complete(void *arg)
{
	struct tmio_mmc_host *host = arg;

	dev_dbg(&host->pdev->dev, "Command completed\n");

	if (host->data)
		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	else
		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
}

/*
 * Map the request's scatterlist and submit a slave-DMA read descriptor.
 * On any failure (mapping, descriptor prep, or submit) both DMA
 * channels are released, DMA is disabled, the controller is reset and
 * the request is failed so the upper layers retry in PIO mode.
 * Returns 0 on success or a negative errno.
 */
static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	int ret;

	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_sglen = ret;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		host->desc = desc;
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		host->cookie = desc->tx_submit(desc);
		if (host->cookie < 0) {
			/* Submit failed: negative cookie is the errno */
			host->desc = NULL;
			ret = host->cookie;
		} else {
			chan->device->device_issue_pending(chan);
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, host->cookie, host->mrq);

	if (!host->desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
		reset(host);
		/* Fail this request, let above layers recover */
		host->mrq->cmd->error = ret;
		tmio_mmc_finish_request(host);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, host->cookie, host->sg_len);

	return ret > 0 ? 0 : ret;
}

/*
 * Map the request's scatterlist and prepare a slave-DMA write
 * descriptor. Unlike the Rx path the descriptor is NOT issued here —
 * issue_pending is deferred to the dma_issue tasklet, scheduled from
 * the command-complete IRQ. Failure handling mirrors the Rx path:
 * release both channels, disable DMA, reset and fail the request so
 * the upper layers retry in PIO mode.
 */
static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	int ret;

	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_sglen = ret;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		host->desc = desc;
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		host->cookie = desc->tx_submit(desc);
		if (host->cookie < 0) {
			/* Submit failed: negative cookie is the errno */
			host->desc = NULL;
			ret = host->cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, host->cookie, host->mrq);

	if (!host->desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
		reset(host);
		/* Fail this request, let above layers recover */
		host->mrq->cmd->error = ret;
		tmio_mmc_finish_request(host);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, host->cookie);

	return ret > 0 ? 0 : ret;
}

/*
 * Start DMA for a data transfer in the appropriate direction, if a
 * channel is available; returns 0 (PIO fallback) when it is not.
 */
static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ)
		return host->chan_rx ? tmio_mmc_start_dma_rx(host) : 0;

	return host->chan_tx ? tmio_mmc_start_dma_tx(host) : 0;
}

/*
 * Tasklet: push the previously-submitted Tx descriptor to the DMA
 * engine. Scheduled from the command-complete IRQ so the write only
 * starts once the card has accepted the command.
 */
static void tmio_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *tx_chan = host->chan_tx;

	tx_chan->device->device_issue_pending(tx_chan);
}

static void tmio_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
}

/* It might be necessary to make filter MFD specific */
/* dmaengine channel filter: accepts any offered channel and attaches
 * the MFD's slave configuration via chan->private. */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

/*
 * Acquire the Tx and Rx DMA channels described by the platform data and
 * set up the issue/complete tasklets. DMA is all-or-nothing: if either
 * channel cannot be obtained, both are left NULL and the driver runs
 * in PIO mode.
 */
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	host->cookie = -EINVAL;
	host->desc = NULL;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		/* Rx failed: give the Tx channel back and stay in PIO mode */
		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);

		tmio_mmc_enable_dma(host, true);
	}
}

/*
 * Release whichever DMA channels are held and invalidate the cached
 * descriptor state. Each host pointer is cleared before the channel is
 * given back, matching the acquisition code's ordering.
 */
static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	struct dma_chan *chan;

	chan = host->chan_tx;
	if (chan) {
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}

	chan = host->chan_rx;
	if (chan) {
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->cookie = -EINVAL;
	host->desc = NULL;
}
#else
/* PIO-only stub (CONFIG_TMIO_MMC_DMA unset): no DMA started, report success */
static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	return 0;
}

/* PIO-only stub: mark both DMA channels as unavailable */
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
				 struct tmio_mmc_data *pdata)
{
	host->chan_tx = NULL;
	host->chan_rx = NULL;
}

/* PIO-only stub: nothing to release */
static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif

I
Ian Molton 已提交
658 659 660 661
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n",
662
		 data->blksz, data->blocks);
I
Ian Molton 已提交
663 664 665

	/* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
	if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
666 667
		pr_err("%s: %d byte block unsupported in 4 bit mode\n",
		       mmc_hostname(host->mmc), data->blksz);
I
Ian Molton 已提交
668 669 670 671 672 673 674
		return -EINVAL;
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
P
Philipp Zabel 已提交
675 676
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
I
Ian Molton 已提交
677

678
	return tmio_mmc_start_dma(host, data);
I
Ian Molton 已提交
679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->mrq)
		pr_debug("request not null\n");

	host->mrq = mrq;

	/* Set up the data phase first, if there is one */
	if (mrq->data)
		ret = tmio_mmc_start_data(host, mrq->data);

	if (!ret)
		ret = tmio_mmc_start_command(host, mrq->cmd);

	/* On any setup failure, fail the whole request immediately */
	if (ret) {
		mrq->cmd->error = ret;
		mmc_request_done(mmc, mrq);
	}
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
723 724
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
I
Ian Molton 已提交
725 726 727
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_ON: /* power up SD bus */
728 729
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
I
Ian Molton 已提交
730 731 732 733 734 735 736 737
		break;
	case MMC_POWER_UP: /* start bus clock */
		tmio_mmc_clk_start(host);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
P
Philipp Zabel 已提交
738
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
I
Ian Molton 已提交
739 740
	break;
	case MMC_BUS_WIDTH_4:
P
Philipp Zabel 已提交
741
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
I
Ian Molton 已提交
742 743 744 745 746 747 748 749 750 751
	break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
752 753
	struct mfd_cell	*cell = host->pdev->dev.platform_data;
	struct tmio_mmc_data *pdata = cell->driver_data;
I
Ian Molton 已提交
754

755 756
	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
I
Ian Molton 已提交
757 758
}

759
static const struct mmc_host_ops tmio_mmc_ops = {
I
Ian Molton 已提交
760 761 762 763 764 765 766 767 768 769 770 771
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro         = tmio_mmc_get_ro,
};

#ifdef CONFIG_PM
/*
 * Suspend: let the MMC core quiesce the host, then allow the MFD core
 * to power down the cell. Returns the MMC core's suspend result.
 */
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret;

	ret = mmc_suspend_host(mmc);

	/* Tell MFD core it can disable us now.*/
	if (!ret && cell->disable)
		cell->disable(dev);

	return ret;
}

static int tmio_mmc_resume(struct platform_device *dev)
{
	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	/* Tell the MFD core we are ready to be enabled */
788 789
	if (cell->resume) {
		ret = cell->resume(dev);
I
Ian Molton 已提交
790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806
		if (ret)
			goto out;
	}

	mmc_resume_host(mmc);

out:
	return ret;
}
#else
#define tmio_mmc_suspend NULL
#define tmio_mmc_resume NULL
#endif

/*
 * Probe: validate the MFD cell's resources and platform data, allocate
 * and populate the mmc_host, map the control registers, enable the
 * cell, reset the controller, install the interrupt handler, acquire
 * optional DMA channels and finally register with the MMC core. Errors
 * unwind in reverse order via the goto chain at the bottom.
 */
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data;
	struct tmio_mmc_data *pdata;
	struct resource *res_ctl;
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	int ret = -EINVAL;
	u32 irq_mask = TMIO_MASK_CMD;

	if (dev->num_resources != 2)
		goto out;

	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		goto out;

	pdata = cell->driver_data;
	if (!pdata || !pdata->hclk)
		goto out;

	ret = -ENOMEM;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = dev;
	platform_set_drvdata(dev, mmc);

	host->set_pwr = pdata->set_pwr;
	host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	host->bus_shift = resource_size(res_ctl) >> 10;

	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!host->ctl)
		goto host_free;

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->caps |= pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			goto unmap_ctl;
	}

	tmio_mmc_clk_stop(host);
	reset(host);

	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		host->irq = ret;
	else
		goto cell_disable;

	disable_mmc_irqs(host, TMIO_MASK_ALL);

	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
	if (ret)
		goto cell_disable;

	/* See if we also get DMA */
	tmio_mmc_request_dma(host, pdata);

	mmc_add_host(mmc);

	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
		(unsigned long)host->ctl, host->irq);

	/* Unmask the IRQs we want to know about */
	if (!host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	enable_mmc_irqs(host, irq_mask);

	return 0;

cell_disable:
	if (cell->disable)
		cell->disable(dev);
unmap_ctl:
	iounmap(host->ctl);
host_free:
	mmc_free_host(mmc);
out:
	return ret;
}

/*
 * Remove: tear probe down in reverse order — unregister from the MMC
 * core, release DMA channels, free the IRQ, disable the MFD cell,
 * unmap the registers and free the host.
 */
static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);

	platform_set_drvdata(dev, NULL);

	if (mmc) {
		struct tmio_mmc_host *host = mmc_priv(mmc);
		mmc_remove_host(mmc);
		tmio_mmc_release_dma(host);
		free_irq(host->irq, host);
		if (cell->disable)
			cell->disable(dev);
		iounmap(host->ctl);
		mmc_free_host(mmc);
	}

	return 0;
}

/* ------------------- device registration ----------------------- */

/* Platform driver glue binding "tmio-mmc" MFD cells to this driver. */
static struct platform_driver tmio_mmc_driver = {
	.driver = {
		.name = "tmio-mmc",
		.owner = THIS_MODULE,
	},
	.probe = tmio_mmc_probe,
	.remove = __devexit_p(tmio_mmc_remove),
	.suspend = tmio_mmc_suspend,
	.resume = tmio_mmc_resume,
};


/* Register the platform driver on module load. */
static int __init tmio_mmc_init(void)
{
	return platform_driver_register(&tmio_mmc_driver);
}

/* Unregister the platform driver on module unload. */
static void __exit tmio_mmc_exit(void)
{
	platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");