/*
 * drivers/mmc/host/omap_hsmmc.c
 *
 * Driver for OMAP2430/3430 MMC controller.
 *
 * Copyright (C) 2007 Texas Instruments.
 *
 * Authors:
 *	Syed Mohammed Khasim	<x0khasim@ti.com>
 *	Madhusudhan		<madhu.cr@ti.com>
 *	Mohit Jalori		<mjalori@ti.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
20
#include <linux/kernel.h>
21
#include <linux/debugfs.h>
22
#include <linux/dmaengine.h>
23
#include <linux/seq_file.h>
24
#include <linux/sizes.h>
25 26 27 28 29 30
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/clk.h>
31
#include <linux/of.h>
32
#include <linux/of_irq.h>
33 34
#include <linux/of_gpio.h>
#include <linux/of_device.h>
35
#include <linux/omap-dmaengine.h>
36
#include <linux/mmc/host.h>
37
#include <linux/mmc/core.h>
38
#include <linux/mmc/mmc.h>
39
#include <linux/mmc/slot-gpio.h>
40
#include <linux/io.h>
41
#include <linux/irq.h>
42 43
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
44
#include <linux/pinctrl/consumer.h>
45
#include <linux/pm_runtime.h>
46
#include <linux/pm_wakeirq.h>
47
#include <linux/platform_data/hsmmc-omap.h>
48 49

/* OMAP HSMMC Host Controller Registers */
50
#define OMAP_HSMMC_SYSSTATUS	0x0014
51
#define OMAP_HSMMC_CON		0x002C
52
#define OMAP_HSMMC_SDMASA	0x0100
53 54 55 56 57 58 59 60
#define OMAP_HSMMC_BLK		0x0104
#define OMAP_HSMMC_ARG		0x0108
#define OMAP_HSMMC_CMD		0x010C
#define OMAP_HSMMC_RSP10	0x0110
#define OMAP_HSMMC_RSP32	0x0114
#define OMAP_HSMMC_RSP54	0x0118
#define OMAP_HSMMC_RSP76	0x011C
#define OMAP_HSMMC_DATA		0x0120
61
#define OMAP_HSMMC_PSTATE	0x0124
62 63 64 65 66
#define OMAP_HSMMC_HCTL		0x0128
#define OMAP_HSMMC_SYSCTL	0x012C
#define OMAP_HSMMC_STAT		0x0130
#define OMAP_HSMMC_IE		0x0134
#define OMAP_HSMMC_ISE		0x0138
67
#define OMAP_HSMMC_AC12		0x013C
68 69 70 71
#define OMAP_HSMMC_CAPA		0x0140

#define VS18			(1 << 26)
#define VS30			(1 << 25)
72
#define HSS			(1 << 21)
73 74
#define SDVS18			(0x5 << 9)
#define SDVS30			(0x6 << 9)
75
#define SDVS33			(0x7 << 9)
76
#define SDVS_MASK		0x00000E00
77 78 79 80 81 82 83 84
#define SDVSCLR			0xFFFFF1FF
#define SDVSDET			0x00000400
#define AUTOIDLE		0x1
#define SDBP			(1 << 8)
#define DTO			0xe
#define ICE			0x1
#define ICS			0x2
#define CEN			(1 << 2)
85
#define CLKD_MAX		0x3FF		/* max clock divisor: 1023 */
86 87 88 89 90
#define CLKD_MASK		0x0000FFC0
#define CLKD_SHIFT		6
#define DTO_MASK		0x000F0000
#define DTO_SHIFT		16
#define INIT_STREAM		(1 << 1)
91
#define ACEN_ACMD23		(2 << 2)
92 93
#define DP_SELECT		(1 << 21)
#define DDIR			(1 << 4)
94
#define DMAE			0x1
95 96 97
#define MSBS			(1 << 5)
#define BCE			(1 << 1)
#define FOUR_BIT		(1 << 1)
98
#define HSPE			(1 << 2)
99
#define IWE			(1 << 24)
100
#define DDR			(1 << 19)
101 102
#define CLKEXTFREE		(1 << 16)
#define CTPL			(1 << 11)
103
#define DW8			(1 << 5)
104 105 106 107 108 109
#define OD			0x1
#define STAT_CLEAR		0xFFFFFFFF
#define INIT_STREAM_CMD		0x00000000
#define DUAL_VOLT_OCR_BIT	7
#define SRC			(1 << 25)
#define SRD			(1 << 26)
110
#define SOFTRESET		(1 << 1)
111

112 113 114
/* PSTATE */
#define DLEV_DAT(x)		(1 << (20 + (x)))

115 116 117 118 119
/* Interrupt masks for IE and ISE register */
#define CC_EN			(1 << 0)
#define TC_EN			(1 << 1)
#define BWR_EN			(1 << 4)
#define BRR_EN			(1 << 5)
120
#define CIRQ_EN			(1 << 8)
121 122 123 124 125 126 127 128
#define ERR_EN			(1 << 15)
#define CTO_EN			(1 << 16)
#define CCRC_EN			(1 << 17)
#define CEB_EN			(1 << 18)
#define CIE_EN			(1 << 19)
#define DTO_EN			(1 << 20)
#define DCRC_EN			(1 << 21)
#define DEB_EN			(1 << 22)
129
#define ACE_EN			(1 << 24)
130 131 132
#define CERR_EN			(1 << 28)
#define BADA_EN			(1 << 29)

133
#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
134 135 136
		DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
		BRR_EN | BWR_EN | TC_EN | CC_EN)

137 138 139 140 141 142 143
#define CNI	(1 << 7)
#define ACIE	(1 << 4)
#define ACEB	(1 << 3)
#define ACCE	(1 << 2)
#define ACTO	(1 << 1)
#define ACNE	(1 << 0)

144
#define MMC_AUTOSUSPEND_DELAY	100
145 146
#define MMC_TIMEOUT_MS		20		/* 20 mSec */
#define MMC_TIMEOUT_US		20000		/* 20000 micro Sec */
147 148
#define OMAP_MMC_MIN_CLOCK	400000
#define OMAP_MMC_MAX_CLOCK	52000000
149
#define DRIVER_NAME		"omap_hsmmc"
150

151 152 153 154
#define VDD_1V8			1800000		/* 180000 uV */
#define VDD_3V0			3000000		/* 300000 uV */
#define VDD_165_195		(ffs(MMC_VDD_165_195) - 1)

155 156 157 158 159
/*
 * One controller can have multiple slots, like on some omap boards using
 * omap.c controller driver. Luckily this is not currently done on any known
 * omap_hsmmc.c device.
 */
160
#define mmc_pdata(host)		host->pdata
161 162 163 164 165 166 167 168 169 170

/*
 * MMC Host controller read/write API's
 */
#define OMAP_HSMMC_READ(base, reg)	\
	__raw_readl((base) + OMAP_HSMMC_##reg)

#define OMAP_HSMMC_WRITE(base, reg, val) \
	__raw_writel((val), (base) + OMAP_HSMMC_##reg)

171 172 173 174 175
/* Bookkeeping for the pre-mapped ("next") DMA request (pre_req/post_req). */
struct omap_hsmmc_next {
	unsigned int	dma_len;	/* number of mapped scatterlist entries */
	s32		cookie;		/* matches data->host_cookie when mapping is valid */
};

176
/* Per-controller driver state, stored as mmc_host private data. */
struct omap_hsmmc_host {
	struct	device		*dev;
	struct	mmc_host	*mmc;
	struct	mmc_request	*mrq;	/* in-flight request, NULL when idle */
	struct	mmc_command	*cmd;	/* command awaiting completion */
	struct	mmc_data	*data;	/* data transfer awaiting completion */
	struct	clk		*fclk;	/* functional clock */
	struct	clk		*dbclk;	/* debounce clock -- TODO confirm; not used in this chunk */
	struct	regulator	*pbias;
	bool			pbias_enabled;	/* tracks our own enable count for pbias */
	void	__iomem		*base;	/* mapped controller registers */
	resource_size_t		mapbase;
	spinlock_t		irq_lock; /* Prevent races with irq handler */
	unsigned int		dma_len;
	unsigned int		dma_sg_idx;
	unsigned char		bus_mode;
	unsigned char		power_mode;
	int			suspended;
	/* register shadow compared/written by context save/restore */
	u32			con;
	u32			hctl;
	u32			sysctl;
	u32			capa;
	int			irq;
	int			wake_irq;
	int			use_dma, dma_ch;	/* dma_ch is -1 when no DMA transfer owns the channel */
	struct dma_chan		*tx_chan;
	struct dma_chan		*rx_chan;
	int			response_busy;	/* set while an R1b-style busy phase is outstanding */
	int			context_loss;	/* number of context restores performed */
	int			protect_card;
	int			reqs_blocked;
	int			req_in_progress;
	unsigned long		clk_rate;
	unsigned int		flags;
#define AUTO_CMD23		(1 << 0)        /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED	(1 << 1)        /* SDIO irq enabled */
	struct omap_hsmmc_next	next_data;
	struct	omap_hsmmc_platform_data	*pdata;

	/* return MMC cover switch state, can be NULL if not supported.
	 *
	 * possible return values:
	 *   0 - closed
	 *   1 - open
	 */
	int (*get_cover_state)(struct device *dev);

	/* card detect callback, can be NULL if not supported */
	int (*card_detect)(struct device *dev);
};

226 227 228 229 230
/* Per-compatible data selected from the OF match table. */
struct omap_mmc_of_data {
	u32 reg_offset;		/* offset added to the register base for this SoC */
	u8 controller_flags;	/* OMAP_HSMMC_* quirk flags */
};

231 232
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);

233
static int omap_hsmmc_card_detect(struct device *dev)
234
{
235
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
236

237
	return mmc_gpio_get_cd(host->mmc);
238 239
}

240
static int omap_hsmmc_get_cover_state(struct device *dev)
241
{
242
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
243

244
	return mmc_gpio_get_cd(host->mmc);
245 246
}

247 248
#ifdef CONFIG_REGULATOR

249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307
/*
 * Power up the card supply (vmmc) at @vdd and then the interface rail
 * (vqmmc).  On vqmmc failure the vmmc OCR change is rolled back.
 * Returns 0 on success or a negative errno.
 */
static int omap_hsmmc_enable_supply(struct mmc_host *mmc, int vdd)
{
	int err;

	if (mmc->supply.vmmc) {
		err = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		if (err)
			return err;
	}

	/* Enable interface voltage rail, if needed */
	if (!mmc->supply.vqmmc)
		return 0;

	err = regulator_enable(mmc->supply.vqmmc);
	if (!err)
		return 0;

	dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");

	/* undo the card supply change so we don't leave it half-powered */
	if (mmc->supply.vmmc)
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	return err;
}

/*
 * Power down the interface rail (vqmmc) and then the card supply (vmmc).
 * If turning vmmc off fails, vqmmc is re-enabled to restore the old state.
 * Returns 0 on success or a negative errno.
 */
static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
{
	int err;

	if (mmc->supply.vqmmc) {
		err = regulator_disable(mmc->supply.vqmmc);
		if (err) {
			dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
			return err;
		}
	}

	if (!mmc->supply.vmmc)
		return 0;

	err = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
	if (!err)
		return 0;

	/* best effort: bring vqmmc back up so the rails stay consistent */
	if (mmc->supply.vqmmc) {
		int status = regulator_enable(mmc->supply.vqmmc);

		if (status)
			dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
	}

	return err;
}

308
/*
 * Switch the card supply rails (and the matching PBIAS cell) on or off.
 * @power_on: non-zero to power up, zero to power down
 * @vdd: requested voltage as an MMC_VDD_* bit number
 * Returns 0 on success or a negative errno.
 *
 * Sequencing matters here: PBIAS is disabled before the supply change and
 * re-programmed/re-enabled afterwards.
 */
static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
{
	struct omap_hsmmc_host *host =
		platform_get_drvdata(to_platform_device(dev));
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* Board code may take over the whole power sequence. */
	if (mmc_pdata(host)->set_power)
		return mmc_pdata(host)->set_power(dev, power_on, vdd);

	/*
	 * If we don't see a Vcc regulator, assume it's a fixed
	 * voltage always-on regulator.
	 */
	if (!mmc->supply.vmmc)
		return 0;

	if (mmc_pdata(host)->before_set_reg)
		mmc_pdata(host)->before_set_reg(dev, power_on, vdd);

	/* PBIAS must be off while the supply voltage changes. */
	if (host->pbias) {
		if (host->pbias_enabled == 1) {
			ret = regulator_disable(host->pbias);
			if (ret) {
				dev_err(dev, "pbias reg disable failed\n");
				return ret;
			}
			host->pbias_enabled = 0;
		}
	}

	/*
	 * Assume Vcc regulator is used only to power the card ... OMAP
	 * VDDS is used to power the pins, optionally with a transceiver to
	 * support cards using voltages other than VDDS (1.8V nominal).  When a
	 * transceiver is used, DAT3..7 are muxed as transceiver control pins.
	 *
	 * In some cases this regulator won't support enable/disable;
	 * e.g. it's a fixed rail for a WLAN chip.
	 *
	 * In other cases vcc_aux switches interface power.  Example, for
	 * eMMC cards it represents VccQ.  Sometimes transceivers or SDIO
	 * chips/cards need an interface voltage rail too.
	 */
	if (power_on) {
		ret = omap_hsmmc_enable_supply(mmc, vdd);
		if (ret)
			return ret;
	} else {
		ret = omap_hsmmc_disable_supply(mmc);
		if (ret)
			return ret;
	}

	/* Re-program PBIAS to match the new card voltage, then re-enable. */
	if (host->pbias) {
		if (vdd <= VDD_165_195)
			ret = regulator_set_voltage(host->pbias, VDD_1V8,
								VDD_1V8);
		else
			ret = regulator_set_voltage(host->pbias, VDD_3V0,
								VDD_3V0);
		if (ret < 0)
			goto err_set_voltage;

		if (host->pbias_enabled == 0) {
			ret = regulator_enable(host->pbias);
			if (ret) {
				dev_err(dev, "pbias reg enable failed\n");
				goto err_set_voltage;
			} else {
				host->pbias_enabled = 1;
			}
		}
	}

	if (mmc_pdata(host)->after_set_reg)
		mmc_pdata(host)->after_set_reg(dev, power_on, vdd);

	return 0;

err_set_voltage:
	/* best-effort rollback of the supply rails */
	omap_hsmmc_disable_supply(mmc);

	return ret;
}

static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
396
	int ocr_value = 0;
397
	int ret;
398
	struct mmc_host *mmc = host->mmc;
399

400 401 402
	if (mmc_pdata(host)->set_power)
		return 0;

403 404 405
	mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
	if (IS_ERR(mmc->supply.vmmc)) {
		ret = PTR_ERR(mmc->supply.vmmc);
406 407 408
		if (ret != -ENODEV)
			return ret;
		dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
409 410
			PTR_ERR(mmc->supply.vmmc));
		mmc->supply.vmmc = NULL;
411
	} else {
412
		ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
413
		if (ocr_value > 0)
414
			mmc_pdata(host)->ocr_mask = ocr_value;
415
	}
416

417
	/* Allow an aux regulator */
418 419 420
	mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
	if (IS_ERR(mmc->supply.vqmmc)) {
		ret = PTR_ERR(mmc->supply.vqmmc);
421 422 423
		if (ret != -ENODEV)
			return ret;
		dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
424 425
			PTR_ERR(mmc->supply.vqmmc));
		mmc->supply.vqmmc = NULL;
426
	}
427

428 429 430
	host->pbias = devm_regulator_get_optional(host->dev, "pbias");
	if (IS_ERR(host->pbias)) {
		ret = PTR_ERR(host->pbias);
431 432 433
		if (ret != -ENODEV)
			return ret;
		dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
434 435
			PTR_ERR(host->pbias));
		host->pbias = NULL;
436
	}
437

438
	/* For eMMC do not power off when not in sleep state */
439
	if (mmc_pdata(host)->no_regulator_off_init)
440 441 442 443 444
		return 0;
	/*
	 * To disable boot_on regulator, enable regulator
	 * to increase usecount and then disable it.
	 */
445 446
	if ((mmc->supply.vmmc && regulator_is_enabled(mmc->supply.vmmc) > 0) ||
	    (mmc->supply.vqmmc && regulator_is_enabled(mmc->supply.vqmmc))) {
447
		int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1;
448

449 450
		omap_hsmmc_set_power(host->dev, 1, vdd);
		omap_hsmmc_set_power(host->dev, 0, 0);
451 452 453 454 455
	}

	return 0;
}

456 457 458 459 460 461 462
/* Regulator framework compiled in: report regulator support available. */
static inline int omap_hsmmc_have_reg(void)
{
	return 1;
}

#else

463 464 465 466 467
/* No regulator framework: power switching is a no-op that reports success. */
static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
{
	return 0;
}

468 469 470 471 472 473 474 475 476 477 478 479
/* No regulator framework: looking up regulators cannot succeed. */
static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
	return -EINVAL;
}

/* No regulator framework: report regulator support unavailable. */
static inline int omap_hsmmc_have_reg(void)
{
	return 0;
}

#endif

480
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);
481 482 483

static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
				struct omap_hsmmc_host *host,
484
				struct omap_hsmmc_platform_data *pdata)
485 486 487
{
	int ret;

488 489
	if (gpio_is_valid(pdata->gpio_cod)) {
		ret = mmc_gpio_request_cd(mmc, pdata->gpio_cod, 0);
490 491
		if (ret)
			return ret;
492 493 494

		host->get_cover_state = omap_hsmmc_get_cover_state;
		mmc_gpio_set_cd_isr(mmc, omap_hsmmc_cover_irq);
495 496
	} else if (gpio_is_valid(pdata->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, pdata->gpio_cd, 0);
497 498 499 500
		if (ret)
			return ret;

		host->card_detect = omap_hsmmc_card_detect;
501
	}
502

503
	if (gpio_is_valid(pdata->gpio_wp)) {
504
		ret = mmc_gpio_request_ro(mmc, pdata->gpio_wp);
505
		if (ret)
506
			return ret;
507
	}
508 509 510 511

	return 0;
}

512 513 514 515 516 517 518 519 520
/*
 * Start clock to the card
 */
static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
}

521 522 523
/*
 * Stop clock to the card
 */
524
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
525 526 527 528
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
	if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
M
Masanari Iida 已提交
529
		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
530 531
}

532 533
/*
 * Unmask the interrupts needed for @cmd: clear stale status, program ISE,
 * then IE (optionally keeping the SDIO card IRQ latched in IE only).
 */
static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
				  struct mmc_command *cmd)
{
	u32 irq_mask = INT_EN_MASK;
	unsigned long flags;

	/* buffer-ready interrupts are unused when DMA moves the data */
	if (host->use_dma)
		irq_mask &= ~(BRR_EN | BWR_EN);

	/* Disable timeout for erases */
	if (cmd->opcode == MMC_ERASE)
		irq_mask &= ~DTO_EN;

	spin_lock_irqsave(&host->irq_lock, flags);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* latch pending CIRQ, but don't signal MMC core */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

/*
 * Mask all interrupts after a transfer, keeping only the SDIO card IRQ
 * alive if it is enabled, and clear any stale status.
 */
static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
{
	u32 irq_mask = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	/* no transfer running but need to keep cirq if enabled */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

571
/* Calculate divisor for the given clock frequency */
572
static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
573 574 575 576
{
	u16 dsor = 0;

	if (ios->clock) {
577
		dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
578 579
		if (dsor > CLKD_MAX)
			dsor = CLKD_MAX;
580 581 582 583 584
	}

	return dsor;
}

585 586 587 588 589
static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned long regval;
	unsigned long timeout;
590
	unsigned long clkdiv;
591

592
	dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
593 594 595 596 597

	omap_hsmmc_stop_clock(host);

	regval = OMAP_HSMMC_READ(host->base, SYSCTL);
	regval = regval & ~(CLKD_MASK | DTO_MASK);
598 599
	clkdiv = calc_divisor(host, ios);
	regval = regval | (clkdiv << 6) | (DTO << 16);
600 601 602 603 604 605 606 607 608 609
	OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);

	/* Wait till the ICS bit is set */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
		&& time_before(jiffies, timeout))
		cpu_relax();

610 611 612 613 614 615 616 617 618
	/*
	 * Enable High-Speed Support
	 * Pre-Requisites
	 *	- Controller should support High-Speed-Enable Bit
	 *	- Controller should not be using DDR Mode
	 *	- Controller should advertise that it supports High Speed
	 *	  in capabilities register
	 *	- MMC/SD clock coming out of controller > 25MHz
	 */
619
	if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) &&
620
	    (ios->timing != MMC_TIMING_MMC_DDR52) &&
621
	    (ios->timing != MMC_TIMING_UHS_DDR50) &&
622 623 624 625 626 627 628 629 630 631
	    ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
		regval = OMAP_HSMMC_READ(host->base, HCTL);
		if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
			regval |= HSPE;
		else
			regval &= ~HSPE;

		OMAP_HSMMC_WRITE(host->base, HCTL, regval);
	}

632 633 634
	omap_hsmmc_start_clock(host);
}

635 636 637 638 639 640
static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
641 642
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50)
643 644 645
		con |= DDR;	/* configure in DDR mode */
	else
		con &= ~DDR;
646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
		break;
	case MMC_BUS_WIDTH_4:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
		break;
	case MMC_BUS_WIDTH_1:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
		break;
	}
}

/* Set or clear open-drain command signalling per the current bus mode. */
static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con = OMAP_HSMMC_READ(host->base, CON);

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		con |= OD;
	else
		con &= ~OD;

	OMAP_HSMMC_WRITE(host->base, CON, con);
}

675 676 677 678 679 680
#ifdef CONFIG_PM

/*
 * Restore the MMC host context, if it was lost as result of a
 * power state change.
 */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 hctl, capa;
	unsigned long timeout;

	/* context intact: all shadowed registers still hold their values */
	if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
	    host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
	    host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
	    host->capa == OMAP_HSMMC_READ(host->base, CAPA))
		return 0;

	host->context_loss++;

	/* pick bus voltage / capability bits per board dual-volt support */
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		if (host->power_mode != MMC_POWER_OFF &&
		    (1 << ios->vdd) <= MMC_VDD_23_24)
			hctl = SDVS18;
		else
			hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		hctl |= IWE;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | hctl);

	OMAP_HSMMC_WRITE(host->base, CAPA,
			OMAP_HSMMC_READ(host->base, CAPA) | capa);

	/* turn the bus power back on and wait (bounded) for it to latch */
	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | SDBP);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
		&& time_before(jiffies, timeout))
		;

	/* start from a clean interrupt state */
	OMAP_HSMMC_WRITE(host->base, ISE, 0);
	OMAP_HSMMC_WRITE(host->base, IE, 0);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);

	/* Do not initialize card-specific things if the power is off */
	if (host->power_mode == MMC_POWER_OFF)
		goto out;

	omap_hsmmc_set_bus_width(host);

	omap_hsmmc_set_clock(host);

	omap_hsmmc_set_bus_mode(host);

out:
	dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
		host->context_loss);
	return 0;
}

/*
 * Save the MMC host context (store the number of power state changes so far).
 */
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
	/* shadow the registers that context_restore() compares against */
	host->con =  OMAP_HSMMC_READ(host->base, CON);
	host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
	host->sysctl =  OMAP_HSMMC_READ(host->base, SYSCTL);
	host->capa = OMAP_HSMMC_READ(host->base, CAPA);
}

#else

757
/* !CONFIG_PM: context can never be lost, nothing to restore. */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	return 0;
}

762
/* !CONFIG_PM: nothing to save. */
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}

#endif

768 769 770 771
/*
 * Send init stream sequence to card
 * before sending IDLE command
 */
static void send_init_stream(struct omap_hsmmc_host *host)
{
	int reg = 0;
	unsigned long timeout;

	if (host->protect_card)
		return;

	/* poll for completion with the controller IRQ line masked */
	disable_irq(host->irq);

	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);

	/* wait (bounded) for command-complete status */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((reg != CC_EN) && time_before(jiffies, timeout))
		reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;

	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);

	/* clear status; the extra read flushes the posted write */
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_READ(host->base, STAT);

	enable_irq(host->irq);
}

static inline
801
int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
802 803 804
{
	int r = 1;

805
	if (host->get_cover_state)
806
		r = host->get_cover_state(host->dev);
807 808 809 810
	return r;
}

static ssize_t
811
omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
812 813 814
			   char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
815
	struct omap_hsmmc_host *host = mmc_priv(mmc);
816

817 818
	return sprintf(buf, "%s\n",
			omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
819 820
}

821
static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);
822 823

static ssize_t
824
omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
825 826 827
			char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
828
	struct omap_hsmmc_host *host = mmc_priv(mmc);
829

830
	return sprintf(buf, "%s\n", mmc_pdata(host)->name);
831 832
}

833
static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
834 835 836 837 838

/*
 * Configure the response type and send the cmd.
 */
static void
omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
	struct mmc_data *data)
{
	int cmdreg = 0, resptype = 0, cmdtype = 0;

	dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
	host->cmd = cmd;

	omap_hsmmc_enable_irq(host, cmd);

	/* resptype 3 (R1b-style) keeps response_busy set until TC fires */
	host->response_busy = 0;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			resptype = 1;
		else if (cmd->flags & MMC_RSP_BUSY) {
			resptype = 3;
			host->response_busy = 1;
		} else
			resptype = 2;
	}

	/*
	 * Unlike OMAP1 controller, the cmdtype does not seem to be based on
	 * ac, bc, adtc, bcr. Only commands ending an open ended transfer need
	 * a val of 0x3, rest 0x0.
	 */
	if (cmd == host->mrq->stop)
		cmdtype = 0x3;

	cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);

	/* Auto-CMD23: the controller issues the block-count command itself */
	if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
	    host->mrq->sbc) {
		cmdreg |= ACEN_ACMD23;
		OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
	}
	if (data) {
		cmdreg |= DP_SELECT | MSBS | BCE;
		if (data->flags & MMC_DATA_READ)
			cmdreg |= DDIR;
		else
			cmdreg &= ~(DDIR);
	}

	if (host->use_dma)
		cmdreg |= DMAE;

	host->req_in_progress = 1;

	/* writing CMD last starts the transaction */
	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}

893
static int
894
omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
895 896 897 898 899 900 901
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

902 903 904 905 906 907
/* Pick the TX or RX DMA channel according to the transfer direction. */
static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
	struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return host->tx_chan;

	return host->rx_chan;
}

908 909 910
/*
 * Finish @mrq towards the MMC core and drop the runtime-PM reference,
 * unless DMA is still draining data for this request.
 */
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
	int dma_ch;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	host->req_in_progress = 0;
	dma_ch = host->dma_ch;	/* sample under the lock; -1 means DMA is done */
	spin_unlock_irqrestore(&host->irq_lock, flags);

	omap_hsmmc_disable_irq(host);
	/* Do not complete the request if DMA is still in progress */
	if (mrq->data && host->use_dma && dma_ch != -1)
		return;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
}

928 929 930 931
/*
 * Notify the transfer complete to MMC core
 */
static void
omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	/* a NULL @data means TC arrived for a busy-wait (R1b) completion */
	if (!data) {
		struct mmc_request *mrq = host->mrq;

		/* TC before CC from CMD6 - don't know why, but it happens */
		if (host->cmd && host->cmd->opcode == 6 &&
		    host->response_busy) {
			host->response_busy = 0;
			return;
		}

		omap_hsmmc_request_done(host, mrq);
		return;
	}

	host->data = NULL;

	if (!data->error)
		data->bytes_xfered += data->blocks * (data->blksz);
	else
		data->bytes_xfered = 0;

	/* issue the stop command ourselves unless Auto-CMD23 handled it */
	if (data->stop && (data->error || !host->mrq->sbc))
		omap_hsmmc_start_command(host, data->stop, NULL);
	else
		omap_hsmmc_request_done(host, data->mrq);
}

/*
 * Notify the core about command completion
 */
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
	/*
	 * If this completes a manually-issued CMD23 (sbc), kick off the DMA
	 * and the actual data command now.
	 */
	if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
	    !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
		host->cmd = NULL;
		omap_hsmmc_start_dma_transfer(host);
		omap_hsmmc_start_command(host, host->mrq->cmd,
						host->mrq->data);
		return;
	}

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
			cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
			cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
		}
	}
	/* done now unless a data phase or busy signalling is still pending */
	if ((host->data == NULL && !host->response_busy) || cmd->error)
		omap_hsmmc_request_done(host, host->mrq);
}

/*
 * DMA clean up for command errors
 */
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
	int dma_ch;
	unsigned long flags;

	host->data->error = errno;

	/* claim the channel under the lock so the IRQ path can't race us */
	spin_lock_irqsave(&host->irq_lock, flags);
	dma_ch = host->dma_ch;
	host->dma_ch = -1;
	spin_unlock_irqrestore(&host->irq_lock, flags);

	if (host->use_dma && dma_ch != -1) {
		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);

		dmaengine_terminate_all(chan);
		dma_unmap_sg(chan->device->dev,
			host->data->sg, host->data->sg_len,
			omap_hsmmc_get_dma_dir(host, host->data));

		/* invalidate any pre_req mapping for this data */
		host->data->host_cookie = 0;
	}
	host->data = NULL;
}

/*
 * Readable error output
 */
#ifdef CONFIG_MMC_DEBUG
static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
	/* --- means reserved bit without definition at documentation */
	static const char *omap_hsmmc_status_bits[] = {
		"CC"  , "TC"  , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
		"CIRQ",	"OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
		"CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
		"ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
	};
	char res[256];
	char *buf = res;
	int len, i;

	len = sprintf(buf, "MMC IRQ 0x%x :", status);
	buf += len;

	/* append the mnemonic of every status bit that is set */
	for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
		if (status & (1 << i)) {
			len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
			buf += len;
		}

	dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
#else
/* Debugging disabled: decoding IRQ status is a no-op. */
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
					     u32 status)
{
}
#endif  /* CONFIG_MMC_DEBUG */

1057 1058 1059 1060 1061 1062 1063
/*
 * MMC controller internal state machines reset
 *
 * Used to reset command or data internal state machines, using respectively
 *  SRC or SRD bit of SYSCTL register
 * Can be called from interrupt context
 */
static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
						   unsigned long bit)
{
	unsigned long i = 0;
	unsigned long limit = MMC_TIMEOUT_US;

	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);

	/*
	 * OMAP4 ES2 and greater has an updated reset logic.
	 * Monitor a 0->1 transition first
	 */
	if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) {
		while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
					&& (i++ < limit))
			udelay(1);
	}
	i = 0;

	/* then wait (bounded) for the bit to self-clear */
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
		(i++ < limit))
		udelay(1);

	if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
		dev_err(mmc_dev(host->mmc),
			"Timeout waiting on controller reset in %s\n",
			__func__);
}
1093

1094 1095
static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
					int err, int end_cmd)
1096
{
1097
	if (end_cmd) {
1098
		omap_hsmmc_reset_controller_fsm(host, SRC);
1099 1100 1101
		if (host->cmd)
			host->cmd->error = err;
	}
1102 1103 1104 1105

	if (host->data) {
		omap_hsmmc_reset_controller_fsm(host, SRD);
		omap_hsmmc_dma_cleanup(host, err);
1106 1107
	} else if (host->mrq && host->mrq->cmd)
		host->mrq->cmd->error = err;
1108 1109
}

1110
static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1111 1112
{
	struct mmc_data *data;
1113
	int end_cmd = 0, end_trans = 0;
1114
	int error = 0;
1115

1116
	data = host->data;
1117
	dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
1118

1119
	if (status & ERR_EN) {
1120
		omap_hsmmc_dbg_report_irq(host, status);
1121

1122
		if (status & (CTO_EN | CCRC_EN))
1123
			end_cmd = 1;
1124 1125 1126 1127
		if (host->data || host->response_busy) {
			end_trans = !end_cmd;
			host->response_busy = 0;
		}
1128
		if (status & (CTO_EN | DTO_EN))
1129
			hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
1130 1131
		else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
				   BADA_EN))
1132
			hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
1133

1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147
		if (status & ACE_EN) {
			u32 ac12;
			ac12 = OMAP_HSMMC_READ(host->base, AC12);
			if (!(ac12 & ACNE) && host->mrq->sbc) {
				end_cmd = 1;
				if (ac12 & ACTO)
					error =  -ETIMEDOUT;
				else if (ac12 & (ACCE | ACEB | ACIE))
					error = -EILSEQ;
				host->mrq->sbc->error = error;
				hsmmc_command_incomplete(host, error, end_cmd);
			}
			dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
		}
1148 1149
	}

1150
	OMAP_HSMMC_WRITE(host->base, STAT, status);
1151
	if (end_cmd || ((status & CC_EN) && host->cmd))
1152
		omap_hsmmc_cmd_done(host, host->cmd);
1153
	if ((end_trans || (status & TC_EN)) && host->mrq)
1154
		omap_hsmmc_xfer_done(host, data);
1155
}
1156

1157 1158 1159 1160 1161 1162 1163 1164 1165
/*
 * MMC controller IRQ handler
 */
static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;
	int status;

	status = OMAP_HSMMC_READ(host->base, STAT);
1166 1167 1168 1169 1170 1171
	while (status & (INT_EN_MASK | CIRQ_EN)) {
		if (host->req_in_progress)
			omap_hsmmc_do_irq(host, status);

		if (status & CIRQ_EN)
			mmc_signal_sdio_irq(host->mmc);
1172

1173 1174
		/* Flush posted write */
		status = OMAP_HSMMC_READ(host->base, STAT);
1175
	}
1176

1177 1178 1179
	return IRQ_HANDLED;
}

1180
static void set_sd_bus_power(struct omap_hsmmc_host *host)
A
Adrian Hunter 已提交
1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192
{
	unsigned long i;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
	for (i = 0; i < loops_per_jiffy; i++) {
		if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
			break;
		cpu_relax();
	}
}

1193
/*
1194 1195 1196 1197 1198
 * Switch MMC interface voltage ... only relevant for MMC1.
 *
 * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
 * The MMC2 transceiver controls are used instead of DAT4..DAT7.
 * Some chips, like eMMC ones, use internal transceivers.
1199
 */
1200
static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1201 1202 1203 1204 1205
{
	u32 reg_val = 0;
	int ret;

	/* Disable the clocks */
1206
	pm_runtime_put_sync(host->dev);
1207
	if (host->dbclk)
1208
		clk_disable_unprepare(host->dbclk);
1209 1210

	/* Turn the power off */
1211
	ret = omap_hsmmc_set_power(host->dev, 0, 0);
1212 1213

	/* Turn the power ON with given VDD 1.8 or 3.0v */
1214
	if (!ret)
1215
		ret = omap_hsmmc_set_power(host->dev, 1, vdd);
1216
	pm_runtime_get_sync(host->dev);
1217
	if (host->dbclk)
1218
		clk_prepare_enable(host->dbclk);
1219

1220 1221 1222 1223 1224 1225
	if (ret != 0)
		goto err;

	OMAP_HSMMC_WRITE(host->base, HCTL,
		OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
	reg_val = OMAP_HSMMC_READ(host->base, HCTL);
1226

1227 1228 1229
	/*
	 * If a MMC dual voltage card is detected, the set_ios fn calls
	 * this fn with VDD bit set for 1.8V. Upon card removal from the
1230
	 * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
1231
	 *
1232 1233 1234 1235 1236 1237 1238 1239 1240
	 * Cope with a bit of slop in the range ... per data sheets:
	 *  - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
	 *    but recommended values are 1.71V to 1.89V
	 *  - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
	 *    but recommended values are 2.7V to 3.3V
	 *
	 * Board setup code shouldn't permit anything very out-of-range.
	 * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
	 * middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
1241
	 */
1242
	if ((1 << vdd) <= MMC_VDD_23_24)
1243
		reg_val |= SDVS18;
1244 1245
	else
		reg_val |= SDVS30;
1246 1247

	OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
A
Adrian Hunter 已提交
1248
	set_sd_bus_power(host);
1249 1250 1251

	return 0;
err:
1252
	dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
1253 1254 1255
	return ret;
}

1256 1257 1258
/* Protect the card while the cover is open */
static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
{
1259
	if (!host->get_cover_state)
1260 1261 1262
		return;

	host->reqs_blocked = 0;
1263
	if (host->get_cover_state(host->dev)) {
1264
		if (host->protect_card) {
1265
			dev_info(host->dev, "%s: cover is closed, "
1266 1267 1268 1269 1270 1271
					 "card is now accessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 0;
		}
	} else {
		if (!host->protect_card) {
1272
			dev_info(host->dev, "%s: cover is open, "
1273 1274 1275 1276 1277 1278 1279
					 "card is now inaccessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 1;
		}
	}
}

1280
/*
1281
 * irq handler when (cell-phone) cover is mounted/removed
1282
 */
1283
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id)
1284
{
1285
	struct omap_hsmmc_host *host = dev_id;
1286 1287

	sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
1288

1289 1290
	omap_hsmmc_protect_card(host);
	mmc_detect_change(host->mmc, (HZ * 200) / 1000);
1291 1292 1293
	return IRQ_HANDLED;
}

1294
static void omap_hsmmc_dma_callback(void *param)
1295
{
1296 1297
	struct omap_hsmmc_host *host = param;
	struct dma_chan *chan;
1298
	struct mmc_data *data;
1299
	int req_in_progress;
1300

1301
	spin_lock_irq(&host->irq_lock);
1302
	if (host->dma_ch < 0) {
1303
		spin_unlock_irq(&host->irq_lock);
1304
		return;
1305
	}
1306

1307
	data = host->mrq->data;
1308
	chan = omap_hsmmc_get_dma_chan(host, data);
1309
	if (!data->host_cookie)
1310 1311
		dma_unmap_sg(chan->device->dev,
			     data->sg, data->sg_len,
1312
			     omap_hsmmc_get_dma_dir(host, data));
1313 1314

	req_in_progress = host->req_in_progress;
1315
	host->dma_ch = -1;
1316
	spin_unlock_irq(&host->irq_lock);
1317 1318 1319 1320 1321 1322 1323

	/* If DMA has finished after TC, complete the request */
	if (!req_in_progress) {
		struct mmc_request *mrq = host->mrq;

		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
1324 1325
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1326
	}
1327 1328
}

1329 1330
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
				       struct mmc_data *data,
1331
				       struct omap_hsmmc_next *next,
1332
				       struct dma_chan *chan)
1333 1334 1335 1336 1337
{
	int dma_len;

	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
1338
		dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
1339 1340 1341 1342 1343 1344
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
1345
	if (next || data->host_cookie != host->next_data.cookie) {
1346
		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366
				     omap_hsmmc_get_dma_dir(host, data));

	} else {
		dma_len = host->next_data.dma_len;
		host->next_data.dma_len = 0;
	}


	if (dma_len == 0)
		return -EINVAL;

	if (next) {
		next->dma_len = dma_len;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->dma_len = dma_len;

	return 0;
}

1367 1368 1369
/*
 * Routine to configure and start DMA for the MMC card
 */
1370
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
1371
					struct mmc_request *req)
1372
{
1373 1374 1375
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *tx;
	int ret = 0, i;
1376
	struct mmc_data *data = req->data;
1377
	struct dma_chan *chan;
1378

1379
	/* Sanity check: all the SG entries must be aligned by block size. */
1380
	for (i = 0; i < data->sg_len; i++) {
1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392
		struct scatterlist *sgl;

		sgl = data->sg + i;
		if (sgl->length % data->blksz)
			return -EINVAL;
	}
	if ((data->blksz % 4) != 0)
		/* REVISIT: The MMC buffer increments only when MSB is written.
		 * Return error for blksz which is non multiple of four.
		 */
		return -EINVAL;

1393
	BUG_ON(host->dma_ch != -1);
1394

1395 1396
	chan = omap_hsmmc_get_dma_chan(host, data);

1397 1398 1399 1400 1401 1402
	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;
1403

1404 1405
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
1406
		return ret;
1407

1408
	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
1409 1410
	if (ret)
		return ret;
1411

1412 1413 1414 1415 1416 1417 1418 1419
	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		/* FIXME: cleanup */
		return -1;
	}
1420

1421 1422
	tx->callback = omap_hsmmc_dma_callback;
	tx->callback_param = host;
1423

1424 1425
	/* Does not fail */
	dmaengine_submit(tx);
1426

1427
	host->dma_ch = 1;
1428

1429 1430 1431
	return 0;
}

1432
static void set_data_timeout(struct omap_hsmmc_host *host,
1433 1434
			     unsigned int timeout_ns,
			     unsigned int timeout_clks)
1435 1436 1437 1438 1439 1440 1441 1442 1443
{
	unsigned int timeout, cycle_ns;
	uint32_t reg, clkd, dto = 0;

	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
	if (clkd == 0)
		clkd = 1;

1444
	cycle_ns = 1000000000 / (host->clk_rate / clkd);
1445 1446
	timeout = timeout_ns / cycle_ns;
	timeout += timeout_clks;
1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468
	if (timeout) {
		while ((timeout & 0x80000000) == 0) {
			dto += 1;
			timeout <<= 1;
		}
		dto = 31 - dto;
		timeout <<= 1;
		if (timeout && dto)
			dto += 1;
		if (dto >= 13)
			dto -= 13;
		else
			dto = 0;
		if (dto > 14)
			dto = 14;
	}

	reg &= ~DTO_MASK;
	reg |= dto << DTO_SHIFT;
	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}

1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483
/*
 * Kick off the DMA transfer prepared earlier for host->mrq: program the
 * block register and data timeout, then issue the pending descriptor on
 * the appropriate DMA channel.  No-op for commands without data.
 */
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
{
	struct mmc_request *req = host->mrq;
	struct dma_chan *chan;

	if (!req->data)
		return;
	/* BLK register: block size in [10:0], block count in [31:16]. */
	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
				| (req->data->blocks << 16));
	set_data_timeout(host, req->data->timeout_ns,
				req->data->timeout_clks);
	chan = omap_hsmmc_get_dma_chan(host, req->data);
	dma_async_issue_pending(chan);
}

1484 1485 1486 1487
/*
 * Configure block length for MMC/SD cards and initiate the transfer.
 */
static int
1488
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
1489 1490 1491 1492 1493 1494
{
	int ret;
	host->data = req->data;

	if (req->data == NULL) {
		OMAP_HSMMC_WRITE(host->base, BLK, 0);
1495 1496 1497 1498 1499 1500
		/*
		 * Set an arbitrary 100ms data timeout for commands with
		 * busy signal.
		 */
		if (req->cmd->flags & MMC_RSP_BUSY)
			set_data_timeout(host, 100000000U, 0);
1501 1502 1503 1504
		return 0;
	}

	if (host->use_dma) {
1505
		ret = omap_hsmmc_setup_dma_transfer(host, req);
1506
		if (ret != 0) {
1507
			dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
1508 1509 1510 1511 1512 1513
			return ret;
		}
	}
	return 0;
}

1514 1515 1516 1517 1518 1519
static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

1520
	if (host->use_dma && data->host_cookie) {
1521 1522
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);

1523 1524
		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));
1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538
		data->host_cookie = 0;
	}
}

/* mmc_host_ops.pre_req: pre-map the next request's sg list for DMA. */
static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	/* A stale cookie means this request was already (mis)prepared. */
	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return;
	}

	if (host->use_dma) {
		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, mrq->data);

		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
						&host->next_data, chan))
			mrq->data->host_cookie = 0;
	}
}

1548 1549 1550
/*
 * Request function. for read/write operation
 */
1551
static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1552
{
1553
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1554
	int err;
1555

1556 1557
	BUG_ON(host->req_in_progress);
	BUG_ON(host->dma_ch != -1);
1558
	pm_runtime_get_sync(host->dev);
1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574
	if (host->protect_card) {
		if (host->reqs_blocked < 3) {
			/*
			 * Ensure the controller is left in a consistent
			 * state by resetting the command and data state
			 * machines.
			 */
			omap_hsmmc_reset_controller_fsm(host, SRD);
			omap_hsmmc_reset_controller_fsm(host, SRC);
			host->reqs_blocked += 1;
		}
		req->cmd->error = -EBADF;
		if (req->data)
			req->data->error = -EBADF;
		req->cmd->retries = 0;
		mmc_request_done(mmc, req);
1575 1576
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1577 1578 1579
		return;
	} else if (host->reqs_blocked)
		host->reqs_blocked = 0;
1580 1581
	WARN_ON(host->mrq != NULL);
	host->mrq = req;
1582
	host->clk_rate = clk_get_rate(host->fclk);
1583
	err = omap_hsmmc_prepare_data(host, req);
1584 1585 1586 1587 1588 1589
	if (err) {
		req->cmd->error = err;
		if (req->data)
			req->data->error = err;
		host->mrq = NULL;
		mmc_request_done(mmc, req);
1590 1591
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1592 1593
		return;
	}
1594
	if (req->sbc && !(host->flags & AUTO_CMD23)) {
1595 1596 1597
		omap_hsmmc_start_command(host, req->sbc, NULL);
		return;
	}
1598

1599
	omap_hsmmc_start_dma_transfer(host);
1600
	omap_hsmmc_start_command(host, req->cmd, req->data);
1601 1602 1603
}

/* Routine to configure clock values. Exposed API to core */
1604
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1605
{
1606
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1607
	int do_send_init_stream = 0;
1608

1609
	pm_runtime_get_sync(host->dev);
1610

1611 1612 1613
	if (ios->power_mode != host->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
1614
			omap_hsmmc_set_power(host->dev, 0, 0);
1615 1616
			break;
		case MMC_POWER_UP:
1617
			omap_hsmmc_set_power(host->dev, 1, ios->vdd);
1618 1619 1620 1621 1622 1623
			break;
		case MMC_POWER_ON:
			do_send_init_stream = 1;
			break;
		}
		host->power_mode = ios->power_mode;
1624 1625
	}

1626 1627
	/* FIXME: set registers based only on changes to ios */

1628
	omap_hsmmc_set_bus_width(host);
1629

1630
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1631 1632 1633
		/* Only MMC1 can interface at 3V without some flavor
		 * of external transceiver; but they all handle 1.8V.
		 */
1634
		if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
1635
			(ios->vdd == DUAL_VOLT_OCR_BIT)) {
1636 1637 1638 1639 1640 1641
				/*
				 * The mmc_select_voltage fn of the core does
				 * not seem to set the power_mode to
				 * MMC_POWER_UP upon recalculating the voltage.
				 * vdd 1.8v.
				 */
1642 1643
			if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
				dev_dbg(mmc_dev(host->mmc),
1644 1645 1646 1647
						"Switch operation failed\n");
		}
	}

1648
	omap_hsmmc_set_clock(host);
1649

1650
	if (do_send_init_stream)
1651 1652
		send_init_stream(host);

1653
	omap_hsmmc_set_bus_mode(host);
1654

1655
	pm_runtime_put_autosuspend(host->dev);
1656 1657 1658 1659
}

static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
1660
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1661

1662
	if (!host->card_detect)
1663
		return -ENOSYS;
1664
	return host->card_detect(host->dev);
1665 1666
}

1667 1668 1669 1670
static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

1671 1672
	if (mmc_pdata(host)->init_card)
		mmc_pdata(host)->init_card(card);
1673 1674
}

1675 1676 1677
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1678
	u32 irq_mask, con;
1679 1680 1681 1682
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

1683
	con = OMAP_HSMMC_READ(host->base, CON);
1684 1685 1686 1687
	irq_mask = OMAP_HSMMC_READ(host->base, ISE);
	if (enable) {
		host->flags |= HSMMC_SDIO_IRQ_ENABLED;
		irq_mask |= CIRQ_EN;
1688
		con |= CTPL | CLKEXTFREE;
1689 1690 1691
	} else {
		host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
		irq_mask &= ~CIRQ_EN;
1692
		con &= ~(CTPL | CLKEXTFREE);
1693
	}
1694
	OMAP_HSMMC_WRITE(host->base, CON, con);
1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);

	/*
	 * if enable, piggy back detection on current request
	 * but always disable immediately
	 */
	if (!host->req_in_progress || !enable)
		OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* flush posted write */
	OMAP_HSMMC_READ(host->base, IE);

	spin_unlock_irqrestore(&host->irq_lock, flags);
}

static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
	int ret;

	/*
	 * For omaps with wake-up path, wakeirq will be irq from pinctrl and
	 * for other omaps, wakeirq will be from GPIO (dat line remuxed to
	 * gpio). wakeirq is needed to detect sdio irq in runtime suspend state
	 * with functional clock disabled.
	 */
	if (!host->dev->of_node || !host->wake_irq)
		return -ENODEV;

1723
	ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
1724 1725 1726 1727 1728 1729 1730 1731 1732 1733
	if (ret) {
		dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
		goto err;
	}

	/*
	 * Some omaps don't have wake-up path from deeper idle states
	 * and need to remux SDIO DAT1 to GPIO for wake-up from idle.
	 */
	if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752
		struct pinctrl *p = devm_pinctrl_get(host->dev);
		if (!p) {
			ret = -ENODEV;
			goto err_free_irq;
		}
		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
			dev_info(host->dev, "missing default pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}

		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
			dev_info(host->dev, "missing idle pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}
		devm_pinctrl_put(p);
1753 1754
	}

1755 1756
	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | IWE);
1757 1758
	return 0;

1759
err_free_irq:
1760
	dev_pm_clear_wake_irq(host->dev);
1761 1762 1763 1764 1765 1766
err:
	dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
	host->wake_irq = 0;
	return ret;
}

1767
static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1768 1769 1770 1771
{
	u32 hctl, capa, value;

	/* Only MMC1 supports 3.0V */
1772
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786
		hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
	OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);

	value = OMAP_HSMMC_READ(host->base, CAPA);
	OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);

	/* Set SD bus power bit */
A
Adrian Hunter 已提交
1787
	set_sd_bus_power(host);
1788 1789
}

1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800
/*
 * Clamp multi-block I/O: reads are forced to single-block transfers
 * because of a controller erratum; writes keep the requested size.
 * Installed as mmc_host_ops.multi_io_quirk from probe only when the
 * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ flag is set.
 */
static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
				     unsigned int direction, int blk_size)
{
	/* This controller can't do multiblock reads due to hw bugs */
	if (direction == MMC_DATA_READ)
		return 1;

	return blk_size;
}

static struct mmc_host_ops omap_hsmmc_ops = {
1801 1802
	.post_req = omap_hsmmc_post_req,
	.pre_req = omap_hsmmc_pre_req,
1803 1804
	.request = omap_hsmmc_request,
	.set_ios = omap_hsmmc_set_ios,
1805
	.get_cd = omap_hsmmc_get_cd,
1806
	.get_ro = mmc_gpio_get_ro,
1807
	.init_card = omap_hsmmc_init_card,
1808
	.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
1809 1810
};

1811 1812
#ifdef CONFIG_DEBUG_FS

1813
static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
1814 1815
{
	struct mmc_host *mmc = s->private;
1816
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1817

1818 1819 1820
	seq_printf(s, "mmc%d:\n", mmc->index);
	seq_printf(s, "sdio irq mode\t%s\n",
		   (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");
1821

1822 1823 1824 1825 1826 1827
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		seq_printf(s, "sdio irq \t%s\n",
			   (host->flags & HSMMC_SDIO_IRQ_ENABLED) ?  "enabled"
			   : "disabled");
	}
	seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);
1828

1829 1830
	pm_runtime_get_sync(host->dev);
	seq_puts(s, "\nregs:\n");
1831 1832
	seq_printf(s, "CON:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CON));
1833 1834
	seq_printf(s, "PSTATE:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, PSTATE));
1835 1836 1837 1838 1839 1840 1841 1842 1843 1844
	seq_printf(s, "HCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, HCTL));
	seq_printf(s, "SYSCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, SYSCTL));
	seq_printf(s, "IE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, IE));
	seq_printf(s, "ISE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, ISE));
	seq_printf(s, "CAPA:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CAPA));
1845

1846 1847
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
1848

1849 1850 1851
	return 0;
}

1852
static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
1853
{
1854
	return single_open(file, omap_hsmmc_regs_show, inode->i_private);
1855 1856 1857
}

static const struct file_operations mmc_regs_fops = {
1858
	.open           = omap_hsmmc_regs_open,
1859 1860 1861 1862 1863
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

1864
static void omap_hsmmc_debugfs(struct mmc_host *mmc)
1865 1866 1867 1868 1869 1870 1871 1872
{
	if (mmc->debugfs_root)
		debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
			mmc, &mmc_regs_fops);
}

#else

/* No-op stub when CONFIG_DEBUG_FS is disabled. */
static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}

#endif

1879
#ifdef CONFIG_OF
1880 1881 1882 1883 1884 1885 1886 1887
static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
	/* See 35xx errata 2.1.1.128 in SPRZ278F */
	.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};

static const struct omap_mmc_of_data omap4_mmc_of_data = {
	.reg_offset = 0x100,
};
1888 1889 1890 1891
static const struct omap_mmc_of_data am33xx_mmc_of_data = {
	.reg_offset = 0x100,
	.controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
};
1892 1893 1894 1895 1896

static const struct of_device_id omap_mmc_of_match[] = {
	{
		.compatible = "ti,omap2-hsmmc",
	},
1897 1898 1899 1900
	{
		.compatible = "ti,omap3-pre-es3-hsmmc",
		.data = &omap3_pre_es3_mmc_of_data,
	},
1901 1902 1903 1904 1905
	{
		.compatible = "ti,omap3-hsmmc",
	},
	{
		.compatible = "ti,omap4-hsmmc",
1906
		.data = &omap4_mmc_of_data,
1907
	},
1908 1909 1910 1911
	{
		.compatible = "ti,am33xx-hsmmc",
		.data = &am33xx_mmc_of_data,
	},
1912
	{},
1913
};
1914 1915
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);

1916
static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
1917
{
1918
	struct omap_hsmmc_platform_data *pdata;
1919 1920 1921 1922
	struct device_node *np = dev->of_node;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
1923
		return ERR_PTR(-ENOMEM); /* out of memory */
1924 1925 1926 1927

	if (of_find_property(np, "ti,dual-volt", NULL))
		pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;

1928 1929
	pdata->gpio_cd = -EINVAL;
	pdata->gpio_cod = -EINVAL;
1930
	pdata->gpio_wp = -EINVAL;
1931 1932

	if (of_find_property(np, "ti,non-removable", NULL)) {
1933 1934
		pdata->nonremovable = true;
		pdata->no_regulator_off_init = true;
1935 1936 1937
	}

	if (of_find_property(np, "ti,needs-special-reset", NULL))
1938
		pdata->features |= HSMMC_HAS_UPDATED_RESET;
1939

1940
	if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
1941
		pdata->features |= HSMMC_HAS_HSPE_SUPPORT;
1942

1943 1944 1945
	return pdata;
}
#else
1946
static inline struct omap_hsmmc_platform_data
1947 1948
			*of_get_hsmmc_pdata(struct device *dev)
{
1949
	return ERR_PTR(-EINVAL);
1950 1951 1952
}
#endif

1953
static int omap_hsmmc_probe(struct platform_device *pdev)
1954
{
1955
	struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data;
1956
	struct mmc_host *mmc;
1957
	struct omap_hsmmc_host *host = NULL;
1958
	struct resource *res;
1959
	int ret, irq;
1960
	const struct of_device_id *match;
1961 1962
	dma_cap_mask_t mask;
	unsigned tx_req, rx_req;
1963
	const struct omap_mmc_of_data *data;
1964
	void __iomem *base;
1965 1966 1967 1968

	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
	if (match) {
		pdata = of_get_hsmmc_pdata(&pdev->dev);
1969 1970 1971 1972

		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

1973
		if (match->data) {
1974 1975 1976
			data = match->data;
			pdata->reg_offset = data->reg_offset;
			pdata->controller_flags |= data->controller_flags;
1977 1978
		}
	}
1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform Data is missing\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

1990 1991 1992
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
1993

1994
	mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
1995 1996
	if (!mmc) {
		ret = -ENOMEM;
1997
		goto err;
1998 1999
	}

2000 2001 2002 2003
	ret = mmc_of_parse(mmc);
	if (ret)
		goto err1;

2004 2005 2006 2007 2008 2009 2010
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->pdata	= pdata;
	host->dev	= &pdev->dev;
	host->use_dma	= 1;
	host->dma_ch	= -1;
	host->irq	= irq;
2011
	host->mapbase	= res->start + pdata->reg_offset;
2012
	host->base	= base + pdata->reg_offset;
2013
	host->power_mode = MMC_POWER_OFF;
2014
	host->next_data.cookie = 1;
2015
	host->pbias_enabled = 0;
2016

2017
	ret = omap_hsmmc_gpio_init(mmc, host, pdata);
2018 2019 2020
	if (ret)
		goto err_gpio;

2021 2022
	platform_set_drvdata(pdev, host);

2023 2024 2025
	if (pdev->dev.of_node)
		host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);

2026
	mmc->ops	= &omap_hsmmc_ops;
2027

2028 2029 2030 2031
	mmc->f_min = OMAP_MMC_MIN_CLOCK;

	if (pdata->max_freq > 0)
		mmc->f_max = pdata->max_freq;
2032
	else if (mmc->f_max == 0)
2033
		mmc->f_max = OMAP_MMC_MAX_CLOCK;
2034

2035
	spin_lock_init(&host->irq_lock);
2036

2037
	host->fclk = devm_clk_get(&pdev->dev, "fck");
2038 2039 2040 2041 2042 2043
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		host->fclk = NULL;
		goto err1;
	}

2044 2045
	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
2046
		omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
2047
	}
2048

2049
	device_init_wakeup(&pdev->dev, true);
2050 2051 2052 2053
	pm_runtime_enable(host->dev);
	pm_runtime_get_sync(host->dev);
	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(host->dev);
2054

2055 2056
	omap_hsmmc_context_save(host);

2057
	host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
2058 2059 2060 2061 2062
	/*
	 * MMC can still work without debounce clock.
	 */
	if (IS_ERR(host->dbclk)) {
		host->dbclk = NULL;
2063
	} else if (clk_prepare_enable(host->dbclk) != 0) {
2064 2065
		dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
		host->dbclk = NULL;
2066
	}
2067

2068 2069
	/* Since we do only SG emulation, we can have as many segs
	 * as we want. */
2070
	mmc->max_segs = 1024;
2071

2072 2073 2074 2075 2076
	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

2077
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
2078
		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
2079

2080
	mmc->caps |= mmc_pdata(host)->caps;
2081
	if (mmc->caps & MMC_CAP_8_BIT_DATA)
2082 2083
		mmc->caps |= MMC_CAP_4_BIT_DATA;

2084
	if (mmc_pdata(host)->nonremovable)
2085 2086
		mmc->caps |= MMC_CAP_NONREMOVABLE;

2087
	mmc->pm_caps |= mmc_pdata(host)->pm_caps;
2088

2089
	omap_hsmmc_conf_bus_power(host);
2090

2091 2092 2093 2094 2095 2096 2097 2098
	if (!pdev->dev.of_node) {
		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		tx_req = res->start;
2099

2100 2101 2102 2103 2104 2105 2106
		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		rx_req = res->start;
2107
	}
2108

2109 2110 2111
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

2112 2113 2114 2115
	host->rx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &rx_req, &pdev->dev, "rx");

2116 2117
	if (!host->rx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
2118
		ret = -ENXIO;
2119 2120 2121
		goto err_irq;
	}

2122 2123 2124 2125
	host->tx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &tx_req, &pdev->dev, "tx");

2126 2127
	if (!host->tx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
2128
		ret = -ENXIO;
2129
		goto err_irq;
2130
	}
2131 2132

	/* Request IRQ for MMC operations */
2133
	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
2134 2135
			mmc_hostname(mmc), host);
	if (ret) {
2136
		dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
2137 2138 2139
		goto err_irq;
	}

2140
	if (omap_hsmmc_have_reg()) {
2141 2142
		ret = omap_hsmmc_reg_get(host);
		if (ret)
2143
			goto err_irq;
2144 2145
	}

2146
	mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
2147

2148
	omap_hsmmc_disable_irq(host);
2149

2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161
	/*
	 * For now, only support SDIO interrupt if we have a separate
	 * wake-up interrupt configured from device tree. This is because
	 * the wake-up interrupt is needed for idle state and some
	 * platforms need special quirks. And we don't want to add new
	 * legacy mux platform init code callbacks any longer as we
	 * are moving to DT based booting anyways.
	 */
	ret = omap_hsmmc_configure_wake_irq(host);
	if (!ret)
		mmc->caps |= MMC_CAP_SDIO_IRQ;

2162 2163
	omap_hsmmc_protect_card(host);

2164 2165
	mmc_add_host(mmc);

2166
	if (mmc_pdata(host)->name != NULL) {
2167 2168 2169 2170
		ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
		if (ret < 0)
			goto err_slot_name;
	}
2171
	if (host->get_cover_state) {
2172
		ret = device_create_file(&mmc->class_dev,
2173
					 &dev_attr_cover_switch);
2174
		if (ret < 0)
2175
			goto err_slot_name;
2176 2177
	}

2178
	omap_hsmmc_debugfs(mmc);
2179 2180
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
2181

2182 2183 2184 2185 2186
	return 0;

err_slot_name:
	mmc_remove_host(mmc);
err_irq:
2187
	device_init_wakeup(&pdev->dev, false);
2188 2189 2190 2191
	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);
2192
	pm_runtime_put_sync(host->dev);
2193
	pm_runtime_disable(host->dev);
2194
	if (host->dbclk)
2195
		clk_disable_unprepare(host->dbclk);
2196
err1:
2197
err_gpio:
2198
	mmc_free_host(mmc);
2199 2200 2201 2202
err:
	return ret;
}

2203
static int omap_hsmmc_remove(struct platform_device *pdev)
2204
{
2205
	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2206

2207 2208
	pm_runtime_get_sync(host->dev);
	mmc_remove_host(host->mmc);
2209

2210 2211 2212 2213 2214
	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);

2215 2216
	pm_runtime_put_sync(host->dev);
	pm_runtime_disable(host->dev);
2217
	device_init_wakeup(&pdev->dev, false);
2218
	if (host->dbclk)
2219
		clk_disable_unprepare(host->dbclk);
2220

2221
	mmc_free_host(host->mmc);
2222

2223 2224 2225
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_hsmmc_suspend(struct device *dev)
2228
{
2229
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2230

2231
	if (!host)
2232 2233
		return 0;

2234
	pm_runtime_get_sync(host->dev);
2235

2236
	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
2237 2238 2239
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
2240 2241
		OMAP_HSMMC_WRITE(host->base, HCTL,
				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2242
	}
2243

2244
	if (host->dbclk)
2245
		clk_disable_unprepare(host->dbclk);
2246

2247
	pm_runtime_put_sync(host->dev);
2248
	return 0;
2249 2250 2251
}

/* Routine to resume the MMC device */
2252
static int omap_hsmmc_resume(struct device *dev)
2253
{
2254 2255 2256 2257
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;
2258

2259
	pm_runtime_get_sync(host->dev);
2260

2261
	if (host->dbclk)
2262
		clk_prepare_enable(host->dbclk);
2263

2264 2265
	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
		omap_hsmmc_conf_bus_power(host);
2266

2267 2268 2269
	omap_hsmmc_protect_card(host);
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
2270
	return 0;
2271 2272 2273
}
#endif

static int omap_hsmmc_runtime_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host;
2277
	unsigned long flags;
2278
	int ret = 0;
2279 2280 2281

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_save(host);
2282
	dev_dbg(dev, "disabled\n");
2283

2284 2285 2286 2287 2288 2289
	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* disable sdio irq handling to prevent race */
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);
2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304

		if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
			/*
			 * dat1 line low, pending sdio irq
			 * race condition: possible irq handler running on
			 * multi-core, abort
			 */
			dev_dbg(dev, "pending sdio irq, abort suspend\n");
			OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
			OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
			OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
			pm_runtime_mark_last_busy(dev);
			ret = -EBUSY;
			goto abort;
		}
2305

2306 2307 2308
		pinctrl_pm_select_idle_state(dev);
	} else {
		pinctrl_pm_select_idle_state(dev);
2309
	}
2310

2311
abort:
2312
	spin_unlock_irqrestore(&host->irq_lock, flags);
2313
	return ret;
2314 2315 2316 2317 2318
}

static int omap_hsmmc_runtime_resume(struct device *dev)
{
	struct omap_hsmmc_host *host;
2319
	unsigned long flags;
2320 2321 2322

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_restore(host);
2323
	dev_dbg(dev, "enabled\n");
2324

2325 2326 2327 2328
	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {

2329 2330 2331
		pinctrl_pm_select_default_state(host->dev);

		/* irq lost, if pinmux incorrect */
2332 2333 2334
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
		OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
2335 2336
	} else {
		pinctrl_pm_select_default_state(host->dev);
2337 2338
	}
	spin_unlock_irqrestore(&host->irq_lock, flags);
2339 2340 2341
	return 0;
}

static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2343
	SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
2344 2345
	.runtime_suspend = omap_hsmmc_runtime_suspend,
	.runtime_resume = omap_hsmmc_runtime_resume,
2346 2347 2348
};

static struct platform_driver omap_hsmmc_driver = {
2349
	.probe		= omap_hsmmc_probe,
2350
	.remove		= omap_hsmmc_remove,
2351 2352
	.driver		= {
		.name = DRIVER_NAME,
2353
		.pm = &omap_hsmmc_dev_pm_ops,
2354
		.of_match_table = of_match_ptr(omap_mmc_of_match),
2355 2356 2357
	},
};

module_platform_driver(omap_hsmmc_driver);

MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");