/*
 * drivers/mmc/host/omap_hsmmc.c
 *
 * Driver for OMAP2430/3430 MMC controller.
 *
 * Copyright (C) 2007 Texas Instruments.
 *
 * Authors:
 *	Syed Mohammed Khasim	<x0khasim@ti.com>
 *	Madhusudhan		<madhu.cr@ti.com>
 *	Mohit Jalori		<mjalori@ti.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/omap-dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/hsmmc-omap.h>

/* OMAP HSMMC Host Controller Registers */
#define OMAP_HSMMC_SYSSTATUS	0x0014
#define OMAP_HSMMC_CON		0x002C
#define OMAP_HSMMC_SDMASA	0x0100
#define OMAP_HSMMC_BLK		0x0104
#define OMAP_HSMMC_ARG		0x0108
#define OMAP_HSMMC_CMD		0x010C
#define OMAP_HSMMC_RSP10	0x0110
#define OMAP_HSMMC_RSP32	0x0114
#define OMAP_HSMMC_RSP54	0x0118
#define OMAP_HSMMC_RSP76	0x011C
#define OMAP_HSMMC_DATA		0x0120
#define OMAP_HSMMC_PSTATE	0x0124
#define OMAP_HSMMC_HCTL		0x0128
#define OMAP_HSMMC_SYSCTL	0x012C
#define OMAP_HSMMC_STAT		0x0130
#define OMAP_HSMMC_IE		0x0134
#define OMAP_HSMMC_ISE		0x0138
#define OMAP_HSMMC_AC12		0x013C
#define OMAP_HSMMC_CAPA		0x0140

#define VS18			(1 << 26)
#define VS30			(1 << 25)
#define HSS			(1 << 21)
#define SDVS18			(0x5 << 9)
#define SDVS30			(0x6 << 9)
#define SDVS33			(0x7 << 9)
#define SDVS_MASK		0x00000E00
#define SDVSCLR			0xFFFFF1FF
#define SDVSDET			0x00000400
#define AUTOIDLE		0x1
#define SDBP			(1 << 8)
#define DTO			0xe
#define ICE			0x1
#define ICS			0x2
#define CEN			(1 << 2)
#define CLKD_MAX		0x3FF		/* max clock divisor: 1023 */
#define CLKD_MASK		0x0000FFC0
#define CLKD_SHIFT		6
#define DTO_MASK		0x000F0000
#define DTO_SHIFT		16
#define INIT_STREAM		(1 << 1)
#define ACEN_ACMD23		(2 << 2)
#define DP_SELECT		(1 << 21)
#define DDIR			(1 << 4)
#define DMAE			0x1
#define MSBS			(1 << 5)
#define BCE			(1 << 1)
#define FOUR_BIT		(1 << 1)
#define HSPE			(1 << 2)
#define IWE			(1 << 24)
#define DDR			(1 << 19)
#define CLKEXTFREE		(1 << 16)
#define CTPL			(1 << 11)
#define DW8			(1 << 5)
#define OD			0x1
#define STAT_CLEAR		0xFFFFFFFF
#define INIT_STREAM_CMD		0x00000000
#define DUAL_VOLT_OCR_BIT	7
#define SRC			(1 << 25)
#define SRD			(1 << 26)
#define SOFTRESET		(1 << 1)

/* PSTATE */
#define DLEV_DAT(x)		(1 << (20 + (x)))

/* Interrupt masks for IE and ISE register */
#define CC_EN			(1 << 0)
#define TC_EN			(1 << 1)
#define BWR_EN			(1 << 4)
#define BRR_EN			(1 << 5)
#define CIRQ_EN			(1 << 8)
#define ERR_EN			(1 << 15)
#define CTO_EN			(1 << 16)
#define CCRC_EN			(1 << 17)
#define CEB_EN			(1 << 18)
#define CIE_EN			(1 << 19)
#define DTO_EN			(1 << 20)
#define DCRC_EN			(1 << 21)
#define DEB_EN			(1 << 22)
#define ACE_EN			(1 << 24)
#define CERR_EN			(1 << 28)
#define BADA_EN			(1 << 29)

#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
		DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
		BRR_EN | BWR_EN | TC_EN | CC_EN)

#define CNI	(1 << 7)
#define ACIE	(1 << 4)
#define ACEB	(1 << 3)
#define ACCE	(1 << 2)
#define ACTO	(1 << 1)
#define ACNE	(1 << 0)

#define MMC_AUTOSUSPEND_DELAY	100
#define MMC_TIMEOUT_MS		20		/* 20 mSec */
#define MMC_TIMEOUT_US		20000		/* 20000 micro Sec */
#define OMAP_MMC_MIN_CLOCK	400000
#define OMAP_MMC_MAX_CLOCK	52000000
#define DRIVER_NAME		"omap_hsmmc"

#define VDD_1V8			1800000		/* 1800000 uV */
#define VDD_3V0			3000000		/* 3000000 uV */
#define VDD_165_195		(ffs(MMC_VDD_165_195) - 1)

/*
 * One controller can have multiple slots, like on some omap boards using
 * omap.c controller driver. Luckily this is not currently done on any known
 * omap_hsmmc.c device.
 */
#define mmc_pdata(host)		host->pdata

/*
 * MMC Host controller read/write API's
 */
#define OMAP_HSMMC_READ(base, reg)	\
	__raw_readl((base) + OMAP_HSMMC_##reg)

#define OMAP_HSMMC_WRITE(base, reg, val) \
	__raw_writel((val), (base) + OMAP_HSMMC_##reg)

struct omap_hsmmc_next {
	unsigned int	dma_len;
	s32		cookie;
};

struct omap_hsmmc_host {
	struct	device		*dev;
	struct	mmc_host	*mmc;
	struct	mmc_request	*mrq;
	struct	mmc_command	*cmd;
	struct	mmc_data	*data;
	struct	clk		*fclk;
	struct	clk		*dbclk;
	/*
	 * vcc == configured supply
	 * vcc_aux == optional
	 *   -	MMC1, supply for DAT4..DAT7
	 *   -	MMC2/MMC3, external level shifter voltage supply, for
	 *	chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
	 */
	struct	regulator	*vcc;
	struct	regulator	*vcc_aux;
	struct	regulator	*pbias;
	bool			pbias_enabled;
	void	__iomem		*base;
	resource_size_t		mapbase;
	spinlock_t		irq_lock; /* Prevent races with irq handler */
	unsigned int		dma_len;
	unsigned int		dma_sg_idx;
	unsigned char		bus_mode;
	unsigned char		power_mode;
	int			suspended;
	u32			con;
	u32			hctl;
	u32			sysctl;
	u32			capa;
	int			irq;
	int			wake_irq;
	int			use_dma, dma_ch;
	struct dma_chan		*tx_chan;
	struct dma_chan		*rx_chan;
	int			slot_id;
	int			response_busy;
	int			context_loss;
	int			protect_card;
	int			reqs_blocked;
	int			use_reg;
	int			req_in_progress;
	unsigned long		clk_rate;
	unsigned int		flags;
#define AUTO_CMD23		(1 << 0)        /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED	(1 << 1)        /* SDIO irq enabled */
#define HSMMC_WAKE_IRQ_ENABLED	(1 << 2)
	struct omap_hsmmc_next	next_data;
	struct	omap_hsmmc_platform_data	*pdata;
};

struct omap_mmc_of_data {
	u32 reg_offset;
	u8 controller_flags;
};

static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);

static int omap_hsmmc_card_detect(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_hsmmc_platform_data *mmc = host->pdata;

	/* NOTE: assumes card detect signal is active-low */
	return !gpio_get_value_cansleep(mmc->switch_pin);
}

static int omap_hsmmc_get_wp(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_hsmmc_platform_data *mmc = host->pdata;

	/* NOTE: assumes write protect signal is active-high */
	return gpio_get_value_cansleep(mmc->gpio_wp);
}

static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_hsmmc_platform_data *mmc = host->pdata;

	/* NOTE: assumes card detect signal is active-low */
	return !gpio_get_value_cansleep(mmc->switch_pin);
}

#ifdef CONFIG_PM

static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_hsmmc_platform_data *mmc = host->pdata;

	disable_irq(mmc->card_detect_irq);
	return 0;
}

static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_hsmmc_platform_data *mmc = host->pdata;

	enable_irq(mmc->card_detect_irq);
	return 0;
}

#else

#define omap_hsmmc_suspend_cdirq	NULL
#define omap_hsmmc_resume_cdirq		NULL

#endif

#ifdef CONFIG_REGULATOR

static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
				   int vdd)
{
	struct omap_hsmmc_host *host =
		platform_get_drvdata(to_platform_device(dev));
	int ret = 0;

	/*
	 * If we don't see a Vcc regulator, assume it's a fixed
	 * voltage always-on regulator.
	 */
	if (!host->vcc)
		return 0;

	if (mmc_pdata(host)->before_set_reg)
		mmc_pdata(host)->before_set_reg(dev, slot, power_on, vdd);

	if (host->pbias) {
		if (host->pbias_enabled == 1) {
			ret = regulator_disable(host->pbias);
			if (!ret)
				host->pbias_enabled = 0;
		}
		regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
	}

	/*
	 * Assume Vcc regulator is used only to power the card ... OMAP
	 * VDDS is used to power the pins, optionally with a transceiver to
	 * support cards using voltages other than VDDS (1.8V nominal).  When a
	 * transceiver is used, DAT3..7 are muxed as transceiver control pins.
	 *
	 * In some cases this regulator won't support enable/disable;
	 * e.g. it's a fixed rail for a WLAN chip.
	 *
	 * In other cases vcc_aux switches interface power.  Example, for
	 * eMMC cards it represents VccQ.  Sometimes transceivers or SDIO
	 * chips/cards need an interface voltage rail too.
	 */
	if (power_on) {
		if (host->vcc)
			ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
		/* Enable interface voltage rail, if needed */
		if (ret == 0 && host->vcc_aux) {
			ret = regulator_enable(host->vcc_aux);
			if (ret < 0 && host->vcc)
				ret = mmc_regulator_set_ocr(host->mmc,
							host->vcc, 0);
		}
	} else {
		/* Shut down the rail */
		if (host->vcc_aux)
			ret = regulator_disable(host->vcc_aux);
		if (host->vcc) {
			/* Then proceed to shut down the local regulator */
			ret = mmc_regulator_set_ocr(host->mmc,
						host->vcc, 0);
		}
	}

	if (host->pbias) {
		if (vdd <= VDD_165_195)
			ret = regulator_set_voltage(host->pbias, VDD_1V8,
								VDD_1V8);
		else
			ret = regulator_set_voltage(host->pbias, VDD_3V0,
								VDD_3V0);
		if (ret < 0)
			goto error_set_power;

		if (host->pbias_enabled == 0) {
			ret = regulator_enable(host->pbias);
			if (!ret)
				host->pbias_enabled = 1;
		}
	}

	if (mmc_pdata(host)->after_set_reg)
		mmc_pdata(host)->after_set_reg(dev, slot, power_on, vdd);

error_set_power:
	return ret;
}

static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
	struct regulator *reg;
	int ocr_value = 0;

	reg = devm_regulator_get(host->dev, "vmmc");
	if (IS_ERR(reg)) {
		dev_err(host->dev, "unable to get vmmc regulator %ld\n",
			PTR_ERR(reg));
		return PTR_ERR(reg);
	} else {
		host->vcc = reg;
		ocr_value = mmc_regulator_get_ocrmask(reg);
		if (!mmc_pdata(host)->ocr_mask) {
			mmc_pdata(host)->ocr_mask = ocr_value;
		} else {
			if (!(mmc_pdata(host)->ocr_mask & ocr_value)) {
				dev_err(host->dev, "ocrmask %x is not supported\n",
					mmc_pdata(host)->ocr_mask);
				mmc_pdata(host)->ocr_mask = 0;
				return -EINVAL;
			}
		}
	}
	mmc_pdata(host)->set_power = omap_hsmmc_set_power;

	/* Allow an aux regulator */
	reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
	host->vcc_aux = IS_ERR(reg) ? NULL : reg;

	reg = devm_regulator_get_optional(host->dev, "pbias");
	host->pbias = IS_ERR(reg) ? NULL : reg;

	/* For eMMC do not power off when not in sleep state */
	if (mmc_pdata(host)->no_regulator_off_init)
		return 0;
	/*
	 * To disable boot_on regulator, enable regulator
	 * to increase usecount and then disable it.
	 */
	if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
	    (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
		int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1;

		mmc_pdata(host)->set_power(host->dev, host->slot_id, 1, vdd);
		mmc_pdata(host)->set_power(host->dev, host->slot_id, 0, 0);
	}

	return 0;
}

static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
{
	mmc_pdata(host)->set_power = NULL;
}

static inline int omap_hsmmc_have_reg(void)
{
	return 1;
}

#else

static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
	return -EINVAL;
}

static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
{
}

static inline int omap_hsmmc_have_reg(void)
{
	return 0;
}

#endif

static int omap_hsmmc_gpio_init(struct omap_hsmmc_host *host,
				struct omap_hsmmc_platform_data *pdata)
{
	int ret;

	if (gpio_is_valid(pdata->switch_pin)) {
		if (pdata->cover)
			pdata->get_cover_state =
					omap_hsmmc_get_cover_state;
		else
			pdata->card_detect = omap_hsmmc_card_detect;
		pdata->card_detect_irq =
				gpio_to_irq(pdata->switch_pin);
		ret = gpio_request(pdata->switch_pin, "mmc_cd");
		if (ret)
			return ret;
		ret = gpio_direction_input(pdata->switch_pin);
		if (ret)
			goto err_free_sp;
	} else {
		pdata->switch_pin = -EINVAL;
	}

	if (gpio_is_valid(pdata->gpio_wp)) {
		pdata->get_ro = omap_hsmmc_get_wp;
		ret = gpio_request(pdata->gpio_wp, "mmc_wp");
		if (ret)
			goto err_free_cd;
		ret = gpio_direction_input(pdata->gpio_wp);
		if (ret)
			goto err_free_wp;
	} else {
		pdata->gpio_wp = -EINVAL;
	}

	return 0;

err_free_wp:
	gpio_free(pdata->gpio_wp);
err_free_cd:
	if (gpio_is_valid(pdata->switch_pin))
err_free_sp:
		gpio_free(pdata->switch_pin);
	return ret;
}

static void omap_hsmmc_gpio_free(struct omap_hsmmc_host *host,
				 struct omap_hsmmc_platform_data *pdata)
{
	if (gpio_is_valid(pdata->gpio_wp))
		gpio_free(pdata->gpio_wp);
	if (gpio_is_valid(pdata->switch_pin))
		gpio_free(pdata->switch_pin);
}

/*
 * Start clock to the card
 */
static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
}

/*
 * Stop clock to the card
 */
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
	if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
}

static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
				  struct mmc_command *cmd)
{
	u32 irq_mask = INT_EN_MASK;
	unsigned long flags;

	if (host->use_dma)
		irq_mask &= ~(BRR_EN | BWR_EN);

	/* Disable timeout for erases */
	if (cmd->opcode == MMC_ERASE)
		irq_mask &= ~DTO_EN;

	spin_lock_irqsave(&host->irq_lock, flags);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* latch pending CIRQ, but don't signal MMC core */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
{
	u32 irq_mask = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	/* no transfer running but need to keep cirq if enabled */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

/* Calculate divisor for the given clock frequency */
static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
{
	u16 dsor = 0;

	if (ios->clock) {
		dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
		if (dsor > CLKD_MAX)
			dsor = CLKD_MAX;
	}

	return dsor;
}

static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned long regval;
	unsigned long timeout;
	unsigned long clkdiv;

	dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);

	omap_hsmmc_stop_clock(host);

	regval = OMAP_HSMMC_READ(host->base, SYSCTL);
	regval = regval & ~(CLKD_MASK | DTO_MASK);
	clkdiv = calc_divisor(host, ios);
	regval = regval | (clkdiv << 6) | (DTO << 16);
	OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);

	/* Wait till the ICS bit is set */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
		&& time_before(jiffies, timeout))
		cpu_relax();

	/*
	 * Enable High-Speed Support
	 * Pre-Requisites
	 *	- Controller should support High-Speed-Enable Bit
	 *	- Controller should not be using DDR Mode
	 *	- Controller should advertise that it supports High Speed
	 *	  in capabilities register
	 *	- MMC/SD clock coming out of controller > 25MHz
	 */
	if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) &&
	    (ios->timing != MMC_TIMING_MMC_DDR52) &&
	    ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
		regval = OMAP_HSMMC_READ(host->base, HCTL);
		if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
			regval |= HSPE;
		else
			regval &= ~HSPE;

		OMAP_HSMMC_WRITE(host->base, HCTL, regval);
	}

	omap_hsmmc_start_clock(host);
}

static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		con |= DDR;	/* configure in DDR mode */
	else
		con &= ~DDR;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
		break;
	case MMC_BUS_WIDTH_4:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
		break;
	case MMC_BUS_WIDTH_1:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
		break;
	}
}

static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		OMAP_HSMMC_WRITE(host->base, CON, con | OD);
	else
		OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
}

#ifdef CONFIG_PM

/*
 * Restore the MMC host context, if it was lost as result of a
 * power state change.
 */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 hctl, capa;
	unsigned long timeout;

	if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
	    host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
	    host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
	    host->capa == OMAP_HSMMC_READ(host->base, CAPA))
		return 0;

	host->context_loss++;

	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		if (host->power_mode != MMC_POWER_OFF &&
		    (1 << ios->vdd) <= MMC_VDD_23_24)
			hctl = SDVS18;
		else
			hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		hctl |= IWE;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | hctl);

	OMAP_HSMMC_WRITE(host->base, CAPA,
			OMAP_HSMMC_READ(host->base, CAPA) | capa);

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | SDBP);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
		&& time_before(jiffies, timeout))
		;

	OMAP_HSMMC_WRITE(host->base, ISE, 0);
	OMAP_HSMMC_WRITE(host->base, IE, 0);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);

	/* Do not initialize card-specific things if the power is off */
	if (host->power_mode == MMC_POWER_OFF)
		goto out;

	omap_hsmmc_set_bus_width(host);

	omap_hsmmc_set_clock(host);

	omap_hsmmc_set_bus_mode(host);

out:
	dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
		host->context_loss);
	return 0;
}

/*
 * Save the MMC host context (store the number of power state changes so far).
 */
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
	host->con =  OMAP_HSMMC_READ(host->base, CON);
	host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
	host->sysctl =  OMAP_HSMMC_READ(host->base, SYSCTL);
	host->capa = OMAP_HSMMC_READ(host->base, CAPA);
}

#else

static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	return 0;
}

static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}

#endif

/*
 * Send init stream sequence to card
 * before sending IDLE command
 */
static void send_init_stream(struct omap_hsmmc_host *host)
{
	int reg = 0;
	unsigned long timeout;

	if (host->protect_card)
		return;

	disable_irq(host->irq);

	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((reg != CC_EN) && time_before(jiffies, timeout))
		reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;

	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);

	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_READ(host->base, STAT);

	enable_irq(host->irq);
}

static inline
int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
{
	int r = 1;

	if (mmc_pdata(host)->get_cover_state)
		r = mmc_pdata(host)->get_cover_state(host->dev, host->slot_id);
	return r;
}

static ssize_t
omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	return sprintf(buf, "%s\n",
			omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);

static ssize_t
omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	return sprintf(buf, "%s\n", mmc_pdata(host)->name);
}

static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);

/*
 * Configure the response type and send the cmd.
 */
static void
omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
	struct mmc_data *data)
{
	int cmdreg = 0, resptype = 0, cmdtype = 0;

	dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
	host->cmd = cmd;

	omap_hsmmc_enable_irq(host, cmd);

	host->response_busy = 0;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			resptype = 1;
		else if (cmd->flags & MMC_RSP_BUSY) {
			resptype = 3;
			host->response_busy = 1;
		} else
			resptype = 2;
	}

	/*
	 * Unlike OMAP1 controller, the cmdtype does not seem to be based on
	 * ac, bc, adtc, bcr. Only commands ending an open ended transfer need
	 * a val of 0x3, rest 0x0.
	 */
	if (cmd == host->mrq->stop)
		cmdtype = 0x3;

	cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);

	if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
	    host->mrq->sbc) {
		cmdreg |= ACEN_ACMD23;
		OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
	}
	if (data) {
		cmdreg |= DP_SELECT | MSBS | BCE;
		if (data->flags & MMC_DATA_READ)
			cmdreg |= DDIR;
		else
			cmdreg &= ~(DDIR);
	}

	if (host->use_dma)
		cmdreg |= DMAE;

	host->req_in_progress = 1;

	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}

static int
omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
	struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

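/*
 * Finish the current request unless DMA is still draining data; in that
 * case omap_hsmmc_dma_callback() completes the request once DMA is done.
 */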
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
	int dma_ch;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	host->req_in_progress = 0;
	dma_ch = host->dma_ch;
	spin_unlock_irqrestore(&host->irq_lock, flags);

	omap_hsmmc_disable_irq(host);
	/* Do not complete the request if DMA is still in progress */
	if (mrq->data && host->use_dma && dma_ch != -1)
		return;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

/*
 * Notify the transfer complete to MMC core
 */
static void
omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	if (!data) {
		struct mmc_request *mrq = host->mrq;

		/* TC before CC from CMD6 - don't know why, but it happens */
		if (host->cmd && host->cmd->opcode == 6 &&
		    host->response_busy) {
			host->response_busy = 0;
			return;
		}

		omap_hsmmc_request_done(host, mrq);
		return;
	}

	host->data = NULL;

	if (!data->error)
		data->bytes_xfered += data->blocks * (data->blksz);
	else
		data->bytes_xfered = 0;

	if (data->stop && (data->error || !host->mrq->sbc))
		omap_hsmmc_start_command(host, data->stop, NULL);
	else
		omap_hsmmc_request_done(host, data->mrq);
}

/*
 * Notify the core about command completion
 */
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
	if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
	    !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
		host->cmd = NULL;
		omap_hsmmc_start_dma_transfer(host);
		omap_hsmmc_start_command(host, host->mrq->cmd,
						host->mrq->data);
		return;
	}

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
			cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
			cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
		}
	}
	if ((host->data == NULL && !host->response_busy) || cmd->error)
		omap_hsmmc_request_done(host, host->mrq);
}

/*
 * DMA clean up for command errors
 */
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
	int dma_ch;
	unsigned long flags;

	host->data->error = errno;

	spin_lock_irqsave(&host->irq_lock, flags);
	dma_ch = host->dma_ch;
	host->dma_ch = -1;
	spin_unlock_irqrestore(&host->irq_lock, flags);

	if (host->use_dma && dma_ch != -1) {
		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);

		dmaengine_terminate_all(chan);
		dma_unmap_sg(chan->device->dev,
			host->data->sg, host->data->sg_len,
			omap_hsmmc_get_dma_dir(host, host->data));

		host->data->host_cookie = 0;
	}
	host->data = NULL;
}

/*
 * Readable error output
 */
#ifdef CONFIG_MMC_DEBUG
static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
	/* --- means reserved bit without definition at documentation */
	static const char *omap_hsmmc_status_bits[] = {
		"CC"  , "TC"  , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
		"CIRQ",	"OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
		"CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
		"ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
	};
	char res[256];
	char *buf = res;
	int len, i;

	len = sprintf(buf, "MMC IRQ 0x%x :", status);
	buf += len;

	for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
		if (status & (1 << i)) {
			len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
			buf += len;
		}

	dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
#else
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
					     u32 status)
{
}
#endif  /* CONFIG_MMC_DEBUG */

/*
 * MMC controller internal state machines reset
 *
 * Used to reset command or data internal state machines, using respectively
 *  SRC or SRD bit of SYSCTL register
 * Can be called from interrupt context
 */
static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
						   unsigned long bit)
{
	unsigned long i = 0;
	unsigned long limit = MMC_TIMEOUT_US;

	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);

	/*
	 * OMAP4 ES2 and greater has an updated reset logic.
	 * Monitor a 0->1 transition first
	 */
	if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) {
		while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
					&& (i++ < limit))
			udelay(1);
	}
	i = 0;

	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
		(i++ < limit))
		udelay(1);

	if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
		dev_err(mmc_dev(host->mmc),
			"Timeout waiting on controller reset in %s\n",
			__func__);
}

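/*
 * Handle an unsuccessful command: reset the command and/or data state
 * machines and propagate the error code to the affected command or data.
 */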
static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
					int err, int end_cmd)
{
	if (end_cmd) {
		omap_hsmmc_reset_controller_fsm(host, SRC);
		if (host->cmd)
			host->cmd->error = err;
	}

	if (host->data) {
		omap_hsmmc_reset_controller_fsm(host, SRD);
		omap_hsmmc_dma_cleanup(host, err);
	} else if (host->mrq && host->mrq->cmd)
		host->mrq->cmd->error = err;
}

static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
	struct mmc_data *data;
	int end_cmd = 0, end_trans = 0;
	int error = 0;

	data = host->data;
	dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);

	if (status & ERR_EN) {
		omap_hsmmc_dbg_report_irq(host, status);

		if (status & (CTO_EN | CCRC_EN))
			end_cmd = 1;
		if (status & (CTO_EN | DTO_EN))
			hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
		else if (status & (CCRC_EN | DCRC_EN))
			hsmmc_command_incomplete(host, -EILSEQ, end_cmd);

		if (status & ACE_EN) {
			u32 ac12;
			ac12 = OMAP_HSMMC_READ(host->base, AC12);
			if (!(ac12 & ACNE) && host->mrq->sbc) {
				end_cmd = 1;
				if (ac12 & ACTO)
					error =  -ETIMEDOUT;
				else if (ac12 & (ACCE | ACEB | ACIE))
					error = -EILSEQ;
				host->mrq->sbc->error = error;
				hsmmc_command_incomplete(host, error, end_cmd);
			}
			dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
		}
		if (host->data || host->response_busy) {
			end_trans = !end_cmd;
			host->response_busy = 0;
		}
	}

	OMAP_HSMMC_WRITE(host->base, STAT, status);
	if (end_cmd || ((status & CC_EN) && host->cmd))
		omap_hsmmc_cmd_done(host, host->cmd);
	if ((end_trans || (status & TC_EN)) && host->mrq)
		omap_hsmmc_xfer_done(host, data);
}

/*
 * MMC controller IRQ handler
 */
static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;
	int status;

	status = OMAP_HSMMC_READ(host->base, STAT);
	while (status & (INT_EN_MASK | CIRQ_EN)) {
		if (host->req_in_progress)
			omap_hsmmc_do_irq(host, status);

		if (status & CIRQ_EN)
			mmc_signal_sdio_irq(host->mmc);

		/* Flush posted write */
		status = OMAP_HSMMC_READ(host->base, STAT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;

	/* cirq is level triggered, disable to avoid infinite loop */
	spin_lock(&host->irq_lock);
	if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
		disable_irq_nosync(host->wake_irq);
		host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
	}
	spin_unlock(&host->irq_lock);
	pm_request_resume(host->dev); /* no use counter */

	return IRQ_HANDLED;
}

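/* Enable the SD bus power rail and busy-wait briefly for SDBP to latch. */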
static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
	unsigned long i;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
	for (i = 0; i < loops_per_jiffy; i++) {
		if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
			break;
		cpu_relax();
	}
}

/*
 * Switch MMC interface voltage ... only relevant for MMC1.
 *
 * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
 * The MMC2 transceiver controls are used instead of DAT4..DAT7.
 * Some chips, like eMMC ones, use internal transceivers.
 */
static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
{
	u32 reg_val = 0;
	int ret;

	/* Disable the clocks */
	pm_runtime_put_sync(host->dev);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	/* Turn the power off */
	ret = mmc_pdata(host)->set_power(host->dev, host->slot_id, 0, 0);

	/* Turn the power ON with given VDD 1.8 or 3.0v */
	if (!ret)
		ret = mmc_pdata(host)->set_power(host->dev, host->slot_id, 1,
						 vdd);
	pm_runtime_get_sync(host->dev);
	if (host->dbclk)
		clk_prepare_enable(host->dbclk);

	if (ret != 0)
		goto err;

	OMAP_HSMMC_WRITE(host->base, HCTL,
		OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
	reg_val = OMAP_HSMMC_READ(host->base, HCTL);

	/*
	 * If a MMC dual voltage card is detected, the set_ios fn calls
	 * this fn with VDD bit set for 1.8V. Upon card removal from the
	 * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
	 *
	 * Cope with a bit of slop in the range ... per data sheets:
	 *  - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
	 *    but recommended values are 1.71V to 1.89V
	 *  - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
	 *    but recommended values are 2.7V to 3.3V
	 *
	 * Board setup code shouldn't permit anything very out-of-range.
	 * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
	 * middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
	 */
	if ((1 << vdd) <= MMC_VDD_23_24)
		reg_val |= SDVS18;
	else
		reg_val |= SDVS30;

	OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
	set_sd_bus_power(host);

	return 0;
err:
	dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
	return ret;
}

/* Protect the card while the cover is open */
static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
{
	if (!mmc_pdata(host)->get_cover_state)
		return;

	host->reqs_blocked = 0;
	if (mmc_pdata(host)->get_cover_state(host->dev, host->slot_id)) {
		if (host->protect_card) {
			dev_info(host->dev, "%s: cover is closed, "
					 "card is now accessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 0;
		}
	} else {
		if (!host->protect_card) {
			dev_info(host->dev, "%s: cover is open, "
					 "card is now inaccessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 1;
		}
	}
}

/*
 * irq handler to notify the core about card insertion/removal
 */
static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;
	struct omap_hsmmc_platform_data *pdata = host->pdata;
	int carddetect;

	sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");

	if (pdata->card_detect)
		carddetect = pdata->card_detect(host->dev, host->slot_id);
	else {
		omap_hsmmc_protect_card(host);
		carddetect = -ENOSYS;
	}

	if (carddetect)
		mmc_detect_change(host->mmc, (HZ * 200) / 1000);
	else
		mmc_detect_change(host->mmc, (HZ * 50) / 1000);
	return IRQ_HANDLED;
}

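/*
 * dmaengine completion callback: unmap the scatterlist and, if the
 * transfer-complete interrupt has already been handled, finish the request.
 */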
static void omap_hsmmc_dma_callback(void *param)
{
	struct omap_hsmmc_host *host = param;
	struct dma_chan *chan;
	struct mmc_data *data;
	int req_in_progress;

	spin_lock_irq(&host->irq_lock);
	if (host->dma_ch < 0) {
		spin_unlock_irq(&host->irq_lock);
		return;
	}

	data = host->mrq->data;
	chan = omap_hsmmc_get_dma_chan(host, data);
	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev,
			     data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));

	req_in_progress = host->req_in_progress;
	host->dma_ch = -1;
	spin_unlock_irq(&host->irq_lock);

	/* If DMA has finished after TC, complete the request */
	if (!req_in_progress) {
		struct mmc_request *mrq = host->mrq;

		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
	}
}

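/*
 * Map the scatterlist for DMA, or reuse a mapping prepared by pre_req().
 * The cookie in mmc_data tells us whether a pre-mapped request is valid.
 */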
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
				       struct mmc_data *data,
				       struct omap_hsmmc_next *next,
				       struct dma_chan *chan)
{
	int dma_len;

	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
	if (next || data->host_cookie != host->next_data.cookie) {
		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
				     omap_hsmmc_get_dma_dir(host, data));

	} else {
		dma_len = host->next_data.dma_len;
		host->next_data.dma_len = 0;
	}


	if (dma_len == 0)
		return -EINVAL;

	if (next) {
		next->dma_len = dma_len;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->dma_len = dma_len;

	return 0;
}

/*
 * Routine to configure and start DMA for the MMC card
 */
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
					struct mmc_request *req)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *tx;
	int ret = 0, i;
	struct mmc_data *data = req->data;
	struct dma_chan *chan;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		struct scatterlist *sgl;

		sgl = data->sg + i;
		if (sgl->length % data->blksz)
			return -EINVAL;
	}
	if ((data->blksz % 4) != 0)
		/* REVISIT: The MMC buffer increments only when MSB is written.
		 * Return error for blksz which is non multiple of four.
		 */
		return -EINVAL;

	BUG_ON(host->dma_ch != -1);

	chan = omap_hsmmc_get_dma_chan(host, data);

	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		/* FIXME: cleanup */
		return -1;
	}

	tx->callback = omap_hsmmc_dma_callback;
	tx->callback_param = host;

	/* Does not fail */
	dmaengine_submit(tx);

	host->dma_ch = 1;

	return 0;
}

static void set_data_timeout(struct omap_hsmmc_host *host,
			     unsigned int timeout_ns,
			     unsigned int timeout_clks)
{
	unsigned int timeout, cycle_ns;
	uint32_t reg, clkd, dto = 0;

	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
	if (clkd == 0)
		clkd = 1;

	cycle_ns = 1000000000 / (host->clk_rate / clkd);
	timeout = timeout_ns / cycle_ns;
	timeout += timeout_clks;
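	/*
	 * Convert the cycle count into the DTO exponent: each step of the
	 * DTO field doubles the timeout, starting at 2^13 clock cycles,
	 * so locate the most significant bit, round up and clamp to 14.
	 */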
	if (timeout) {
		while ((timeout & 0x80000000) == 0) {
			dto += 1;
			timeout <<= 1;
		}
		dto = 31 - dto;
		timeout <<= 1;
		if (timeout && dto)
			dto += 1;
		if (dto >= 13)
			dto -= 13;
		else
			dto = 0;
		if (dto > 14)
			dto = 14;
	}

	reg &= ~DTO_MASK;
	reg |= dto << DTO_SHIFT;
	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}

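/*
 * Program the block size/count and the data timeout, then kick the
 * dmaengine descriptor that was prepared earlier for this request.
 */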
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
{
	struct mmc_request *req = host->mrq;
	struct dma_chan *chan;

	if (!req->data)
		return;
	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
				| (req->data->blocks << 16));
	set_data_timeout(host, req->data->timeout_ns,
				req->data->timeout_clks);
	chan = omap_hsmmc_get_dma_chan(host, req->data);
	dma_async_issue_pending(chan);
}

/*
 * Configure block length for MMC/SD cards and initiate the transfer.
 */
static int
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
{
	int ret;
	host->data = req->data;

	if (req->data == NULL) {
		OMAP_HSMMC_WRITE(host->base, BLK, 0);
		/*
		 * Set an arbitrary 100ms data timeout for commands with
		 * busy signal.
		 */
		if (req->cmd->flags & MMC_RSP_BUSY)
			set_data_timeout(host, 100000000U, 0);
		return 0;
	}

	if (host->use_dma) {
		ret = omap_hsmmc_setup_dma_transfer(host, req);
		if (ret != 0) {
			dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
			return ret;
		}
	}
	return 0;
}

static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->use_dma && data->host_cookie) {
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);

		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));
		data->host_cookie = 0;
	}
}

static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return ;
	}

	if (host->use_dma) {
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);

		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
						&host->next_data, c))
			mrq->data->host_cookie = 0;
	}
}

/*
 * Request function. for read/write operation
 */
static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	int err;

	BUG_ON(host->req_in_progress);
	BUG_ON(host->dma_ch != -1);
	if (host->protect_card) {
		if (host->reqs_blocked < 3) {
			/*
			 * Ensure the controller is left in a consistent
			 * state by resetting the command and data state
			 * machines.
			 */
			omap_hsmmc_reset_controller_fsm(host, SRD);
			omap_hsmmc_reset_controller_fsm(host, SRC);
			host->reqs_blocked += 1;
		}
		req->cmd->error = -EBADF;
		if (req->data)
			req->data->error = -EBADF;
		req->cmd->retries = 0;
		mmc_request_done(mmc, req);
		return;
	} else if (host->reqs_blocked)
		host->reqs_blocked = 0;
	WARN_ON(host->mrq != NULL);
	host->mrq = req;
	host->clk_rate = clk_get_rate(host->fclk);
	err = omap_hsmmc_prepare_data(host, req);
	if (err) {
		req->cmd->error = err;
		if (req->data)
			req->data->error = err;
		host->mrq = NULL;
		mmc_request_done(mmc, req);
		return;
	}
	if (req->sbc && !(host->flags & AUTO_CMD23)) {
		omap_hsmmc_start_command(host, req->sbc, NULL);
		return;
	}

	omap_hsmmc_start_dma_transfer(host);
	omap_hsmmc_start_command(host, req->cmd, req->data);
}

/* Routine to configure clock values. Exposed API to core */
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	int do_send_init_stream = 0;

	pm_runtime_get_sync(host->dev);

	if (ios->power_mode != host->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			mmc_pdata(host)->set_power(host->dev, host->slot_id,
						   0, 0);
			break;
		case MMC_POWER_UP:
			mmc_pdata(host)->set_power(host->dev, host->slot_id,
						   1, ios->vdd);
			break;
		case MMC_POWER_ON:
			do_send_init_stream = 1;
			break;
		}
		host->power_mode = ios->power_mode;
	}

	/* FIXME: set registers based only on changes to ios */

	omap_hsmmc_set_bus_width(host);

	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		/* Only MMC1 can interface at 3V without some flavor
		 * of external transceiver; but they all handle 1.8V.
		 */
		if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
			(ios->vdd == DUAL_VOLT_OCR_BIT)) {
				/*
				 * The mmc_select_voltage fn of the core does
				 * not seem to set the power_mode to
				 * MMC_POWER_UP upon recalculating the voltage.
				 * vdd 1.8v.
				 */
			if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
				dev_dbg(mmc_dev(host->mmc),
						"Switch operation failed\n");
		}
	}

	omap_hsmmc_set_clock(host);

	if (do_send_init_stream)
		send_init_stream(host);

	omap_hsmmc_set_bus_mode(host);

	pm_runtime_put_autosuspend(host->dev);
}

static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (!mmc_pdata(host)->card_detect)
		return -ENOSYS;
	return mmc_pdata(host)->card_detect(host->dev, host->slot_id);
}

static int omap_hsmmc_get_ro(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (!mmc_pdata(host)->get_ro)
		return -ENOSYS;
	return mmc_pdata(host)->get_ro(host->dev, 0);
}

static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mmc_pdata(host)->init_card)
		mmc_pdata(host)->init_card(card);
}

static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	u32 irq_mask, con;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

	con = OMAP_HSMMC_READ(host->base, CON);
	irq_mask = OMAP_HSMMC_READ(host->base, ISE);
	if (enable) {
		host->flags |= HSMMC_SDIO_IRQ_ENABLED;
		irq_mask |= CIRQ_EN;
		con |= CTPL | CLKEXTFREE;
	} else {
		host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
		irq_mask &= ~CIRQ_EN;
		con &= ~(CTPL | CLKEXTFREE);
	}
	OMAP_HSMMC_WRITE(host->base, CON, con);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);

	/*
	 * if enable, piggy back detection on current request
	 * but always disable immediately
	 */
	if (!host->req_in_progress || !enable)
		OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* flush posted write */
	OMAP_HSMMC_READ(host->base, IE);

	spin_unlock_irqrestore(&host->irq_lock, flags);
}

static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * For omaps with wake-up path, wakeirq will be irq from pinctrl and
	 * for other omaps, wakeirq will be from GPIO (dat line remuxed to
	 * gpio). wakeirq is needed to detect sdio irq in runtime suspend state
	 * with functional clock disabled.
	 */
	if (!host->dev->of_node || !host->wake_irq)
		return -ENODEV;

	/* Prevent auto-enabling of IRQ */
	irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
	ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
			       IRQF_TRIGGER_LOW | IRQF_ONESHOT,
			       mmc_hostname(mmc), host);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
		goto err;
	}

	/*
	 * Some omaps don't have wake-up path from deeper idle states
	 * and need to remux SDIO DAT1 to GPIO for wake-up from idle.
	 */
	if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
		struct pinctrl *p = devm_pinctrl_get(host->dev);
		if (!p) {
			ret = -ENODEV;
			goto err_free_irq;
		}
		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
			dev_info(host->dev, "missing default pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}

		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
			dev_info(host->dev, "missing idle pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}
		devm_pinctrl_put(p);
	}

	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | IWE);
	return 0;

err_free_irq:
	devm_free_irq(host->dev, host->wake_irq, host);
err:
	dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
	host->wake_irq = 0;
	return ret;
}

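/*
 * Set the initial bus voltage (SDVS) and voltage capabilities according
 * to whether this instance supports dual-voltage cards, then enable SD
 * bus power.
 */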
static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
{
	u32 hctl, capa, value;

	/* Only MMC1 supports 3.0V */
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
	OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);

	value = OMAP_HSMMC_READ(host->base, CAPA);
	OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);

	/* Set SD bus power bit */
	set_sd_bus_power(host);
}

static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	pm_runtime_get_sync(host->dev);

	return 0;
}

static int omap_hsmmc_disable_fclk(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;
}

static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
				     unsigned int direction, int blk_size)
{
	/* This controller can't do multiblock reads due to hw bugs */
	if (direction == MMC_DATA_READ)
		return 1;

	return blk_size;
}

static struct mmc_host_ops omap_hsmmc_ops = {
	.enable = omap_hsmmc_enable_fclk,
	.disable = omap_hsmmc_disable_fclk,
	.post_req = omap_hsmmc_post_req,
	.pre_req = omap_hsmmc_pre_req,
	.request = omap_hsmmc_request,
	.set_ios = omap_hsmmc_set_ios,
	.get_cd = omap_hsmmc_get_cd,
	.get_ro = omap_hsmmc_get_ro,
	.init_card = omap_hsmmc_init_card,
	.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
};

#ifdef CONFIG_DEBUG_FS

static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
{
	struct mmc_host *mmc = s->private;
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	seq_printf(s, "mmc%d:\n", mmc->index);
	seq_printf(s, "sdio irq mode\t%s\n",
		   (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");

	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		seq_printf(s, "sdio irq \t%s\n",
			   (host->flags & HSMMC_SDIO_IRQ_ENABLED) ?  "enabled"
			   : "disabled");
	}
	seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);

	pm_runtime_get_sync(host->dev);
	seq_puts(s, "\nregs:\n");
	seq_printf(s, "CON:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CON));
	seq_printf(s, "PSTATE:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, PSTATE));
	seq_printf(s, "HCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, HCTL));
	seq_printf(s, "SYSCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, SYSCTL));
	seq_printf(s, "IE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, IE));
	seq_printf(s, "ISE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, ISE));
	seq_printf(s, "CAPA:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CAPA));

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;
}

static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, omap_hsmmc_regs_show, inode->i_private);
}

static const struct file_operations mmc_regs_fops = {
	.open           = omap_hsmmc_regs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
	if (mmc->debugfs_root)
		debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
			mmc, &mmc_regs_fops);
}

#else

static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}

#endif

#ifdef CONFIG_OF
static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
	/* See 35xx errata 2.1.1.128 in SPRZ278F */
	.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};

static const struct omap_mmc_of_data omap4_mmc_of_data = {
	.reg_offset = 0x100,
};
static const struct omap_mmc_of_data am33xx_mmc_of_data = {
	.reg_offset = 0x100,
	.controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
};

static const struct of_device_id omap_mmc_of_match[] = {
	{
		.compatible = "ti,omap2-hsmmc",
	},
	{
		.compatible = "ti,omap3-pre-es3-hsmmc",
		.data = &omap3_pre_es3_mmc_of_data,
	},
	{
		.compatible = "ti,omap3-hsmmc",
	},
	{
		.compatible = "ti,omap4-hsmmc",
		.data = &omap4_mmc_of_data,
	},
	{
		.compatible = "ti,am33xx-hsmmc",
		.data = &am33xx_mmc_of_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);

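/*
 * Translate device tree properties into a platform_data structure so
 * DT and legacy board-file boots share one configuration path.  Returns
 * ERR_PTR(-EPROBE_DEFER) if the CD/WP GPIOs are not available yet.
 *
 * Illustrative node (node name and unit address are only an example):
 *
 *	mmc1: mmc@4809c000 {
 *		compatible = "ti,omap4-hsmmc";
 *		ti,dual-volt;
 *		bus-width = <4>;
 *		ti,needs-special-reset;
 *	};
 */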
static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
{
	struct omap_hsmmc_platform_data *pdata;
	struct device_node *np = dev->of_node;
	u32 bus_width, max_freq;
	int cd_gpio, wp_gpio;

	cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
	wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
	if (cd_gpio == -EPROBE_DEFER || wp_gpio == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM); /* out of memory */

	if (of_find_property(np, "ti,dual-volt", NULL))
		pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;

	/* This driver only supports 1 slot */
	pdata->nr_slots = 1;
	pdata->switch_pin = cd_gpio;
	pdata->gpio_wp = wp_gpio;

	if (of_find_property(np, "ti,non-removable", NULL)) {
		pdata->nonremovable = true;
		pdata->no_regulator_off_init = true;
	}
	of_property_read_u32(np, "bus-width", &bus_width);
	if (bus_width == 4)
		pdata->caps |= MMC_CAP_4_BIT_DATA;
	else if (bus_width == 8)
		pdata->caps |= MMC_CAP_8_BIT_DATA;

	if (of_find_property(np, "ti,needs-special-reset", NULL))
		pdata->features |= HSMMC_HAS_UPDATED_RESET;

	if (!of_property_read_u32(np, "max-frequency", &max_freq))
		pdata->max_freq = max_freq;

	if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
		pdata->features |= HSMMC_HAS_HSPE_SUPPORT;

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	return pdata;
}
#else
static inline struct omap_hsmmc_platform_data
			*of_get_hsmmc_pdata(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

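/*
 * Probe: map the controller, allocate the mmc_host, then bring up
 * clocks, runtime PM, DMA channels and interrupts before registering
 * the host with the MMC core.
 */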
static int omap_hsmmc_probe(struct platform_device *pdev)
{
	struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_host *mmc;
	struct omap_hsmmc_host *host = NULL;
	struct resource *res;
	int ret, irq;
	const struct of_device_id *match;
	dma_cap_mask_t mask;
	unsigned tx_req, rx_req;
	const struct omap_mmc_of_data *data;
	void __iomem *base;

	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
	if (match) {
		pdata = of_get_hsmmc_pdata(&pdev->dev);

		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		if (match->data) {
			data = match->data;
			pdata->reg_offset = data->reg_offset;
			pdata->controller_flags |= data->controller_flags;
		}
	}

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform Data is missing\n");
		return -ENXIO;
	}

	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "No Slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto err;
	}

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->pdata	= pdata;
	host->dev	= &pdev->dev;
	host->use_dma	= 1;
	host->dma_ch	= -1;
	host->irq	= irq;
	host->slot_id	= 0;
	host->mapbase	= res->start + pdata->reg_offset;
	host->base	= base + pdata->reg_offset;
	host->power_mode = MMC_POWER_OFF;
	host->next_data.cookie = 1;
	host->pbias_enabled = 0;

	ret = omap_hsmmc_gpio_init(host, pdata);
	if (ret)
		goto err_gpio;

	platform_set_drvdata(pdev, host);

	if (pdev->dev.of_node)
		host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);

	mmc->ops	= &omap_hsmmc_ops;

	mmc->f_min = OMAP_MMC_MIN_CLOCK;

	if (pdata->max_freq > 0)
		mmc->f_max = pdata->max_freq;
	else
		mmc->f_max = OMAP_MMC_MAX_CLOCK;

	spin_lock_init(&host->irq_lock);

	host->fclk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		host->fclk = NULL;
		goto err1;
	}

	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
		omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
	}

	pm_runtime_enable(host->dev);
	pm_runtime_get_sync(host->dev);
	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(host->dev);

	omap_hsmmc_context_save(host);

	host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
	/*
	 * MMC can still work without the debounce clock.
	 */
	if (IS_ERR(host->dbclk)) {
		host->dbclk = NULL;
	} else if (clk_prepare_enable(host->dbclk) != 0) {
		dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
		host->dbclk = NULL;
	}

	/*
	 * Since we do only SG emulation, we can have as many segs
	 * as we want.
	 */
	mmc->max_segs = 1024;

	mmc->max_blk_size = 512;       /* block length capped at 512; HW allows up to 1024 */
	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;

	mmc->caps |= mmc_pdata(host)->caps;
	if (mmc->caps & MMC_CAP_8_BIT_DATA)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (mmc_pdata(host)->nonremovable)
		mmc->caps |= MMC_CAP_NONREMOVABLE;

	mmc->pm_caps = mmc_pdata(host)->pm_caps;

	omap_hsmmc_conf_bus_power(host);

	if (!pdev->dev.of_node) {
		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		tx_req = res->start;

		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		rx_req = res->start;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->rx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &rx_req, &pdev->dev, "rx");

	if (!host->rx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
		ret = -ENXIO;
		goto err_irq;
	}

	host->tx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &tx_req, &pdev->dev, "tx");

	if (!host->tx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
		ret = -ENXIO;
		goto err_irq;
	}

	/* Request IRQ for MMC operations */
	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
		goto err_irq;
	}

	if (omap_hsmmc_have_reg() && !mmc_pdata(host)->set_power) {
		ret = omap_hsmmc_reg_get(host);
		if (ret)
			goto err_irq;
		host->use_reg = 1;
	}

	mmc->ocr_avail = mmc_pdata(host)->ocr_mask;

	/* Request IRQ for card detect */
	if ((mmc_pdata(host)->card_detect_irq)) {
		ret = devm_request_threaded_irq(&pdev->dev,
						mmc_pdata(host)->card_detect_irq,
						NULL, omap_hsmmc_detect,
					   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					   mmc_hostname(mmc), host);
		if (ret) {
			dev_err(mmc_dev(host->mmc),
				"Unable to grab MMC CD IRQ\n");
			goto err_irq_cd;
		}
		pdata->suspend = omap_hsmmc_suspend_cdirq;
		pdata->resume = omap_hsmmc_resume_cdirq;
	}

	omap_hsmmc_disable_irq(host);

	/*
	 * For now, only support SDIO interrupt if we have a separate
	 * wake-up interrupt configured from device tree. This is because
	 * the wake-up interrupt is needed for idle state and some
	 * platforms need special quirks. And we don't want to add new
	 * legacy mux platform init code callbacks any longer as we
	 * are moving to DT-based booting anyway.
	 */
	ret = omap_hsmmc_configure_wake_irq(host);
	if (!ret)
		mmc->caps |= MMC_CAP_SDIO_IRQ;

	omap_hsmmc_protect_card(host);

	mmc_add_host(mmc);

	if (mmc_pdata(host)->name != NULL) {
		ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
		if (ret < 0)
			goto err_slot_name;
	}
	if (mmc_pdata(host)->card_detect_irq &&
	    mmc_pdata(host)->get_cover_state) {
		ret = device_create_file(&mmc->class_dev,
					&dev_attr_cover_switch);
		if (ret < 0)
			goto err_slot_name;
	}

	omap_hsmmc_debugfs(mmc);
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;

err_slot_name:
	mmc_remove_host(mmc);
err_irq_cd:
	if (host->use_reg)
		omap_hsmmc_reg_put(host);
err_irq:
	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);
	pm_runtime_put_sync(host->dev);
	pm_runtime_disable(host->dev);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);
err1:
	omap_hsmmc_gpio_free(host, pdata);
err_gpio:
	mmc_free_host(mmc);
err:
	return ret;
}

static int omap_hsmmc_remove(struct platform_device *pdev)
{
	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);

	pm_runtime_get_sync(host->dev);
	mmc_remove_host(host->mmc);
	if (host->use_reg)
		omap_hsmmc_reg_put(host);

	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);

	pm_runtime_put_sync(host->dev);
	pm_runtime_disable(host->dev);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	omap_hsmmc_gpio_free(host, host->pdata);
	mmc_free_host(host->mmc);

	return 0;
}

#ifdef CONFIG_PM
static int omap_hsmmc_prepare(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (host->pdata->suspend)
		return host->pdata->suspend(dev, host->slot_id);

	return 0;
}

static void omap_hsmmc_complete(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (host->pdata->resume)
		host->pdata->resume(dev, host->slot_id);

}

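/*
 * System suspend: mask the controller interrupts and drop SD bus power
 * unless the card has to stay powered (MMC_PM_KEEP_POWER).
 */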
static int omap_hsmmc_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	pm_runtime_get_sync(host->dev);

	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, HCTL,
				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
	}

	/* do not wake up due to sdio irq */
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
		disable_irq(host->wake_irq);

	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	pm_runtime_put_sync(host->dev);
	return 0;
}

/* Routine to resume the MMC device */
static int omap_hsmmc_resume(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	pm_runtime_get_sync(host->dev);

	if (host->dbclk)
		clk_prepare_enable(host->dbclk);

	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
		omap_hsmmc_conf_bus_power(host);

	omap_hsmmc_protect_card(host);

	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
		enable_irq(host->wake_irq);

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
	return 0;
}

#else
#define omap_hsmmc_prepare	NULL
#define omap_hsmmc_complete	NULL
#define omap_hsmmc_suspend	NULL
#define omap_hsmmc_resume	NULL
#endif

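/*
 * Runtime suspend: save the register context and move the pins to their
 * idle state.  When the SDIO IRQ is in use, hand interrupt duty over to
 * the wake-up IRQ, or abort with -EBUSY if an SDIO interrupt is already
 * pending (DAT1 low).
 */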
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host;
	unsigned long flags;
	int ret = 0;

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_save(host);
	dev_dbg(dev, "disabled\n");

	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* disable sdio irq handling to prevent race */
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);

		if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
			/*
			 * DAT1 is low, so an SDIO interrupt is pending.  Its
			 * handler may already be running on another core, so
			 * abort the suspend.
			 */
			dev_dbg(dev, "pending sdio irq, abort suspend\n");
			OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
			OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
			OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
			pm_runtime_mark_last_busy(dev);
			ret = -EBUSY;
			goto abort;
		}

		pinctrl_pm_select_idle_state(dev);

		WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
		enable_irq(host->wake_irq);
		host->flags |= HSMMC_WAKE_IRQ_ENABLED;
	} else {
		pinctrl_pm_select_idle_state(dev);
	}

abort:
	spin_unlock_irqrestore(&host->irq_lock, flags);
	return ret;
}

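/*
 * Runtime resume: restore the register context and default pin state,
 * then re-enable the card interrupt and release the wake-up IRQ if SDIO
 * interrupts were in use across the suspend.
 */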
static int omap_hsmmc_runtime_resume(struct device *dev)
{
	struct omap_hsmmc_host *host;
	unsigned long flags;

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_restore(host);
	dev_dbg(dev, "enabled\n");

	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* sdio irq flag can't change while in runtime suspend */
		if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
			disable_irq_nosync(host->wake_irq);
			host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
		}

		pinctrl_pm_select_default_state(host->dev);

		/* the SDIO IRQ may have been lost if the pinmux was wrong */
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
		OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
	} else {
		pinctrl_pm_select_default_state(host->dev);
	}
	spin_unlock_irqrestore(&host->irq_lock, flags);
	return 0;
}

static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
	.suspend	= omap_hsmmc_suspend,
	.resume		= omap_hsmmc_resume,
	.prepare	= omap_hsmmc_prepare,
	.complete	= omap_hsmmc_complete,
	.runtime_suspend = omap_hsmmc_runtime_suspend,
	.runtime_resume = omap_hsmmc_runtime_resume,
};

static struct platform_driver omap_hsmmc_driver = {
	.probe		= omap_hsmmc_probe,
	.remove		= omap_hsmmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.pm = &omap_hsmmc_dev_pm_ops,
		.of_match_table = of_match_ptr(omap_mmc_of_match),
	},
};

module_platform_driver(omap_hsmmc_driver);
MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");