omap_hsmmc.c 57.6 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
/*
 * drivers/mmc/host/omap_hsmmc.c
 *
 * Driver for OMAP2430/3430 MMC controller.
 *
 * Copyright (C) 2007 Texas Instruments.
 *
 * Authors:
 *	Syed Mohammed Khasim	<x0khasim@ti.com>
 *	Madhusudhan		<madhu.cr@ti.com>
 *	Mohit Jalori		<mjalori@ti.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
20
#include <linux/kernel.h>
21
#include <linux/debugfs.h>
22
#include <linux/dmaengine.h>
23
#include <linux/seq_file.h>
24
#include <linux/sizes.h>
25 26 27 28 29 30
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/clk.h>
31
#include <linux/of.h>
32
#include <linux/of_irq.h>
33 34
#include <linux/of_gpio.h>
#include <linux/of_device.h>
35
#include <linux/omap-dmaengine.h>
36
#include <linux/mmc/host.h>
37
#include <linux/mmc/core.h>
38
#include <linux/mmc/mmc.h>
39
#include <linux/mmc/slot-gpio.h>
40
#include <linux/io.h>
41
#include <linux/irq.h>
42 43
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
44
#include <linux/pinctrl/consumer.h>
45
#include <linux/pm_runtime.h>
46
#include <linux/pm_wakeirq.h>
47
#include <linux/platform_data/hsmmc-omap.h>
48 49

/* OMAP HSMMC Host Controller Registers (offsets from the mapped base) */
#define OMAP_HSMMC_SYSSTATUS	0x0014
#define OMAP_HSMMC_CON		0x002C
#define OMAP_HSMMC_SDMASA	0x0100
#define OMAP_HSMMC_BLK		0x0104
#define OMAP_HSMMC_ARG		0x0108
#define OMAP_HSMMC_CMD		0x010C
#define OMAP_HSMMC_RSP10	0x0110
#define OMAP_HSMMC_RSP32	0x0114
#define OMAP_HSMMC_RSP54	0x0118
#define OMAP_HSMMC_RSP76	0x011C
#define OMAP_HSMMC_DATA		0x0120
#define OMAP_HSMMC_PSTATE	0x0124
#define OMAP_HSMMC_HCTL		0x0128
#define OMAP_HSMMC_SYSCTL	0x012C
#define OMAP_HSMMC_STAT		0x0130
#define OMAP_HSMMC_IE		0x0134
#define OMAP_HSMMC_ISE		0x0138
#define OMAP_HSMMC_AC12		0x013C
#define OMAP_HSMMC_CAPA		0x0140

/* CAPA: supported voltage capability bits */
#define VS18			(1 << 26)
#define VS30			(1 << 25)
#define HSS			(1 << 21)
/* HCTL: SD bus voltage select field */
#define SDVS18			(0x5 << 9)
#define SDVS30			(0x6 << 9)
#define SDVS33			(0x7 << 9)
#define SDVS_MASK		0x00000E00
#define SDVSCLR			0xFFFFF1FF
#define SDVSDET			0x00000400
#define AUTOIDLE		0x1
#define SDBP			(1 << 8)
#define DTO			0xe
/* SYSCTL: internal clock enable/stable and card clock enable */
#define ICE			0x1
#define ICS			0x2
#define CEN			(1 << 2)
#define CLKD_MAX		0x3FF		/* max clock divisor: 1023 */
#define CLKD_MASK		0x0000FFC0
#define CLKD_SHIFT		6
#define DTO_MASK		0x000F0000
#define DTO_SHIFT		16
#define INIT_STREAM		(1 << 1)
#define ACEN_ACMD23		(2 << 2)
/* CMD: data present select and transfer direction */
#define DP_SELECT		(1 << 21)
#define DDIR			(1 << 4)
#define DMAE			0x1
#define MSBS			(1 << 5)
#define BCE			(1 << 1)
#define FOUR_BIT		(1 << 1)
#define HSPE			(1 << 2)
#define IWE			(1 << 24)
#define DDR			(1 << 19)
#define CLKEXTFREE		(1 << 16)
#define CTPL			(1 << 11)
#define DW8			(1 << 5)
#define OD			0x1
#define STAT_CLEAR		0xFFFFFFFF
#define INIT_STREAM_CMD		0x00000000
#define DUAL_VOLT_OCR_BIT	7
/* SYSCTL: soft reset of command (SRC) / data (SRD) state machines */
#define SRC			(1 << 25)
#define SRD			(1 << 26)
#define SOFTRESET		(1 << 1)

/* PSTATE */
#define DLEV_DAT(x)		(1 << (20 + (x)))

/* Interrupt masks for IE and ISE register */
#define CC_EN			(1 << 0)
#define TC_EN			(1 << 1)
#define BWR_EN			(1 << 4)
#define BRR_EN			(1 << 5)
#define CIRQ_EN			(1 << 8)
#define ERR_EN			(1 << 15)
#define CTO_EN			(1 << 16)
#define CCRC_EN			(1 << 17)
#define CEB_EN			(1 << 18)
#define CIE_EN			(1 << 19)
#define DTO_EN			(1 << 20)
#define DCRC_EN			(1 << 21)
#define DEB_EN			(1 << 22)
#define ACE_EN			(1 << 24)
#define CERR_EN			(1 << 28)
#define BADA_EN			(1 << 29)

#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
		DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
		BRR_EN | BWR_EN | TC_EN | CC_EN)

/* AC12 (auto-CMD12/23 error status) register bits */
#define CNI	(1 << 7)
#define ACIE	(1 << 4)
#define ACEB	(1 << 3)
#define ACCE	(1 << 2)
#define ACTO	(1 << 1)
#define ACNE	(1 << 0)

#define MMC_AUTOSUSPEND_DELAY	100
#define MMC_TIMEOUT_MS		20		/* 20 mSec */
#define MMC_TIMEOUT_US		20000		/* 20000 micro Sec */
#define OMAP_MMC_MIN_CLOCK	400000
#define OMAP_MMC_MAX_CLOCK	52000000
#define DRIVER_NAME		"omap_hsmmc"

#define VDD_1V8			1800000		/* 180000 uV */
#define VDD_3V0			3000000		/* 300000 uV */
#define VDD_165_195		(ffs(MMC_VDD_165_195) - 1)

/*
 * One controller can have multiple slots, like on some omap boards using
 * omap.c controller driver. Luckily this is not currently done on any known
 * omap_hsmmc.c device.
 */
#define mmc_pdata(host)		host->pdata

/*
 * MMC Host controller read/write API's
 */
#define OMAP_HSMMC_READ(base, reg)	\
	__raw_readl((base) + OMAP_HSMMC_##reg)

#define OMAP_HSMMC_WRITE(base, reg, val) \
	__raw_writel((val), (base) + OMAP_HSMMC_##reg)

171 172 173 174 175
/* State for the pre/post-request DMA mapping optimization. */
struct omap_hsmmc_next {
	unsigned int	dma_len;	/* number of mapped sg entries */
	s32		cookie;		/* matches data->host_cookie for a pre-mapped request */
};

176
/* Per-controller driver state. */
struct omap_hsmmc_host {
	struct	device		*dev;
	struct	mmc_host	*mmc;
	struct	mmc_request	*mrq;	/* request currently in flight */
	struct	mmc_command	*cmd;	/* command currently in flight */
	struct	mmc_data	*data;	/* data transfer currently in flight */
	struct	clk		*fclk;
	struct	clk		*dbclk;
	/*
	 * vcc == configured supply
	 * vcc_aux == optional
	 *   -	MMC1, supply for DAT4..DAT7
	 *   -	MMC2/MMC2, external level shifter voltage supply, for
	 *	chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
	 */
	struct	regulator	*vcc;
	struct	regulator	*vcc_aux;
	struct	regulator	*pbias;
	bool			pbias_enabled;	/* tracks pbias regulator enable refcount */
	void	__iomem		*base;
	resource_size_t		mapbase;
	spinlock_t		irq_lock; /* Prevent races with irq handler */
	unsigned int		dma_len;
	unsigned int		dma_sg_idx;
	unsigned char		bus_mode;
	unsigned char		power_mode;
	int			suspended;
	/* shadow copies of registers, used to detect context loss */
	u32			con;
	u32			hctl;
	u32			sysctl;
	u32			capa;
	int			irq;
	int			wake_irq;
	int			use_dma, dma_ch;	/* dma_ch == -1 means no DMA in flight */
	struct dma_chan		*tx_chan;
	struct dma_chan		*rx_chan;
	int			response_busy;	/* set while waiting for an R1b busy phase */
	int			context_loss;	/* number of restores performed */
	int			protect_card;
	int			reqs_blocked;
	int			req_in_progress;
	unsigned long		clk_rate;
	unsigned int		flags;
#define AUTO_CMD23		(1 << 0)        /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED	(1 << 1)        /* SDIO irq enabled */
	struct omap_hsmmc_next	next_data;
	struct	omap_hsmmc_platform_data	*pdata;

	/* return MMC cover switch state, can be NULL if not supported.
	 *
	 * possible return values:
	 *   0 - closed
	 *   1 - open
	 */
	int (*get_cover_state)(struct device *dev);

	int (*card_detect)(struct device *dev);
};

235 236 237 238 239
/* Per-SoC match data selected from the device-tree compatible string. */
struct omap_mmc_of_data {
	u32 reg_offset;		/* register base offset for this SoC variant */
	u8 controller_flags;	/* OMAP_HSMMC_* quirk flags */
};

240 241
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);

242
static int omap_hsmmc_card_detect(struct device *dev)
243
{
244
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
245

246
	return mmc_gpio_get_cd(host->mmc);
247 248
}

249
static int omap_hsmmc_get_cover_state(struct device *dev)
250
{
251
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
252

253
	return mmc_gpio_get_cd(host->mmc);
254 255
}

256 257
#ifdef CONFIG_REGULATOR

258
/*
 * Power the card slot up or down.
 *
 * @power_on: non-zero to power up, zero to power down.
 * @vdd: selected OCR bit number (compared against VDD_165_195 to pick
 *       the pbias voltage).
 *
 * Sequencing matters here: pbias is disabled before touching vcc/vcc_aux
 * and only re-enabled (at the matching voltage) afterwards.  Board hooks
 * before_set_reg/after_set_reg bracket the regulator changes.
 * Returns 0 on success or a negative errno from the regulator framework.
 */
static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
{
	struct omap_hsmmc_host *host =
		platform_get_drvdata(to_platform_device(dev));
	int ret = 0;

	/* Board-supplied power hook takes precedence over regulators */
	if (mmc_pdata(host)->set_power)
		return mmc_pdata(host)->set_power(dev, power_on, vdd);

	/*
	 * If we don't see a Vcc regulator, assume it's a fixed
	 * voltage always-on regulator.
	 */
	if (!host->vcc)
		return 0;

	if (mmc_pdata(host)->before_set_reg)
		mmc_pdata(host)->before_set_reg(dev, power_on, vdd);

	/* Drop pbias while the supplies are reconfigured below */
	if (host->pbias) {
		if (host->pbias_enabled == 1) {
			ret = regulator_disable(host->pbias);
			if (!ret)
				host->pbias_enabled = 0;
		}
		/* NOTE(review): return value ignored here — best effort */
		regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
	}

	/*
	 * Assume Vcc regulator is used only to power the card ... OMAP
	 * VDDS is used to power the pins, optionally with a transceiver to
	 * support cards using voltages other than VDDS (1.8V nominal).  When a
	 * transceiver is used, DAT3..7 are muxed as transceiver control pins.
	 *
	 * In some cases this regulator won't support enable/disable;
	 * e.g. it's a fixed rail for a WLAN chip.
	 *
	 * In other cases vcc_aux switches interface power.  Example, for
	 * eMMC cards it represents VccQ.  Sometimes transceivers or SDIO
	 * chips/cards need an interface voltage rail too.
	 */
	if (power_on) {
		if (host->vcc)
			ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
		/* Enable interface voltage rail, if needed */
		if (ret == 0 && host->vcc_aux) {
			ret = regulator_enable(host->vcc_aux);
			/* roll back vcc if the aux rail failed to come up */
			if (ret < 0 && host->vcc)
				ret = mmc_regulator_set_ocr(host->mmc,
							host->vcc, 0);
		}
	} else {
		/* Shut down the rail */
		if (host->vcc_aux)
			ret = regulator_disable(host->vcc_aux);
		if (host->vcc) {
			/* Then proceed to shut down the local regulator */
			ret = mmc_regulator_set_ocr(host->mmc,
						host->vcc, 0);
		}
	}

	/* Re-program and re-enable pbias at the voltage matching vdd */
	if (host->pbias) {
		if (vdd <= VDD_165_195)
			ret = regulator_set_voltage(host->pbias, VDD_1V8,
								VDD_1V8);
		else
			ret = regulator_set_voltage(host->pbias, VDD_3V0,
								VDD_3V0);
		if (ret < 0)
			goto error_set_power;

		if (host->pbias_enabled == 0) {
			ret = regulator_enable(host->pbias);
			if (!ret)
				host->pbias_enabled = 1;
		}
	}

	if (mmc_pdata(host)->after_set_reg)
		mmc_pdata(host)->after_set_reg(dev, power_on, vdd);

error_set_power:
	return ret;
}

/*
 * Look up the optional vmmc, vmmc_aux and pbias regulators.
 *
 * Each regulator is optional: -ENODEV is treated as "not wired" and the
 * corresponding pointer stays NULL; any other error is propagated.  When
 * vmmc exists, its OCR mask seeds (or is validated against) the platform
 * data ocr_mask.  Finally, boot-on regulators are cycled once so their
 * use counts reflect reality.  Returns 0 on success or a negative errno.
 */
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
	struct regulator *reg;
	int ocr_value = 0;
	int ret;

	/* Board hook manages power itself; no regulators to claim */
	if (mmc_pdata(host)->set_power)
		return 0;

	reg = devm_regulator_get_optional(host->dev, "vmmc");
	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);
		if (ret != -ENODEV)
			return ret;
		host->vcc = NULL;
		dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
			PTR_ERR(reg));
	} else {
		host->vcc = reg;
		ocr_value = mmc_regulator_get_ocrmask(reg);
		if (!mmc_pdata(host)->ocr_mask) {
			/* no platform mask: take the regulator's capabilities */
			mmc_pdata(host)->ocr_mask = ocr_value;
		} else {
			/* platform mask must overlap what the regulator offers */
			if (!(mmc_pdata(host)->ocr_mask & ocr_value)) {
				dev_err(host->dev, "ocrmask %x is not supported\n",
					mmc_pdata(host)->ocr_mask);
				mmc_pdata(host)->ocr_mask = 0;
				return -EINVAL;
			}
		}
	}

	/* Allow an aux regulator */
	reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);
		if (ret != -ENODEV)
			return ret;
		host->vcc_aux = NULL;
		dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
			PTR_ERR(reg));
	} else {
		host->vcc_aux = reg;
	}

	reg = devm_regulator_get_optional(host->dev, "pbias");
	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);
		if (ret != -ENODEV)
			return ret;
		host->pbias = NULL;
		dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
			PTR_ERR(reg));
	} else {
		host->pbias = reg;
	}

	/* For eMMC do not power off when not in sleep state */
	if (mmc_pdata(host)->no_regulator_off_init)
		return 0;
	/*
	 * To disable boot_on regulator, enable regulator
	 * to increase usecount and then disable it.
	 */
	if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
	    (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
		int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1;

		omap_hsmmc_set_power(host->dev, 1, vdd);
		omap_hsmmc_set_power(host->dev, 0, 0);
	}

	return 0;
}

419 420 421 422 423 424 425
/* CONFIG_REGULATOR build: regulator-based power control is available. */
static inline int omap_hsmmc_have_reg(void)
{
	return 1;
}

#else

426 427 428 429 430
/* !CONFIG_REGULATOR stub: nothing to switch, report success. */
static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
{
	return 0;
}

431 432 433 434 435 436 437 438 439 440 441 442
/* !CONFIG_REGULATOR stub: regulators can never be acquired. */
static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
	return -EINVAL;
}

/* !CONFIG_REGULATOR stub: regulator-based power control is unavailable. */
static inline int omap_hsmmc_have_reg(void)
{
	return 0;
}

#endif

443
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);
444 445 446

static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
				struct omap_hsmmc_host *host,
447
				struct omap_hsmmc_platform_data *pdata)
448 449 450
{
	int ret;

451 452
	if (gpio_is_valid(pdata->gpio_cod)) {
		ret = mmc_gpio_request_cd(mmc, pdata->gpio_cod, 0);
453 454
		if (ret)
			return ret;
455 456 457

		host->get_cover_state = omap_hsmmc_get_cover_state;
		mmc_gpio_set_cd_isr(mmc, omap_hsmmc_cover_irq);
458 459
	} else if (gpio_is_valid(pdata->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, pdata->gpio_cd, 0);
460 461 462 463
		if (ret)
			return ret;

		host->card_detect = omap_hsmmc_card_detect;
464
	}
465

466
	if (gpio_is_valid(pdata->gpio_wp)) {
467
		ret = mmc_gpio_request_ro(mmc, pdata->gpio_wp);
468
		if (ret)
469
			return ret;
470
	}
471 472 473 474

	return 0;
}

475 476 477 478 479 480 481 482 483
/*
 * Start clock to the card
 */
static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
}

484 485 486
/*
 * Stop clock to the card
 */
487
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
488 489 490 491
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
	if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
M
Masanari Iida 已提交
492
		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
493 494
}

495 496
static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
				  struct mmc_command *cmd)
497
{
498 499
	u32 irq_mask = INT_EN_MASK;
	unsigned long flags;
500 501

	if (host->use_dma)
502
		irq_mask &= ~(BRR_EN | BWR_EN);
503

504 505
	/* Disable timeout for erases */
	if (cmd->opcode == MMC_ERASE)
506
		irq_mask &= ~DTO_EN;
507

508
	spin_lock_irqsave(&host->irq_lock, flags);
509 510
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
511 512 513 514

	/* latch pending CIRQ, but don't signal MMC core */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
515
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
516
	spin_unlock_irqrestore(&host->irq_lock, flags);
517 518 519 520
}

static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
{
521 522 523 524 525 526 527 528 529
	u32 irq_mask = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	/* no transfer running but need to keep cirq if enabled */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
530
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
531
	spin_unlock_irqrestore(&host->irq_lock, flags);
532 533
}

534
/* Calculate divisor for the given clock frequency */
535
static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
536 537 538 539
{
	u16 dsor = 0;

	if (ios->clock) {
540
		dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
541 542
		if (dsor > CLKD_MAX)
			dsor = CLKD_MAX;
543 544 545 546 547
	}

	return dsor;
}

548 549 550 551 552
static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned long regval;
	unsigned long timeout;
553
	unsigned long clkdiv;
554

555
	dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
556 557 558 559 560

	omap_hsmmc_stop_clock(host);

	regval = OMAP_HSMMC_READ(host->base, SYSCTL);
	regval = regval & ~(CLKD_MASK | DTO_MASK);
561 562
	clkdiv = calc_divisor(host, ios);
	regval = regval | (clkdiv << 6) | (DTO << 16);
563 564 565 566 567 568 569 570 571 572
	OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);

	/* Wait till the ICS bit is set */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
		&& time_before(jiffies, timeout))
		cpu_relax();

573 574 575 576 577 578 579 580 581
	/*
	 * Enable High-Speed Support
	 * Pre-Requisites
	 *	- Controller should support High-Speed-Enable Bit
	 *	- Controller should not be using DDR Mode
	 *	- Controller should advertise that it supports High Speed
	 *	  in capabilities register
	 *	- MMC/SD clock coming out of controller > 25MHz
	 */
582
	if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) &&
583
	    (ios->timing != MMC_TIMING_MMC_DDR52) &&
584
	    (ios->timing != MMC_TIMING_UHS_DDR50) &&
585 586 587 588 589 590 591 592 593 594
	    ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
		regval = OMAP_HSMMC_READ(host->base, HCTL);
		if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
			regval |= HSPE;
		else
			regval &= ~HSPE;

		OMAP_HSMMC_WRITE(host->base, HCTL, regval);
	}

595 596 597
	omap_hsmmc_start_clock(host);
}

598 599 600 601 602 603
static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
604 605
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50)
606 607 608
		con |= DDR;	/* configure in DDR mode */
	else
		con &= ~DDR;
609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
		break;
	case MMC_BUS_WIDTH_4:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
		break;
	case MMC_BUS_WIDTH_1:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
		break;
	}
}

/* Select open-drain or push-pull CMD line drive via CON.OD. */
static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con_reg = OMAP_HSMMC_READ(host->base, CON);

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		con_reg |= OD;
	else
		con_reg &= ~OD;
	OMAP_HSMMC_WRITE(host->base, CON, con_reg);
}

638 639 640 641 642 643
#ifdef CONFIG_PM

/*
 * Restore the MMC host context, if it was lost as result of a
 * power state change.
 */
644
/*
 * Restore the controller context after a possible power-state loss.
 *
 * Compares the live CON/HCTL/SYSCTL/CAPA registers against the shadow
 * copies saved by omap_hsmmc_context_save(); if they still match, the
 * context survived and nothing is done.  Otherwise reprogram voltage
 * select, capabilities, bus power, and (if the slot is powered) bus
 * width, clock and bus mode.  Always returns 0.
 */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 hctl, capa;
	unsigned long timeout;

	/* context intact — registers still hold the values we saved */
	if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
	    host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
	    host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
	    host->capa == OMAP_HSMMC_READ(host->base, CAPA))
		return 0;

	host->context_loss++;

	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		/* pick 1.8V only when powered and the card runs that low */
		if (host->power_mode != MMC_POWER_OFF &&
		    (1 << ios->vdd) <= MMC_VDD_23_24)
			hctl = SDVS18;
		else
			hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		hctl |= IWE;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | hctl);

	OMAP_HSMMC_WRITE(host->base, CAPA,
			OMAP_HSMMC_READ(host->base, CAPA) | capa);

	/* turn SD bus power back on and wait for it to latch */
	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | SDBP);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
		&& time_before(jiffies, timeout))
		;

	/* start from a clean interrupt state */
	OMAP_HSMMC_WRITE(host->base, ISE, 0);
	OMAP_HSMMC_WRITE(host->base, IE, 0);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);

	/* Do not initialize card-specific things if the power is off */
	if (host->power_mode == MMC_POWER_OFF)
		goto out;

	omap_hsmmc_set_bus_width(host);

	omap_hsmmc_set_clock(host);

	omap_hsmmc_set_bus_mode(host);

out:
	dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
		host->context_loss);
	return 0;
}

/*
 * Save the MMC host context (store the number of power state changes so far).
 */
710
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
711
{
712 713 714 715
	host->con =  OMAP_HSMMC_READ(host->base, CON);
	host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
	host->sysctl =  OMAP_HSMMC_READ(host->base, SYSCTL);
	host->capa = OMAP_HSMMC_READ(host->base, CAPA);
716 717 718 719
}

#else

720
/* !CONFIG_PM stub: context cannot be lost, nothing to restore. */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	return 0;
}

725
/* !CONFIG_PM stub: no context tracking needed. */
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}

#endif

731 732 733 734
/*
 * Send init stream sequence to card
 * before sending IDLE command
 */
735
static void send_init_stream(struct omap_hsmmc_host *host)
736 737 738 739
{
	int reg = 0;
	unsigned long timeout;

740 741 742
	if (host->protect_card)
		return;

743
	disable_irq(host->irq);
744 745

	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
746 747 748 749 750
	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
751 752
	while ((reg != CC_EN) && time_before(jiffies, timeout))
		reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;
753 754 755

	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);
756 757 758 759

	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_READ(host->base, STAT);

760 761 762 763
	enable_irq(host->irq);
}

static inline
764
int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
765 766 767
{
	int r = 1;

768
	if (host->get_cover_state)
769
		r = host->get_cover_state(host->dev);
770 771 772 773
	return r;
}

static ssize_t
774
omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
775 776 777
			   char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
778
	struct omap_hsmmc_host *host = mmc_priv(mmc);
779

780 781
	return sprintf(buf, "%s\n",
			omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
782 783
}

784
static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);
785 786

static ssize_t
787
omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
788 789 790
			char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
791
	struct omap_hsmmc_host *host = mmc_priv(mmc);
792

793
	return sprintf(buf, "%s\n", mmc_pdata(host)->name);
794 795
}

796
static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
797 798 799 800 801

/*
 * Configure the response type and send the cmd.
 */
/*
 * Configure the response type and send the cmd.
 *
 * Builds the CMD register value (opcode, response type, command type,
 * data-present/DMA flags, auto-CMD23) and kicks off the command by
 * writing ARG then CMD.  Interrupt enables are armed first via
 * omap_hsmmc_enable_irq(); completion arrives through the IRQ handler.
 */
static void
omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
	struct mmc_data *data)
{
	int cmdreg = 0, resptype = 0, cmdtype = 0;

	dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
	host->cmd = cmd;

	omap_hsmmc_enable_irq(host, cmd);

	host->response_busy = 0;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			resptype = 1;	/* 136-bit response (R2) */
		else if (cmd->flags & MMC_RSP_BUSY) {
			resptype = 3;	/* 48-bit with busy (R1b) */
			host->response_busy = 1;
		} else
			resptype = 2;	/* plain 48-bit response */
	}

	/*
	 * Unlike OMAP1 controller, the cmdtype does not seem to be based on
	 * ac, bc, adtc, bcr. Only commands ending an open ended transfer need
	 * a val of 0x3, rest 0x0.
	 */
	if (cmd == host->mrq->stop)
		cmdtype = 0x3;

	cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);

	/* auto-CMD23: let the controller issue SET_BLOCK_COUNT itself */
	if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
	    host->mrq->sbc) {
		cmdreg |= ACEN_ACMD23;
		OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
	}
	if (data) {
		cmdreg |= DP_SELECT | MSBS | BCE;
		if (data->flags & MMC_DATA_READ)
			cmdreg |= DDIR;
		else
			cmdreg &= ~(DDIR);
	}

	if (host->use_dma)
		cmdreg |= DMAE;

	host->req_in_progress = 1;

	/* ARG must be programmed before CMD triggers the transfer */
	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}

856
static int
857
omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
858 859 860 861 862 863 864
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

865 866 867 868 869 870
/* Pick the TX or RX dmaengine channel matching the transfer direction. */
static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
	struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return host->tx_chan;

	return host->rx_chan;
}

871 872 873
/*
 * Finish a request: mask IRQs and hand the request back to the core,
 * unless DMA for its data phase is still running (the DMA callback will
 * complete it then).  Also releases the runtime-PM reference.
 */
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
	unsigned long flags;
	int active_ch;

	spin_lock_irqsave(&host->irq_lock, flags);
	host->req_in_progress = 0;
	active_ch = host->dma_ch;	/* sample under the lock */
	spin_unlock_irqrestore(&host->irq_lock, flags);

	omap_hsmmc_disable_irq(host);
	/* Do not complete the request if DMA is still in progress */
	if (mrq->data && host->use_dma && active_ch != -1)
		return;

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
}

891 892 893 894
/*
 * Notify the transfer complete to MMC core
 */
/*
 * Notify the transfer complete to MMC core.
 *
 * Called on TC.  With no data phase this either swallows a spurious
 * early TC from CMD6 or completes the whole request; with data it
 * accounts transferred bytes, then issues the stop command if one is
 * needed, otherwise completes the request.
 */
static void
omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	if (!data) {
		struct mmc_request *mrq = host->mrq;

		/* TC before CC from CMD6 - don't know why, but it happens */
		if (host->cmd && host->cmd->opcode == 6 &&
		    host->response_busy) {
			host->response_busy = 0;
			return;
		}

		omap_hsmmc_request_done(host, mrq);
		return;
	}

	host->data = NULL;

	if (!data->error)
		data->bytes_xfered += data->blocks * (data->blksz);
	else
		data->bytes_xfered = 0;

	/* stop cmd is needed on error, or when auto-CMD23 wasn't used */
	if (data->stop && (data->error || !host->mrq->sbc))
		omap_hsmmc_start_command(host, data->stop, NULL);
	else
		omap_hsmmc_request_done(host, data->mrq);
}

/*
 * Notify the core about command completion
 */
/*
 * Notify the core about command completion.
 *
 * If the completed command was a manually issued CMD23 (sbc), chain
 * straight into the data command.  Otherwise read back the response
 * registers and, when no data phase or busy wait remains (or the
 * command failed), complete the request.
 */
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
	/* sbc done by software (not auto-CMD23): now start the real command */
	if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
	    !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
		host->cmd = NULL;
		omap_hsmmc_start_dma_transfer(host);
		omap_hsmmc_start_command(host, host->mrq->cmd,
						host->mrq->data);
		return;
	}

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
			cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
			cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
		}
	}
	/* no data phase and no busy wait pending, or the command failed */
	if ((host->data == NULL && !host->response_busy) || cmd->error)
		omap_hsmmc_request_done(host, host->mrq);
}

/*
 * DMA clean up for command errors
 */
960
/*
 * DMA clean up for command errors.
 *
 * Records @errno on the in-flight data, atomically claims the DMA
 * channel (dma_ch set to -1 under irq_lock so the DMA callback cannot
 * race), then terminates and unmaps the transfer.
 */
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
	int dma_ch;
	unsigned long flags;

	host->data->error = errno;

	spin_lock_irqsave(&host->irq_lock, flags);
	dma_ch = host->dma_ch;
	host->dma_ch = -1;	/* mark DMA as no longer in flight */
	spin_unlock_irqrestore(&host->irq_lock, flags);

	if (host->use_dma && dma_ch != -1) {
		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);

		dmaengine_terminate_all(chan);
		dma_unmap_sg(chan->device->dev,
			host->data->sg, host->data->sg_len,
			omap_hsmmc_get_dma_dir(host, host->data));

		host->data->host_cookie = 0;
	}
	host->data = NULL;
}

/*
 * Readable error output
 */
#ifdef CONFIG_MMC_DEBUG
989
/*
 * Readable error output: decode a STAT value into its named bits.
 *
 * Fix: the original accumulated into the fixed 256-byte buffer with
 * unbounded sprintf(); use scnprintf() with explicit remaining-space
 * accounting so the buffer can never overflow even if the bit-name
 * table grows.
 */
static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
	/* --- means reserved bit without definition at documentation */
	static const char *omap_hsmmc_status_bits[] = {
		"CC"  , "TC"  , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
		"CIRQ",	"OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
		"CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
		"ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
	};
	char res[256];
	int len, i;

	len = scnprintf(res, sizeof(res), "MMC IRQ 0x%x :", status);

	for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
		if (status & (1 << i))
			len += scnprintf(res + len, sizeof(res) - len,
					 " %s", omap_hsmmc_status_bits[i]);

	dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
1013 1014 1015 1016 1017
#else
/* !CONFIG_MMC_DEBUG stub: IRQ status decoding compiled out. */
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
					     u32 status)
{
}
1018 1019
#endif  /* CONFIG_MMC_DEBUG */

1020 1021 1022 1023 1024 1025 1026
/*
 * MMC controller internal state machines reset
 *
 * Used to reset command or data internal state machines, using respectively
 *  SRC or SRD bit of SYSCTL register
 * Can be called from interrupt context
 */
1027 1028
static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
						   unsigned long bit)
1029 1030
{
	unsigned long i = 0;
1031
	unsigned long limit = MMC_TIMEOUT_US;
1032 1033 1034 1035

	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);

1036 1037 1038 1039
	/*
	 * OMAP4 ES2 and greater has an updated reset logic.
	 * Monitor a 0->1 transition first
	 */
1040
	if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) {
1041
		while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
1042
					&& (i++ < limit))
1043
			udelay(1);
1044 1045 1046
	}
	i = 0;

1047 1048
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
		(i++ < limit))
1049
		udelay(1);
1050 1051 1052 1053 1054 1055

	if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
		dev_err(mmc_dev(host->mmc),
			"Timeout waiting on controller reset in %s\n",
			__func__);
}
1056

1057 1058
/*
 * Record a command/data failure and reset the matching controller FSM.
 *
 * @err: negative errno to store on the failing command/data.
 * @end_cmd: non-zero when the command phase itself failed (resets SRC);
 *           the data FSM (SRD) is reset whenever a data phase was active.
 */
static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
					int err, int end_cmd)
{
	if (end_cmd) {
		omap_hsmmc_reset_controller_fsm(host, SRC);
		if (host->cmd)
			host->cmd->error = err;
	}

	if (host->data) {
		omap_hsmmc_reset_controller_fsm(host, SRD);
		omap_hsmmc_dma_cleanup(host, err);
	} else if (host->mrq && host->mrq->cmd)
		host->mrq->cmd->error = err;
}

1073
static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1074 1075
{
	struct mmc_data *data;
1076
	int end_cmd = 0, end_trans = 0;
1077
	int error = 0;
1078

1079
	data = host->data;
1080
	dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
1081

1082
	if (status & ERR_EN) {
1083
		omap_hsmmc_dbg_report_irq(host, status);
1084

1085
		if (status & (CTO_EN | CCRC_EN))
1086
			end_cmd = 1;
1087 1088 1089 1090
		if (host->data || host->response_busy) {
			end_trans = !end_cmd;
			host->response_busy = 0;
		}
1091
		if (status & (CTO_EN | DTO_EN))
1092
			hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
1093 1094
		else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
				   BADA_EN))
1095
			hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
1096

1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110
		if (status & ACE_EN) {
			u32 ac12;
			ac12 = OMAP_HSMMC_READ(host->base, AC12);
			if (!(ac12 & ACNE) && host->mrq->sbc) {
				end_cmd = 1;
				if (ac12 & ACTO)
					error =  -ETIMEDOUT;
				else if (ac12 & (ACCE | ACEB | ACIE))
					error = -EILSEQ;
				host->mrq->sbc->error = error;
				hsmmc_command_incomplete(host, error, end_cmd);
			}
			dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
		}
1111 1112
	}

1113
	OMAP_HSMMC_WRITE(host->base, STAT, status);
1114
	if (end_cmd || ((status & CC_EN) && host->cmd))
1115
		omap_hsmmc_cmd_done(host, host->cmd);
1116
	if ((end_trans || (status & TC_EN)) && host->mrq)
1117
		omap_hsmmc_xfer_done(host, data);
1118
}
1119

1120 1121 1122 1123 1124 1125 1126 1127 1128
/*
 * MMC controller IRQ handler
 */
static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;
	int status;

	status = OMAP_HSMMC_READ(host->base, STAT);
1129 1130 1131 1132 1133 1134
	while (status & (INT_EN_MASK | CIRQ_EN)) {
		if (host->req_in_progress)
			omap_hsmmc_do_irq(host, status);

		if (status & CIRQ_EN)
			mmc_signal_sdio_irq(host->mmc);
1135

1136 1137
		/* Flush posted write */
		status = OMAP_HSMMC_READ(host->base, STAT);
1138
	}
1139

1140 1141 1142
	return IRQ_HANDLED;
}

1143
static void set_sd_bus_power(struct omap_hsmmc_host *host)
A
Adrian Hunter 已提交
1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155
{
	unsigned long i;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
	for (i = 0; i < loops_per_jiffy; i++) {
		if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
			break;
		cpu_relax();
	}
}

1156
/*
1157 1158 1159 1160 1161
 * Switch MMC interface voltage ... only relevant for MMC1.
 *
 * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
 * The MMC2 transceiver controls are used instead of DAT4..DAT7.
 * Some chips, like eMMC ones, use internal transceivers.
1162
 */
1163
static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1164 1165 1166 1167 1168
{
	u32 reg_val = 0;
	int ret;

	/* Disable the clocks */
1169
	pm_runtime_put_sync(host->dev);
1170
	if (host->dbclk)
1171
		clk_disable_unprepare(host->dbclk);
1172 1173

	/* Turn the power off */
1174
	ret = omap_hsmmc_set_power(host->dev, 0, 0);
1175 1176

	/* Turn the power ON with given VDD 1.8 or 3.0v */
1177
	if (!ret)
1178
		ret = omap_hsmmc_set_power(host->dev, 1, vdd);
1179
	pm_runtime_get_sync(host->dev);
1180
	if (host->dbclk)
1181
		clk_prepare_enable(host->dbclk);
1182

1183 1184 1185 1186 1187 1188
	if (ret != 0)
		goto err;

	OMAP_HSMMC_WRITE(host->base, HCTL,
		OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
	reg_val = OMAP_HSMMC_READ(host->base, HCTL);
1189

1190 1191 1192
	/*
	 * If a MMC dual voltage card is detected, the set_ios fn calls
	 * this fn with VDD bit set for 1.8V. Upon card removal from the
1193
	 * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
1194
	 *
1195 1196 1197 1198 1199 1200 1201 1202 1203
	 * Cope with a bit of slop in the range ... per data sheets:
	 *  - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
	 *    but recommended values are 1.71V to 1.89V
	 *  - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
	 *    but recommended values are 2.7V to 3.3V
	 *
	 * Board setup code shouldn't permit anything very out-of-range.
	 * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
	 * middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
1204
	 */
1205
	if ((1 << vdd) <= MMC_VDD_23_24)
1206
		reg_val |= SDVS18;
1207 1208
	else
		reg_val |= SDVS30;
1209 1210

	OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
A
Adrian Hunter 已提交
1211
	set_sd_bus_power(host);
1212 1213 1214

	return 0;
err:
1215
	dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
1216 1217 1218
	return ret;
}

1219 1220 1221
/* Protect the card while the cover is open */
static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
{
	if (!host->get_cover_state)
		return;

	host->reqs_blocked = 0;
	if (host->get_cover_state(host->dev)) {
		if (host->protect_card) {
			dev_info(host->dev, "%s: cover is closed, "
					 "card is now accessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 0;
		}
	} else {
		if (!host->protect_card) {
			dev_info(host->dev, "%s: cover is open, "
					 "card is now inaccessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 1;
		}
	}
}

1243
/*
1244
 * irq handler when (cell-phone) cover is mounted/removed
1245
 */
1246
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id)
1247
{
1248
	struct omap_hsmmc_host *host = dev_id;
1249 1250

	sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
1251

1252 1253
	omap_hsmmc_protect_card(host);
	mmc_detect_change(host->mmc, (HZ * 200) / 1000);
1254 1255 1256
	return IRQ_HANDLED;
}

1257
static void omap_hsmmc_dma_callback(void *param)
1258
{
1259 1260
	struct omap_hsmmc_host *host = param;
	struct dma_chan *chan;
1261
	struct mmc_data *data;
1262
	int req_in_progress;
1263

1264
	spin_lock_irq(&host->irq_lock);
1265
	if (host->dma_ch < 0) {
1266
		spin_unlock_irq(&host->irq_lock);
1267
		return;
1268
	}
1269

1270
	data = host->mrq->data;
1271
	chan = omap_hsmmc_get_dma_chan(host, data);
1272
	if (!data->host_cookie)
1273 1274
		dma_unmap_sg(chan->device->dev,
			     data->sg, data->sg_len,
1275
			     omap_hsmmc_get_dma_dir(host, data));
1276 1277

	req_in_progress = host->req_in_progress;
1278
	host->dma_ch = -1;
1279
	spin_unlock_irq(&host->irq_lock);
1280 1281 1282 1283 1284 1285 1286

	/* If DMA has finished after TC, complete the request */
	if (!req_in_progress) {
		struct mmc_request *mrq = host->mrq;

		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
1287 1288
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1289
	}
1290 1291
}

1292 1293
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
				       struct mmc_data *data,
1294
				       struct omap_hsmmc_next *next,
1295
				       struct dma_chan *chan)
1296 1297 1298 1299 1300
{
	int dma_len;

	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
1301
		dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
1302 1303 1304 1305 1306 1307
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
1308
	if (next || data->host_cookie != host->next_data.cookie) {
1309
		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329
				     omap_hsmmc_get_dma_dir(host, data));

	} else {
		dma_len = host->next_data.dma_len;
		host->next_data.dma_len = 0;
	}


	if (dma_len == 0)
		return -EINVAL;

	if (next) {
		next->dma_len = dma_len;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->dma_len = dma_len;

	return 0;
}

1330 1331 1332
/*
 * Routine to configure and start DMA for the MMC card
 */
1333
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
1334
					struct mmc_request *req)
1335
{
1336 1337 1338
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *tx;
	int ret = 0, i;
1339
	struct mmc_data *data = req->data;
1340
	struct dma_chan *chan;
1341

1342
	/* Sanity check: all the SG entries must be aligned by block size. */
1343
	for (i = 0; i < data->sg_len; i++) {
1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355
		struct scatterlist *sgl;

		sgl = data->sg + i;
		if (sgl->length % data->blksz)
			return -EINVAL;
	}
	if ((data->blksz % 4) != 0)
		/* REVISIT: The MMC buffer increments only when MSB is written.
		 * Return error for blksz which is non multiple of four.
		 */
		return -EINVAL;

1356
	BUG_ON(host->dma_ch != -1);
1357

1358 1359
	chan = omap_hsmmc_get_dma_chan(host, data);

1360 1361 1362 1363 1364 1365
	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;
1366

1367 1368
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
1369
		return ret;
1370

1371
	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
1372 1373
	if (ret)
		return ret;
1374

1375 1376 1377 1378 1379 1380 1381 1382
	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		/* FIXME: cleanup */
		return -1;
	}
1383

1384 1385
	tx->callback = omap_hsmmc_dma_callback;
	tx->callback_param = host;
1386

1387 1388
	/* Does not fail */
	dmaengine_submit(tx);
1389

1390
	host->dma_ch = 1;
1391

1392 1393 1394
	return 0;
}

1395
static void set_data_timeout(struct omap_hsmmc_host *host,
1396 1397
			     unsigned int timeout_ns,
			     unsigned int timeout_clks)
1398 1399 1400 1401 1402 1403 1404 1405 1406
{
	unsigned int timeout, cycle_ns;
	uint32_t reg, clkd, dto = 0;

	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
	if (clkd == 0)
		clkd = 1;

1407
	cycle_ns = 1000000000 / (host->clk_rate / clkd);
1408 1409
	timeout = timeout_ns / cycle_ns;
	timeout += timeout_clks;
1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431
	if (timeout) {
		while ((timeout & 0x80000000) == 0) {
			dto += 1;
			timeout <<= 1;
		}
		dto = 31 - dto;
		timeout <<= 1;
		if (timeout && dto)
			dto += 1;
		if (dto >= 13)
			dto -= 13;
		else
			dto = 0;
		if (dto > 14)
			dto = 14;
	}

	reg &= ~DTO_MASK;
	reg |= dto << DTO_SHIFT;
	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}

1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
{
	struct mmc_request *req = host->mrq;
	struct dma_chan *chan;

	if (!req->data)
		return;
	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
				| (req->data->blocks << 16));
	set_data_timeout(host, req->data->timeout_ns,
				req->data->timeout_clks);
	chan = omap_hsmmc_get_dma_chan(host, req->data);
	dma_async_issue_pending(chan);
}

1447 1448 1449 1450
/*
 * Configure block length for MMC/SD cards and initiate the transfer.
 */
static int
1451
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
1452 1453 1454 1455 1456 1457
{
	int ret;
	host->data = req->data;

	if (req->data == NULL) {
		OMAP_HSMMC_WRITE(host->base, BLK, 0);
1458 1459 1460 1461 1462 1463
		/*
		 * Set an arbitrary 100ms data timeout for commands with
		 * busy signal.
		 */
		if (req->cmd->flags & MMC_RSP_BUSY)
			set_data_timeout(host, 100000000U, 0);
1464 1465 1466 1467
		return 0;
	}

	if (host->use_dma) {
1468
		ret = omap_hsmmc_setup_dma_transfer(host, req);
1469
		if (ret != 0) {
1470
			dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
1471 1472 1473 1474 1475 1476
			return ret;
		}
	}
	return 0;
}

1477 1478 1479 1480 1481 1482
static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

1483
	if (host->use_dma && data->host_cookie) {
1484 1485
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);

1486 1487
		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));
1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
		data->host_cookie = 0;
	}
}

/*
 * mmc_host_ops.pre_req: pre-map the next request's scatterlist so the
 * DMA setup overlaps with the current transfer. On failure the cookie
 * is cleared so the request falls back to synchronous mapping.
 */
static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return;
	}

	if (host->use_dma) {
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);

		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
						&host->next_data, c))
			mrq->data->host_cookie = 0;
	}
}

1511 1512 1513
/*
 * Request function. for read/write operation
 */
1514
static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1515
{
1516
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1517
	int err;
1518

1519 1520
	BUG_ON(host->req_in_progress);
	BUG_ON(host->dma_ch != -1);
1521
	pm_runtime_get_sync(host->dev);
1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537
	if (host->protect_card) {
		if (host->reqs_blocked < 3) {
			/*
			 * Ensure the controller is left in a consistent
			 * state by resetting the command and data state
			 * machines.
			 */
			omap_hsmmc_reset_controller_fsm(host, SRD);
			omap_hsmmc_reset_controller_fsm(host, SRC);
			host->reqs_blocked += 1;
		}
		req->cmd->error = -EBADF;
		if (req->data)
			req->data->error = -EBADF;
		req->cmd->retries = 0;
		mmc_request_done(mmc, req);
1538 1539
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1540 1541 1542
		return;
	} else if (host->reqs_blocked)
		host->reqs_blocked = 0;
1543 1544
	WARN_ON(host->mrq != NULL);
	host->mrq = req;
1545
	host->clk_rate = clk_get_rate(host->fclk);
1546
	err = omap_hsmmc_prepare_data(host, req);
1547 1548 1549 1550 1551 1552
	if (err) {
		req->cmd->error = err;
		if (req->data)
			req->data->error = err;
		host->mrq = NULL;
		mmc_request_done(mmc, req);
1553 1554
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1555 1556
		return;
	}
1557
	if (req->sbc && !(host->flags & AUTO_CMD23)) {
1558 1559 1560
		omap_hsmmc_start_command(host, req->sbc, NULL);
		return;
	}
1561

1562
	omap_hsmmc_start_dma_transfer(host);
1563
	omap_hsmmc_start_command(host, req->cmd, req->data);
1564 1565 1566
}

/* Routine to configure clock values. Exposed API to core */
1567
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1568
{
1569
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1570
	int do_send_init_stream = 0;
1571

1572
	pm_runtime_get_sync(host->dev);
1573

1574 1575 1576
	if (ios->power_mode != host->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
1577
			omap_hsmmc_set_power(host->dev, 0, 0);
1578 1579
			break;
		case MMC_POWER_UP:
1580
			omap_hsmmc_set_power(host->dev, 1, ios->vdd);
1581 1582 1583 1584 1585 1586
			break;
		case MMC_POWER_ON:
			do_send_init_stream = 1;
			break;
		}
		host->power_mode = ios->power_mode;
1587 1588
	}

1589 1590
	/* FIXME: set registers based only on changes to ios */

1591
	omap_hsmmc_set_bus_width(host);
1592

1593
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1594 1595 1596
		/* Only MMC1 can interface at 3V without some flavor
		 * of external transceiver; but they all handle 1.8V.
		 */
1597
		if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
1598
			(ios->vdd == DUAL_VOLT_OCR_BIT)) {
1599 1600 1601 1602 1603 1604
				/*
				 * The mmc_select_voltage fn of the core does
				 * not seem to set the power_mode to
				 * MMC_POWER_UP upon recalculating the voltage.
				 * vdd 1.8v.
				 */
1605 1606
			if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
				dev_dbg(mmc_dev(host->mmc),
1607 1608 1609 1610
						"Switch operation failed\n");
		}
	}

1611
	omap_hsmmc_set_clock(host);
1612

1613
	if (do_send_init_stream)
1614 1615
		send_init_stream(host);

1616
	omap_hsmmc_set_bus_mode(host);
1617

1618
	pm_runtime_put_autosuspend(host->dev);
1619 1620 1621 1622
}

static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
1623
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1624

1625
	if (!host->card_detect)
1626
		return -ENOSYS;
1627
	return host->card_detect(host->dev);
1628 1629
}

1630 1631 1632 1633
static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

1634 1635
	if (mmc_pdata(host)->init_card)
		mmc_pdata(host)->init_card(card);
1636 1637
}

1638 1639 1640
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1641
	u32 irq_mask, con;
1642 1643 1644 1645
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

1646
	con = OMAP_HSMMC_READ(host->base, CON);
1647 1648 1649 1650
	irq_mask = OMAP_HSMMC_READ(host->base, ISE);
	if (enable) {
		host->flags |= HSMMC_SDIO_IRQ_ENABLED;
		irq_mask |= CIRQ_EN;
1651
		con |= CTPL | CLKEXTFREE;
1652 1653 1654
	} else {
		host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
		irq_mask &= ~CIRQ_EN;
1655
		con &= ~(CTPL | CLKEXTFREE);
1656
	}
1657
	OMAP_HSMMC_WRITE(host->base, CON, con);
1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);

	/*
	 * if enable, piggy back detection on current request
	 * but always disable immediately
	 */
	if (!host->req_in_progress || !enable)
		OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* flush posted write */
	OMAP_HSMMC_READ(host->base, IE);

	spin_unlock_irqrestore(&host->irq_lock, flags);
}

static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
	int ret;

	/*
	 * For omaps with wake-up path, wakeirq will be irq from pinctrl and
	 * for other omaps, wakeirq will be from GPIO (dat line remuxed to
	 * gpio). wakeirq is needed to detect sdio irq in runtime suspend state
	 * with functional clock disabled.
	 */
	if (!host->dev->of_node || !host->wake_irq)
		return -ENODEV;

1686
	ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
1687 1688 1689 1690 1691 1692 1693 1694 1695 1696
	if (ret) {
		dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
		goto err;
	}

	/*
	 * Some omaps don't have wake-up path from deeper idle states
	 * and need to remux SDIO DAT1 to GPIO for wake-up from idle.
	 */
	if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715
		struct pinctrl *p = devm_pinctrl_get(host->dev);
		if (!p) {
			ret = -ENODEV;
			goto err_free_irq;
		}
		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
			dev_info(host->dev, "missing default pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}

		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
			dev_info(host->dev, "missing idle pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}
		devm_pinctrl_put(p);
1716 1717
	}

1718 1719
	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | IWE);
1720 1721
	return 0;

1722
err_free_irq:
1723
	dev_pm_clear_wake_irq(host->dev);
1724 1725 1726 1727 1728 1729
err:
	dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
	host->wake_irq = 0;
	return ret;
}

1730
static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1731 1732 1733 1734
{
	u32 hctl, capa, value;

	/* Only MMC1 supports 3.0V */
1735
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749
		hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
	OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);

	value = OMAP_HSMMC_READ(host->base, CAPA);
	OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);

	/* Set SD bus power bit */
A
Adrian Hunter 已提交
1750
	set_sd_bus_power(host);
1751 1752
}

1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763
/*
 * Limit reads to single-block transfers: this controller revision
 * can't do multiblock reads due to hw bugs (35xx erratum 2.1.1.128).
 */
static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
				     unsigned int direction, int blk_size)
{
	return (direction == MMC_DATA_READ) ? 1 : blk_size;
}

static struct mmc_host_ops omap_hsmmc_ops = {
1764 1765
	.post_req = omap_hsmmc_post_req,
	.pre_req = omap_hsmmc_pre_req,
1766 1767
	.request = omap_hsmmc_request,
	.set_ios = omap_hsmmc_set_ios,
1768
	.get_cd = omap_hsmmc_get_cd,
1769
	.get_ro = mmc_gpio_get_ro,
1770
	.init_card = omap_hsmmc_init_card,
1771
	.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
1772 1773
};

1774 1775
#ifdef CONFIG_DEBUG_FS

1776
static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
1777 1778
{
	struct mmc_host *mmc = s->private;
1779
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1780

1781 1782 1783
	seq_printf(s, "mmc%d:\n", mmc->index);
	seq_printf(s, "sdio irq mode\t%s\n",
		   (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");
1784

1785 1786 1787 1788 1789 1790
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		seq_printf(s, "sdio irq \t%s\n",
			   (host->flags & HSMMC_SDIO_IRQ_ENABLED) ?  "enabled"
			   : "disabled");
	}
	seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);
1791

1792 1793
	pm_runtime_get_sync(host->dev);
	seq_puts(s, "\nregs:\n");
1794 1795
	seq_printf(s, "CON:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CON));
1796 1797
	seq_printf(s, "PSTATE:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, PSTATE));
1798 1799 1800 1801 1802 1803 1804 1805 1806 1807
	seq_printf(s, "HCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, HCTL));
	seq_printf(s, "SYSCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, SYSCTL));
	seq_printf(s, "IE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, IE));
	seq_printf(s, "ISE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, ISE));
	seq_printf(s, "CAPA:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CAPA));
1808

1809 1810
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
1811

1812 1813 1814
	return 0;
}

1815
static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
1816
{
1817
	return single_open(file, omap_hsmmc_regs_show, inode->i_private);
1818 1819 1820
}

static const struct file_operations mmc_regs_fops = {
1821
	.open           = omap_hsmmc_regs_open,
1822 1823 1824 1825 1826
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

1827
static void omap_hsmmc_debugfs(struct mmc_host *mmc)
1828 1829 1830 1831 1832 1833 1834 1835
{
	if (mmc->debugfs_root)
		debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
			mmc, &mmc_regs_fops);
}

#else

1836
/* No-op stub when CONFIG_DEBUG_FS is disabled. */
static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}

#endif

1842
#ifdef CONFIG_OF
1843 1844 1845 1846 1847 1848 1849 1850
static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
	/* See 35xx errata 2.1.1.128 in SPRZ278F */
	.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};

static const struct omap_mmc_of_data omap4_mmc_of_data = {
	.reg_offset = 0x100,
};
1851 1852 1853 1854
static const struct omap_mmc_of_data am33xx_mmc_of_data = {
	.reg_offset = 0x100,
	.controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
};
1855 1856 1857 1858 1859

static const struct of_device_id omap_mmc_of_match[] = {
	{
		.compatible = "ti,omap2-hsmmc",
	},
1860 1861 1862 1863
	{
		.compatible = "ti,omap3-pre-es3-hsmmc",
		.data = &omap3_pre_es3_mmc_of_data,
	},
1864 1865 1866 1867 1868
	{
		.compatible = "ti,omap3-hsmmc",
	},
	{
		.compatible = "ti,omap4-hsmmc",
1869
		.data = &omap4_mmc_of_data,
1870
	},
1871 1872 1873 1874
	{
		.compatible = "ti,am33xx-hsmmc",
		.data = &am33xx_mmc_of_data,
	},
1875
	{},
1876
};
1877 1878
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);

1879
static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
1880
{
1881
	struct omap_hsmmc_platform_data *pdata;
1882 1883 1884 1885
	struct device_node *np = dev->of_node;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
1886
		return ERR_PTR(-ENOMEM); /* out of memory */
1887 1888 1889 1890

	if (of_find_property(np, "ti,dual-volt", NULL))
		pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;

1891 1892
	pdata->gpio_cd = -EINVAL;
	pdata->gpio_cod = -EINVAL;
1893
	pdata->gpio_wp = -EINVAL;
1894 1895

	if (of_find_property(np, "ti,non-removable", NULL)) {
1896 1897
		pdata->nonremovable = true;
		pdata->no_regulator_off_init = true;
1898 1899 1900
	}

	if (of_find_property(np, "ti,needs-special-reset", NULL))
1901
		pdata->features |= HSMMC_HAS_UPDATED_RESET;
1902

1903
	if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
1904
		pdata->features |= HSMMC_HAS_HSPE_SUPPORT;
1905

1906 1907 1908
	return pdata;
}
#else
1909
static inline struct omap_hsmmc_platform_data
1910 1911
			*of_get_hsmmc_pdata(struct device *dev)
{
1912
	return ERR_PTR(-EINVAL);
1913 1914 1915
}
#endif

1916
static int omap_hsmmc_probe(struct platform_device *pdev)
1917
{
1918
	struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data;
1919
	struct mmc_host *mmc;
1920
	struct omap_hsmmc_host *host = NULL;
1921
	struct resource *res;
1922
	int ret, irq;
1923
	const struct of_device_id *match;
1924 1925
	dma_cap_mask_t mask;
	unsigned tx_req, rx_req;
1926
	const struct omap_mmc_of_data *data;
1927
	void __iomem *base;
1928 1929 1930 1931

	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
	if (match) {
		pdata = of_get_hsmmc_pdata(&pdev->dev);
1932 1933 1934 1935

		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

1936
		if (match->data) {
1937 1938 1939
			data = match->data;
			pdata->reg_offset = data->reg_offset;
			pdata->controller_flags |= data->controller_flags;
1940 1941
		}
	}
1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform Data is missing\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

1953 1954 1955
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
1956

1957
	mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
1958 1959
	if (!mmc) {
		ret = -ENOMEM;
1960
		goto err;
1961 1962
	}

1963 1964 1965 1966
	ret = mmc_of_parse(mmc);
	if (ret)
		goto err1;

1967 1968 1969 1970 1971 1972 1973
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->pdata	= pdata;
	host->dev	= &pdev->dev;
	host->use_dma	= 1;
	host->dma_ch	= -1;
	host->irq	= irq;
1974
	host->mapbase	= res->start + pdata->reg_offset;
1975
	host->base	= base + pdata->reg_offset;
1976
	host->power_mode = MMC_POWER_OFF;
1977
	host->next_data.cookie = 1;
1978
	host->pbias_enabled = 0;
1979

1980
	ret = omap_hsmmc_gpio_init(mmc, host, pdata);
1981 1982 1983
	if (ret)
		goto err_gpio;

1984 1985
	platform_set_drvdata(pdev, host);

1986 1987 1988
	if (pdev->dev.of_node)
		host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);

1989
	mmc->ops	= &omap_hsmmc_ops;
1990

1991 1992 1993 1994
	mmc->f_min = OMAP_MMC_MIN_CLOCK;

	if (pdata->max_freq > 0)
		mmc->f_max = pdata->max_freq;
1995
	else if (mmc->f_max == 0)
1996
		mmc->f_max = OMAP_MMC_MAX_CLOCK;
1997

1998
	spin_lock_init(&host->irq_lock);
1999

2000
	host->fclk = devm_clk_get(&pdev->dev, "fck");
2001 2002 2003 2004 2005 2006
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		host->fclk = NULL;
		goto err1;
	}

2007 2008
	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
2009
		omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
2010
	}
2011

2012
	device_init_wakeup(&pdev->dev, true);
2013 2014 2015 2016
	pm_runtime_enable(host->dev);
	pm_runtime_get_sync(host->dev);
	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(host->dev);
2017

2018 2019
	omap_hsmmc_context_save(host);

2020
	host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
2021 2022 2023 2024 2025
	/*
	 * MMC can still work without debounce clock.
	 */
	if (IS_ERR(host->dbclk)) {
		host->dbclk = NULL;
2026
	} else if (clk_prepare_enable(host->dbclk) != 0) {
2027 2028
		dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
		host->dbclk = NULL;
2029
	}
2030

2031 2032
	/* Since we do only SG emulation, we can have as many segs
	 * as we want. */
2033
	mmc->max_segs = 1024;
2034

2035 2036 2037 2038 2039
	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

2040
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
2041
		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
2042

2043
	mmc->caps |= mmc_pdata(host)->caps;
2044
	if (mmc->caps & MMC_CAP_8_BIT_DATA)
2045 2046
		mmc->caps |= MMC_CAP_4_BIT_DATA;

2047
	if (mmc_pdata(host)->nonremovable)
2048 2049
		mmc->caps |= MMC_CAP_NONREMOVABLE;

2050
	mmc->pm_caps |= mmc_pdata(host)->pm_caps;
2051

2052
	omap_hsmmc_conf_bus_power(host);
2053

2054 2055 2056 2057 2058 2059 2060 2061
	if (!pdev->dev.of_node) {
		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		tx_req = res->start;
2062

2063 2064 2065 2066 2067 2068 2069
		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		rx_req = res->start;
2070
	}
2071

2072 2073 2074
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

2075 2076 2077 2078
	host->rx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &rx_req, &pdev->dev, "rx");

2079 2080
	if (!host->rx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
2081
		ret = -ENXIO;
2082 2083 2084
		goto err_irq;
	}

2085 2086 2087 2088
	host->tx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &tx_req, &pdev->dev, "tx");

2089 2090
	if (!host->tx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
2091
		ret = -ENXIO;
2092
		goto err_irq;
2093
	}
2094 2095

	/* Request IRQ for MMC operations */
2096
	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
2097 2098
			mmc_hostname(mmc), host);
	if (ret) {
2099
		dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
2100 2101 2102
		goto err_irq;
	}

2103
	if (omap_hsmmc_have_reg()) {
2104 2105
		ret = omap_hsmmc_reg_get(host);
		if (ret)
2106
			goto err_irq;
2107 2108
	}

2109
	mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
2110

2111
	omap_hsmmc_disable_irq(host);
2112

2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124
	/*
	 * For now, only support SDIO interrupt if we have a separate
	 * wake-up interrupt configured from device tree. This is because
	 * the wake-up interrupt is needed for idle state and some
	 * platforms need special quirks. And we don't want to add new
	 * legacy mux platform init code callbacks any longer as we
	 * are moving to DT based booting anyways.
	 */
	ret = omap_hsmmc_configure_wake_irq(host);
	if (!ret)
		mmc->caps |= MMC_CAP_SDIO_IRQ;

2125 2126
	omap_hsmmc_protect_card(host);

2127 2128
	mmc_add_host(mmc);

2129
	if (mmc_pdata(host)->name != NULL) {
2130 2131 2132 2133
		ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
		if (ret < 0)
			goto err_slot_name;
	}
2134
	if (host->get_cover_state) {
2135
		ret = device_create_file(&mmc->class_dev,
2136
					 &dev_attr_cover_switch);
2137
		if (ret < 0)
2138
			goto err_slot_name;
2139 2140
	}

2141
	omap_hsmmc_debugfs(mmc);
2142 2143
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
2144

2145 2146 2147 2148 2149
	return 0;

err_slot_name:
	mmc_remove_host(mmc);
err_irq:
2150
	device_init_wakeup(&pdev->dev, false);
2151 2152 2153 2154
	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);
2155
	pm_runtime_put_sync(host->dev);
2156
	pm_runtime_disable(host->dev);
2157
	if (host->dbclk)
2158
		clk_disable_unprepare(host->dbclk);
2159
err1:
2160
err_gpio:
2161
	mmc_free_host(mmc);
2162 2163 2164 2165
err:
	return ret;
}

2166
/*
 * Unbind the driver: tear down the MMC host, release DMA channels and
 * the debounce clock, and drop the runtime-PM references taken at probe.
 */
static int omap_hsmmc_remove(struct platform_device *pdev)
{
	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);

	/* Keep the controller powered while it is being dismantled. */
	pm_runtime_get_sync(host->dev);
	mmc_remove_host(host->mmc);

	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);

	pm_runtime_put_sync(host->dev);
	pm_runtime_disable(host->dev);
	/* Undo the wakeup setup done at probe time. */
	device_init_wakeup(&pdev->dev, false);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	mmc_free_host(host->mmc);

	return 0;
}

2189
#ifdef CONFIG_PM_SLEEP
/*
 * System-suspend handler: quiesce the controller unless the MMC core
 * asked us to keep card power across suspend (MMC_PM_KEEP_POWER,
 * typically for SDIO cards that must stay functional).
 */
static int omap_hsmmc_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	/* Controller must be active to touch its registers. */
	pm_runtime_get_sync(host->dev);

	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
		/* Mask and clear all interrupts, then cut SD bus power. */
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, HCTL,
				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
	}

	/* The debounce clock is not needed while suspended. */
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	pm_runtime_put_sync(host->dev);
	return 0;
}

/* Routine to resume the MMC device */
static int omap_hsmmc_resume(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	pm_runtime_get_sync(host->dev);

	/* Re-enable the debounce clock dropped in omap_hsmmc_suspend(). */
	if (host->dbclk)
		clk_prepare_enable(host->dbclk);

	/* Bus power was cut on suspend unless MMC_PM_KEEP_POWER was set. */
	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
		omap_hsmmc_conf_bus_power(host);

	omap_hsmmc_protect_card(host);
	/* Let the device autosuspend again once it goes idle. */
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
	return 0;
}
#endif

2237 2238 2239
/*
 * Runtime-suspend handler.  Saves the register context (lost while the
 * module is idled) and, when SDIO card interrupts are in use, switches
 * the pins to their idle state so a DAT1 transition can still wake us.
 * Returns -EBUSY to veto the suspend if an SDIO IRQ is already pending.
 */
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host;
	unsigned long flags;
	int ret = 0;

	host = platform_get_drvdata(to_platform_device(dev));
	/* Context is lost across idle; save it for runtime_resume. */
	omap_hsmmc_context_save(host);
	dev_dbg(dev, "disabled\n");

	/* irq_lock serializes against the SDIO IRQ handler. */
	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* disable sdio irq handling to prevent race */
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);

		if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
			/*
			 * dat1 line low, pending sdio irq
			 * race condition: possible irq handler running on
			 * multi-core, abort
			 */
			dev_dbg(dev, "pending sdio irq, abort suspend\n");
			/* Re-arm the card IRQ we just masked, then veto. */
			OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
			OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
			OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
			pm_runtime_mark_last_busy(dev);
			ret = -EBUSY;
			goto abort;
		}

		/* Idle pinmux routes DAT1 to the wake-up interrupt. */
		pinctrl_pm_select_idle_state(dev);
	} else {
		pinctrl_pm_select_idle_state(dev);
	}

abort:
	spin_unlock_irqrestore(&host->irq_lock, flags);
	return ret;
}

/*
 * Runtime-resume handler.  Restores the register context saved by
 * omap_hsmmc_runtime_suspend() and, if SDIO card interrupts are in use,
 * moves the pins back to their default state and re-arms the card IRQ.
 */
static int omap_hsmmc_runtime_resume(struct device *dev)
{
	struct omap_hsmmc_host *host;
	unsigned long flags;

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_restore(host);
	dev_dbg(dev, "enabled\n");

	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {

		/* Restore pinmux before touching IRQ enables. */
		pinctrl_pm_select_default_state(host->dev);

		/* irq lost, if pinmux incorrect */
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
		OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
	} else {
		pinctrl_pm_select_default_state(host->dev);
	}
	spin_unlock_irqrestore(&host->irq_lock, flags);
	return 0;
}

2305
static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2306
	SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
2307 2308
	.runtime_suspend = omap_hsmmc_runtime_suspend,
	.runtime_resume = omap_hsmmc_runtime_resume,
2309 2310 2311
};

/* Platform driver glue: bound by compatible string (DT) or name. */
static struct platform_driver omap_hsmmc_driver = {
	.probe		= omap_hsmmc_probe,
	.remove		= omap_hsmmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.pm = &omap_hsmmc_dev_pm_ops,
		/* of_match_ptr() evaluates to NULL when CONFIG_OF is off. */
		.of_match_table = of_match_ptr(omap_mmc_of_match),
	},
};

2321
/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(omap_hsmmc_driver);

MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");