“d68185d1aef9b8d3801ff656ec3089503119e936”上不存在“README.md”
omap_hsmmc.c 58.9 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
/*
 * drivers/mmc/host/omap_hsmmc.c
 *
 * Driver for OMAP2430/3430 MMC controller.
 *
 * Copyright (C) 2007 Texas Instruments.
 *
 * Authors:
 *	Syed Mohammed Khasim	<x0khasim@ti.com>
 *	Madhusudhan		<madhu.cr@ti.com>
 *	Mohit Jalori		<mjalori@ti.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
20
#include <linux/kernel.h>
21
#include <linux/debugfs.h>
22
#include <linux/dmaengine.h>
23
#include <linux/seq_file.h>
24
#include <linux/sizes.h>
25 26 27 28 29 30
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/clk.h>
31
#include <linux/of.h>
32
#include <linux/of_irq.h>
33 34
#include <linux/of_gpio.h>
#include <linux/of_device.h>
35
#include <linux/omap-dmaengine.h>
36
#include <linux/mmc/host.h>
37
#include <linux/mmc/core.h>
38
#include <linux/mmc/mmc.h>
39
#include <linux/mmc/slot-gpio.h>
40
#include <linux/io.h>
41
#include <linux/irq.h>
42 43
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
44
#include <linux/pinctrl/consumer.h>
45
#include <linux/pm_runtime.h>
46
#include <linux/pm_wakeirq.h>
47
#include <linux/platform_data/hsmmc-omap.h>
48 49

/* OMAP HSMMC Host Controller Registers */
50
#define OMAP_HSMMC_SYSSTATUS	0x0014
51
#define OMAP_HSMMC_CON		0x002C
52
#define OMAP_HSMMC_SDMASA	0x0100
53 54 55 56 57 58 59 60
#define OMAP_HSMMC_BLK		0x0104
#define OMAP_HSMMC_ARG		0x0108
#define OMAP_HSMMC_CMD		0x010C
#define OMAP_HSMMC_RSP10	0x0110
#define OMAP_HSMMC_RSP32	0x0114
#define OMAP_HSMMC_RSP54	0x0118
#define OMAP_HSMMC_RSP76	0x011C
#define OMAP_HSMMC_DATA		0x0120
61
#define OMAP_HSMMC_PSTATE	0x0124
62 63 64 65 66
#define OMAP_HSMMC_HCTL		0x0128
#define OMAP_HSMMC_SYSCTL	0x012C
#define OMAP_HSMMC_STAT		0x0130
#define OMAP_HSMMC_IE		0x0134
#define OMAP_HSMMC_ISE		0x0138
67
#define OMAP_HSMMC_AC12		0x013C
68 69 70 71
#define OMAP_HSMMC_CAPA		0x0140

#define VS18			(1 << 26)
#define VS30			(1 << 25)
72
#define HSS			(1 << 21)
73 74
#define SDVS18			(0x5 << 9)
#define SDVS30			(0x6 << 9)
75
#define SDVS33			(0x7 << 9)
76
#define SDVS_MASK		0x00000E00
77 78 79 80 81 82 83 84
#define SDVSCLR			0xFFFFF1FF
#define SDVSDET			0x00000400
#define AUTOIDLE		0x1
#define SDBP			(1 << 8)
#define DTO			0xe
#define ICE			0x1
#define ICS			0x2
#define CEN			(1 << 2)
85
#define CLKD_MAX		0x3FF		/* max clock divisor: 1023 */
86 87 88 89 90
#define CLKD_MASK		0x0000FFC0
#define CLKD_SHIFT		6
#define DTO_MASK		0x000F0000
#define DTO_SHIFT		16
#define INIT_STREAM		(1 << 1)
91
#define ACEN_ACMD23		(2 << 2)
92 93
#define DP_SELECT		(1 << 21)
#define DDIR			(1 << 4)
94
#define DMAE			0x1
95 96 97
#define MSBS			(1 << 5)
#define BCE			(1 << 1)
#define FOUR_BIT		(1 << 1)
98
#define HSPE			(1 << 2)
99
#define IWE			(1 << 24)
100
#define DDR			(1 << 19)
101 102
#define CLKEXTFREE		(1 << 16)
#define CTPL			(1 << 11)
103
#define DW8			(1 << 5)
104 105 106 107 108 109
#define OD			0x1
#define STAT_CLEAR		0xFFFFFFFF
#define INIT_STREAM_CMD		0x00000000
#define DUAL_VOLT_OCR_BIT	7
#define SRC			(1 << 25)
#define SRD			(1 << 26)
110
#define SOFTRESET		(1 << 1)
111

112 113 114
/* PSTATE */
#define DLEV_DAT(x)		(1 << (20 + (x)))

115 116 117 118 119
/* Interrupt masks for IE and ISE register */
#define CC_EN			(1 << 0)
#define TC_EN			(1 << 1)
#define BWR_EN			(1 << 4)
#define BRR_EN			(1 << 5)
120
#define CIRQ_EN			(1 << 8)
121 122 123 124 125 126 127 128
#define ERR_EN			(1 << 15)
#define CTO_EN			(1 << 16)
#define CCRC_EN			(1 << 17)
#define CEB_EN			(1 << 18)
#define CIE_EN			(1 << 19)
#define DTO_EN			(1 << 20)
#define DCRC_EN			(1 << 21)
#define DEB_EN			(1 << 22)
129
#define ACE_EN			(1 << 24)
130 131 132
#define CERR_EN			(1 << 28)
#define BADA_EN			(1 << 29)

133
#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
134 135 136
		DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
		BRR_EN | BWR_EN | TC_EN | CC_EN)

137 138 139 140 141 142 143
#define CNI	(1 << 7)
#define ACIE	(1 << 4)
#define ACEB	(1 << 3)
#define ACCE	(1 << 2)
#define ACTO	(1 << 1)
#define ACNE	(1 << 0)

144
#define MMC_AUTOSUSPEND_DELAY	100
145 146
#define MMC_TIMEOUT_MS		20		/* 20 mSec */
#define MMC_TIMEOUT_US		20000		/* 20000 micro Sec */
147 148
#define OMAP_MMC_MIN_CLOCK	400000
#define OMAP_MMC_MAX_CLOCK	52000000
149
#define DRIVER_NAME		"omap_hsmmc"
150

151 152 153 154
#define VDD_1V8			1800000		/* 180000 uV */
#define VDD_3V0			3000000		/* 300000 uV */
#define VDD_165_195		(ffs(MMC_VDD_165_195) - 1)

155 156 157 158 159
/*
 * One controller can have multiple slots, like on some omap boards using
 * omap.c controller driver. Luckily this is not currently done on any known
 * omap_hsmmc.c device.
 */
160
#define mmc_pdata(host)		host->pdata
161 162 163 164 165 166 167 168 169 170

/*
 * MMC Host controller read/write API's
 */
#define OMAP_HSMMC_READ(base, reg)	\
	__raw_readl((base) + OMAP_HSMMC_##reg)

#define OMAP_HSMMC_WRITE(base, reg, val) \
	__raw_writel((val), (base) + OMAP_HSMMC_##reg)

171 172 173 174 175
struct omap_hsmmc_next {
	unsigned int	dma_len;
	s32		cookie;
};

176
struct omap_hsmmc_host {
177 178 179 180 181 182 183
	struct	device		*dev;
	struct	mmc_host	*mmc;
	struct	mmc_request	*mrq;
	struct	mmc_command	*cmd;
	struct	mmc_data	*data;
	struct	clk		*fclk;
	struct	clk		*dbclk;
184
	struct	regulator	*pbias;
185
	void	__iomem		*base;
186
	int			vqmmc_enabled;
187
	resource_size_t		mapbase;
188
	spinlock_t		irq_lock; /* Prevent races with irq handler */
189
	unsigned int		dma_len;
190
	unsigned int		dma_sg_idx;
191
	unsigned char		bus_mode;
192
	unsigned char		power_mode;
193
	int			suspended;
194 195 196 197
	u32			con;
	u32			hctl;
	u32			sysctl;
	u32			capa;
198
	int			irq;
199
	int			wake_irq;
200
	int			use_dma, dma_ch;
201 202
	struct dma_chan		*tx_chan;
	struct dma_chan		*rx_chan;
203
	int			response_busy;
204
	int			context_loss;
205 206
	int			protect_card;
	int			reqs_blocked;
207
	int			req_in_progress;
208
	unsigned long		clk_rate;
209
	unsigned int		flags;
210 211
#define AUTO_CMD23		(1 << 0)        /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED	(1 << 1)        /* SDIO irq enabled */
212
	struct omap_hsmmc_next	next_data;
213
	struct	omap_hsmmc_platform_data	*pdata;
214 215 216 217 218 219 220

	/* return MMC cover switch state, can be NULL if not supported.
	 *
	 * possible return values:
	 *   0 - closed
	 *   1 - open
	 */
221
	int (*get_cover_state)(struct device *dev);
222

223
	int (*card_detect)(struct device *dev);
224 225
};

226 227 228 229 230
struct omap_mmc_of_data {
	u32 reg_offset;
	u8 controller_flags;
};

231 232
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);

233
static int omap_hsmmc_card_detect(struct device *dev)
234
{
235
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
236

237
	return mmc_gpio_get_cd(host->mmc);
238 239
}

240
static int omap_hsmmc_get_cover_state(struct device *dev)
241
{
242
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
243

244
	return mmc_gpio_get_cd(host->mmc);
245 246
}

247
static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
248 249
{
	int ret;
250
	struct omap_hsmmc_host *host = mmc_priv(mmc);
251
	struct mmc_ios *ios = &mmc->ios;
252 253

	if (mmc->supply.vmmc) {
254
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
255 256 257 258 259
		if (ret)
			return ret;
	}

	/* Enable interface voltage rail, if needed */
260
	if (mmc->supply.vqmmc && !host->vqmmc_enabled) {
261 262 263 264 265
		ret = regulator_enable(mmc->supply.vqmmc);
		if (ret) {
			dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
			goto err_vqmmc;
		}
266
		host->vqmmc_enabled = 1;
267 268 269 270 271 272 273 274 275 276 277 278 279 280 281
	}

	return 0;

err_vqmmc:
	if (mmc->supply.vmmc)
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	return ret;
}

/*
 * Power down the card supplies: drop the interface rail (vqmmc) first,
 * then clear the card supply (vmmc) OCR setting.
 *
 * Returns 0 on success or a negative error code.  If clearing the vmmc
 * OCR fails after vqmmc was disabled, vqmmc is re-enabled best-effort.
 */
static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
{
	int ret;
	int status;
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	/* Interface rail goes down before the card supply. */
	if (mmc->supply.vqmmc && host->vqmmc_enabled) {
		ret = regulator_disable(mmc->supply.vqmmc);
		if (ret) {
			dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
			return ret;
		}
		host->vqmmc_enabled = 0;
	}

	if (mmc->supply.vmmc) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (ret)
			goto err_set_ocr;
	}

	return 0;

err_set_ocr:
	/*
	 * Rollback: bring vqmmc back up so supplies stay consistent.
	 * NOTE(review): host->vqmmc_enabled is not set back to 1 here —
	 * looks intentional (best-effort path) but verify against callers.
	 */
	if (mmc->supply.vqmmc) {
		status = regulator_enable(mmc->supply.vqmmc);
		if (status)
			dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
	}

	return ret;
}

311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330
/*
 * Drive the SoC PBIAS cell that powers the MMC I/O pads.
 *
 * @power_on: true selects a pad voltage from @vdd (1.8V for low-voltage
 *            cards, 3.0V otherwise) and enables the cell; false disables
 *            it.  A missing pbias regulator is silently a no-op.
 *
 * Returns 0 on success or the regulator framework error.
 */
static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
				int vdd)
{
	int err;
	int uvolt;

	if (!host->pbias)
		return 0;

	if (!power_on) {
		/* Power down: drop PBIAS only if it is currently enabled. */
		if (regulator_is_enabled(host->pbias)) {
			err = regulator_disable(host->pbias);
			if (err) {
				dev_err(host->dev, "pbias reg disable fail\n");
				return err;
			}
		}
		return 0;
	}

	/* Low-voltage cards get the 1.8V pad supply, everything else 3.0V. */
	uvolt = (vdd <= VDD_165_195) ? VDD_1V8 : VDD_3V0;
	err = regulator_set_voltage(host->pbias, uvolt, uvolt);
	if (err < 0) {
		dev_err(host->dev, "pbias set voltage fail\n");
		return err;
	}

	if (!regulator_is_enabled(host->pbias)) {
		err = regulator_enable(host->pbias);
		if (err) {
			dev_err(host->dev, "pbias reg enable fail\n");
			return err;
		}
	}

	return 0;
}

351
/*
 * Top-level slot power switch used by the MMC core.
 *
 * Sequencing: PBIAS is always dropped first (pads must not be biased
 * while supplies change), then the card/interface supplies are switched,
 * and on power-up PBIAS is re-applied at the voltage matching @vdd.
 * Board hooks (set_power / before_set_reg / after_set_reg) from platform
 * data are honoured when present.
 *
 * Returns 0 on success or a negative error code.
 */
static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
{
	struct omap_hsmmc_host *host =
		platform_get_drvdata(to_platform_device(dev));
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* A board-supplied hook takes over the whole sequence. */
	if (mmc_pdata(host)->set_power)
		return mmc_pdata(host)->set_power(dev, power_on, vdd);

	/*
	 * If we don't see a Vcc regulator, assume it's a fixed
	 * voltage always-on regulator.
	 */
	if (!mmc->supply.vmmc)
		return 0;

	if (mmc_pdata(host)->before_set_reg)
		mmc_pdata(host)->before_set_reg(dev, power_on, vdd);

	/* Unbias the pads before touching any supply. */
	ret = omap_hsmmc_set_pbias(host, false, 0);
	if (ret)
		return ret;

	/*
	 * Assume Vcc regulator is used only to power the card ... OMAP
	 * VDDS is used to power the pins, optionally with a transceiver to
	 * support cards using voltages other than VDDS (1.8V nominal).  When a
	 * transceiver is used, DAT3..7 are muxed as transceiver control pins.
	 *
	 * In some cases this regulator won't support enable/disable;
	 * e.g. it's a fixed rail for a WLAN chip.
	 *
	 * In other cases vcc_aux switches interface power.  Example, for
	 * eMMC cards it represents VccQ.  Sometimes transceivers or SDIO
	 * chips/cards need an interface voltage rail too.
	 */
	if (power_on) {
		ret = omap_hsmmc_enable_supply(mmc);
		if (ret)
			return ret;

		/* Re-bias the pads at the new card voltage. */
		ret = omap_hsmmc_set_pbias(host, true, vdd);
		if (ret)
			goto err_set_voltage;
	} else {
		ret = omap_hsmmc_disable_supply(mmc);
		if (ret)
			return ret;
	}

	if (mmc_pdata(host)->after_set_reg)
		mmc_pdata(host)->after_set_reg(dev, power_on, vdd);

	return 0;

err_set_voltage:
	/* PBIAS failed after supplies came up: back the supplies out. */
	omap_hsmmc_disable_supply(mmc);

	return ret;
}

413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465
/*
 * Balance a regulator left enabled by the bootloader.
 *
 * A boot-enabled regulator reports regulator_is_enabled() true while the
 * kernel-side use count is still 0.  The deliberate enable + disable pair
 * below takes the use count to 1 and back to 0, physically switching the
 * supply off, so that from here on regulator_is_enabled() reflects what
 * this driver has actually requested.  (This is NOT a redundant pair.)
 */
static int omap_hsmmc_disable_boot_regulator(struct regulator *reg)
{
	int ret;

	/* Absent optional regulator: nothing to balance. */
	if (!reg)
		return 0;

	if (regulator_is_enabled(reg)) {
		ret = regulator_enable(reg);
		if (ret)
			return ret;

		ret = regulator_disable(reg);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Bring all three optional supplies (vmmc, vqmmc, pbias) that the
 * bootloader may have left on into a known-off state with a correct
 * use count, so later regulator_is_enabled() checks are trustworthy.
 * Stops and returns on the first failure.
 */
static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int err;

	/* Card supply first. */
	err = omap_hsmmc_disable_boot_regulator(mmc->supply.vmmc);
	if (err) {
		dev_err(host->dev, "fail to disable boot enabled vmmc reg\n");
		return err;
	}

	/* Then the interface rail. */
	err = omap_hsmmc_disable_boot_regulator(mmc->supply.vqmmc);
	if (err) {
		dev_err(host->dev,
			"fail to disable boot enabled vmmc_aux reg\n");
		return err;
	}

	/* Finally the pad-bias cell. */
	err = omap_hsmmc_disable_boot_regulator(host->pbias);
	if (err) {
		dev_err(host->dev,
			"failed to disable boot enabled pbias reg\n");
		return err;
	}

	return 0;
}

466 467
/*
 * Look up the optional regulators for this slot (vmmc, vmmc_aux/vqmmc,
 * pbias).  Each one is genuinely optional: -ENODEV is swallowed and the
 * corresponding pointer NULLed; any other error is propagated.  When a
 * vmmc regulator exists, its voltage range overrides the platform OCR
 * mask.  Finally, boot-enabled supplies are switched off unless platform
 * data forbids it (no_regulator_off_init, used for always-powered eMMC).
 */
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
	int ocr_value = 0;
	int ret;
	struct mmc_host *mmc = host->mmc;

	/* Board hook does its own power handling: skip regulator setup. */
	if (mmc_pdata(host)->set_power)
		return 0;

	mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
	if (IS_ERR(mmc->supply.vmmc)) {
		ret = PTR_ERR(mmc->supply.vmmc);
		if (ret != -ENODEV)
			return ret;
		dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
			PTR_ERR(mmc->supply.vmmc));
		mmc->supply.vmmc = NULL;
	} else {
		/* Derive the OCR mask from the regulator's voltage range. */
		ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ocr_value > 0)
			mmc_pdata(host)->ocr_mask = ocr_value;
	}

	/* Allow an aux regulator */
	mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
	if (IS_ERR(mmc->supply.vqmmc)) {
		ret = PTR_ERR(mmc->supply.vqmmc);
		if (ret != -ENODEV)
			return ret;
		dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
			PTR_ERR(mmc->supply.vqmmc));
		mmc->supply.vqmmc = NULL;
	}

	host->pbias = devm_regulator_get_optional(host->dev, "pbias");
	if (IS_ERR(host->pbias)) {
		ret = PTR_ERR(host->pbias);
		if (ret != -ENODEV)
			return ret;
		dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
			PTR_ERR(host->pbias));
		host->pbias = NULL;
	}

	/* For eMMC do not power off when not in sleep state */
	if (mmc_pdata(host)->no_regulator_off_init)
		return 0;

	ret = omap_hsmmc_disable_boot_regulators(host);
	if (ret)
		return ret;

	return 0;
}

521
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);
522 523 524

static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
				struct omap_hsmmc_host *host,
525
				struct omap_hsmmc_platform_data *pdata)
526 527 528
{
	int ret;

529 530
	if (gpio_is_valid(pdata->gpio_cod)) {
		ret = mmc_gpio_request_cd(mmc, pdata->gpio_cod, 0);
531 532
		if (ret)
			return ret;
533 534 535

		host->get_cover_state = omap_hsmmc_get_cover_state;
		mmc_gpio_set_cd_isr(mmc, omap_hsmmc_cover_irq);
536 537
	} else if (gpio_is_valid(pdata->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, pdata->gpio_cd, 0);
538 539 540 541
		if (ret)
			return ret;

		host->card_detect = omap_hsmmc_card_detect;
542
	}
543

544
	if (gpio_is_valid(pdata->gpio_wp)) {
545
		ret = mmc_gpio_request_ro(mmc, pdata->gpio_wp);
546
		if (ret)
547
			return ret;
548
	}
549 550 551 552

	return 0;
}

553 554 555 556 557 558 559 560 561
/*
 * Start clock to the card
 */
static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
}

562 563 564
/*
 * Stop clock to the card
 */
565
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
566 567 568 569
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
	if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
M
Masanari Iida 已提交
570
		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
571 572
}

573 574
/*
 * Program the interrupt enables (ISE/IE) for the command about to run.
 *
 * BRR/BWR (PIO buffer-ready) are masked when DMA is in use, and the data
 * timeout interrupt is masked for MMC_ERASE (erases may legitimately run
 * longer than the timeout counter).  CIRQ is set in IE but not ISE so a
 * pending SDIO card interrupt is latched without signalling the core.
 */
static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
				  struct mmc_command *cmd)
{
	u32 irq_mask = INT_EN_MASK;
	unsigned long flags;

	if (host->use_dma)
		irq_mask &= ~(BRR_EN | BWR_EN);

	/* Disable timeout for erases */
	if (cmd->opcode == MMC_ERASE)
		irq_mask &= ~DTO_EN;

	spin_lock_irqsave(&host->irq_lock, flags);
	/* Clear stale status before opening the enables. */
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* latch pending CIRQ, but don't signal MMC core */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

/*
 * Mask all controller interrupts after a request completes, keeping only
 * the SDIO card interrupt (CIRQ) alive when a function has it enabled.
 * Status is cleared last so nothing lingers for the next request.
 */
static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
{
	u32 irq_mask = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	/* no transfer running but need to keep cirq if enabled */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

612
/* Calculate divisor for the given clock frequency */
613
static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
614 615 616 617
{
	u16 dsor = 0;

	if (ios->clock) {
618
		dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
619 620
		if (dsor > CLKD_MAX)
			dsor = CLKD_MAX;
621 622 623 624 625
	}

	return dsor;
}

626 627 628 629 630
static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned long regval;
	unsigned long timeout;
631
	unsigned long clkdiv;
632

633
	dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
634 635 636 637 638

	omap_hsmmc_stop_clock(host);

	regval = OMAP_HSMMC_READ(host->base, SYSCTL);
	regval = regval & ~(CLKD_MASK | DTO_MASK);
639 640
	clkdiv = calc_divisor(host, ios);
	regval = regval | (clkdiv << 6) | (DTO << 16);
641 642 643 644 645 646 647 648 649 650
	OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);

	/* Wait till the ICS bit is set */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
		&& time_before(jiffies, timeout))
		cpu_relax();

651 652 653 654 655 656 657 658 659
	/*
	 * Enable High-Speed Support
	 * Pre-Requisites
	 *	- Controller should support High-Speed-Enable Bit
	 *	- Controller should not be using DDR Mode
	 *	- Controller should advertise that it supports High Speed
	 *	  in capabilities register
	 *	- MMC/SD clock coming out of controller > 25MHz
	 */
660
	if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) &&
661
	    (ios->timing != MMC_TIMING_MMC_DDR52) &&
662
	    (ios->timing != MMC_TIMING_UHS_DDR50) &&
663 664 665 666 667 668 669 670 671 672
	    ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
		regval = OMAP_HSMMC_READ(host->base, HCTL);
		if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
			regval |= HSPE;
		else
			regval &= ~HSPE;

		OMAP_HSMMC_WRITE(host->base, HCTL, regval);
	}

673 674 675
	omap_hsmmc_start_clock(host);
}

676 677 678 679 680 681
/*
 * Apply the bus width and DDR timing from ios to the controller:
 * DDR is a bit in CON; 8-bit mode is DW8 in CON, while 4-bit vs 1-bit
 * is the FOUR_BIT bit in HCTL (only touched for the non-8-bit cases).
 */
static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50)
		con |= DDR;	/* configure in DDR mode */
	else
		con &= ~DDR;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
		break;
	case MMC_BUS_WIDTH_4:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
		break;
	case MMC_BUS_WIDTH_1:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
		break;
	}
}

/*
 * Select push-pull vs open-drain CMD line drive (OD bit in CON)
 * according to the bus mode requested by the MMC core.
 */
static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con = OMAP_HSMMC_READ(host->base, CON);

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		con |= OD;
	else
		con &= ~OD;

	OMAP_HSMMC_WRITE(host->base, CON, con);
}

716 717 718 719 720 721
#ifdef CONFIG_PM

/*
 * Restore the MMC host context, if it was lost as result of a
 * power state change.
 */
722
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 hctl, capa;
	unsigned long timeout;

	/*
	 * If the saved CON/HCTL/SYSCTL/CAPA still match the hardware, no
	 * context was lost across the power transition — nothing to do.
	 */
	if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
	    host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
	    host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
	    host->capa == OMAP_HSMMC_READ(host->base, CAPA))
		return 0;

	host->context_loss++;

	/* Pick SD voltage/capability bits based on dual-volt support. */
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		if (host->power_mode != MMC_POWER_OFF &&
		    (1 << ios->vdd) <= MMC_VDD_23_24)
			hctl = SDVS18;
		else
			hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	/* Wakeup on SDIO card interrupt, if the slot supports it. */
	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		hctl |= IWE;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | hctl);

	OMAP_HSMMC_WRITE(host->base, CAPA,
			OMAP_HSMMC_READ(host->base, CAPA) | capa);

	/* Turn the bus power back on and wait (bounded) for SDBP to latch. */
	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | SDBP);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
		&& time_before(jiffies, timeout))
		;

	/* Quiesce and clear all interrupt state. */
	OMAP_HSMMC_WRITE(host->base, ISE, 0);
	OMAP_HSMMC_WRITE(host->base, IE, 0);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);

	/* Do not initialize card-specific things if the power is off */
	if (host->power_mode == MMC_POWER_OFF)
		goto out;

	omap_hsmmc_set_bus_width(host);

	omap_hsmmc_set_clock(host);

	omap_hsmmc_set_bus_mode(host);

out:
	dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
		host->context_loss);
	return 0;
}

/*
 * Save the MMC host context (store the number of power state changes so far).
 */
788
/*
 * Snapshot the four registers that omap_hsmmc_context_restore() compares
 * against to detect whether context was lost during a power transition.
 */
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
	host->con =  OMAP_HSMMC_READ(host->base, CON);
	host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
	host->sysctl =  OMAP_HSMMC_READ(host->base, SYSCTL);
	host->capa = OMAP_HSMMC_READ(host->base, CAPA);
}

#else

/* Without CONFIG_PM no context can be lost: both hooks are no-ops. */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	return 0;
}

static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}

#endif

809 810 811 812
/*
 * Send init stream sequence to card
 * before sending IDLE command
 */
813
static void send_init_stream(struct omap_hsmmc_host *host)
{
	int reg = 0;
	unsigned long timeout;

	if (host->protect_card)
		return;

	/* Completion is polled below, so keep the IRQ line quiet. */
	disable_irq(host->irq);

	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
	/* Raise INIT_STREAM in CON and issue the dummy command. */
	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);

	/* Poll (bounded by MMC_TIMEOUT_MS) for command-complete status. */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((reg != CC_EN) && time_before(jiffies, timeout))
		reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;

	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);

	/* Clear status; the dummy read flushes the posted write. */
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_READ(host->base, STAT);

	enable_irq(host->irq);
}

static inline
842
int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
843 844 845
{
	int r = 1;

846
	if (host->get_cover_state)
847
		r = host->get_cover_state(host->dev);
848 849 850 851
	return r;
}

static ssize_t
852
omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
853 854 855
			   char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
856
	struct omap_hsmmc_host *host = mmc_priv(mmc);
857

858 859
	return sprintf(buf, "%s\n",
			omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
860 861
}

862
static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);
863 864

static ssize_t
865
omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
866 867 868
			char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
869
	struct omap_hsmmc_host *host = mmc_priv(mmc);
870

871
	return sprintf(buf, "%s\n", mmc_pdata(host)->name);
872 873
}

874
static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
875 876 877 878 879

/*
 * Configure the response type and send the cmd.
 */
static void
omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
	struct mmc_data *data)
{
	int cmdreg = 0, resptype = 0, cmdtype = 0;

	dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
	host->cmd = cmd;

	omap_hsmmc_enable_irq(host, cmd);

	/*
	 * Map the MMC core response flags onto the controller's RSP_TYPE
	 * field: 1 = 136-bit, 3 = 48-bit with busy, 2 = plain 48-bit.
	 */
	host->response_busy = 0;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			resptype = 1;
		else if (cmd->flags & MMC_RSP_BUSY) {
			resptype = 3;
			host->response_busy = 1;
		} else
			resptype = 2;
	}

	/*
	 * Unlike OMAP1 controller, the cmdtype does not seem to be based on
	 * ac, bc, adtc, bcr. Only commands ending an open ended transfer need
	 * a val of 0x3, rest 0x0.
	 */
	if (cmd == host->mrq->stop)
		cmdtype = 0x3;

	cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);

	/* Auto-CMD23: block count is pre-loaded into SDMASA. */
	if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
	    host->mrq->sbc) {
		cmdreg |= ACEN_ACMD23;
		OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
	}
	if (data) {
		/* Data present: multi-block select, block-count enable. */
		cmdreg |= DP_SELECT | MSBS | BCE;
		if (data->flags & MMC_DATA_READ)
			cmdreg |= DDIR;
		else
			cmdreg &= ~(DDIR);
	}

	if (host->use_dma)
		cmdreg |= DMAE;

	host->req_in_progress = 1;

	/* Writing CMD launches the command — ARG must be set first. */
	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}

934
static int
935
omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
936 937 938 939 940 941 942
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

943 944 945 946 947 948
/* Pick the TX or RX dmaengine channel matching the transfer direction. */
static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
	struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return host->tx_chan;

	return host->rx_chan;
}

949 950 951
/*
 * Finish @mrq: mark no request in progress, quiesce interrupts and hand
 * the request back to the MMC core — unless a DMA transfer is still
 * running, in which case completion is left to the DMA side.
 */
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
	int dma_ch;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	host->req_in_progress = 0;
	dma_ch = host->dma_ch;	/* snapshot under the lock */
	spin_unlock_irqrestore(&host->irq_lock, flags);

	omap_hsmmc_disable_irq(host);
	/* Do not complete the request if DMA is still in progress */
	if (mrq->data && host->use_dma && dma_ch != -1)
		return;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
	/* Allow the controller to runtime-suspend again. */
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
}

969 970 971 972
/*
 * Notify the transfer complete to MMC core
 */
static void
omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	/* Transfer-complete with no data phase: busy-wait command ended. */
	if (!data) {
		struct mmc_request *mrq = host->mrq;

		/* TC before CC from CMD6 - don't know why, but it happens */
		if (host->cmd && host->cmd->opcode == 6 &&
		    host->response_busy) {
			host->response_busy = 0;
			return;
		}

		omap_hsmmc_request_done(host, mrq);
		return;
	}

	host->data = NULL;

	/* Account transferred bytes only on success. */
	if (!data->error)
		data->bytes_xfered += data->blocks * (data->blksz);
	else
		data->bytes_xfered = 0;

	/*
	 * Issue the stop command manually unless auto-CMD23 handled it;
	 * on error the stop is always sent.
	 */
	if (data->stop && (data->error || !host->mrq->sbc))
		omap_hsmmc_start_command(host, data->stop, NULL);
	else
		omap_hsmmc_request_done(host, data->mrq);
}

/*
 * Notify the core about command completion
 */
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
	/*
	 * A manually-issued CMD23 (sbc) just finished: now kick off the DMA
	 * and the actual data command.  Skipped when auto-CMD23 is in use.
	 */
	if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
	    !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
		host->cmd = NULL;
		omap_hsmmc_start_dma_transfer(host);
		omap_hsmmc_start_command(host, host->mrq->cmd,
						host->mrq->data);
		return;
	}

	host->cmd = NULL;

	/* Copy the response registers out in core-expected order. */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
			cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
			cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
		}
	}
	/* No data phase pending (or the command failed): finish the request. */
	if ((host->data == NULL && !host->response_busy) || cmd->error)
		omap_hsmmc_request_done(host, host->mrq);
}

/*
 * DMA clean up for command errors
 */
1038
/*
 * Tear down an in-flight DMA transfer after a command/data error:
 * record @errno on the data, claim the channel under the lock, then
 * terminate and unmap it.  Must be callable from interrupt context.
 *
 * NOTE(review): the parameter is named "errno", shadowing the C library
 * macro; harmless in kernel context but worth renaming eventually.
 */
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
	int dma_ch;
	unsigned long flags;

	host->data->error = errno;

	/* Atomically take ownership of the channel (-1 = none active). */
	spin_lock_irqsave(&host->irq_lock, flags);
	dma_ch = host->dma_ch;
	host->dma_ch = -1;
	spin_unlock_irqrestore(&host->irq_lock, flags);

	if (host->use_dma && dma_ch != -1) {
		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);

		dmaengine_terminate_all(chan);
		dma_unmap_sg(chan->device->dev,
			host->data->sg, host->data->sg_len,
			omap_hsmmc_get_dma_dir(host, host->data));

		host->data->host_cookie = 0;
	}
	host->data = NULL;
}

/*
 * Readable error output
 */
#ifdef CONFIG_MMC_DEBUG
/*
 * Decode a STAT register snapshot into its named status bits and emit
 * the result at verbose-debug level, e.g. "MMC IRQ 0x108001 : CC ...".
 */
static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
	/* --- means reserved bit without definition at documentation */
	static const char *omap_hsmmc_status_bits[] = {
		"CC"  , "TC"  , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
		"CIRQ",	"OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
		"CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
		"ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
	};
	char res[256];
	char *buf = res;
	int len, i;

	len = sprintf(buf, "MMC IRQ 0x%x :", status);
	buf += len;

	/* Append the name of every bit that is set in @status. */
	for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
		if (status & (1 << i)) {
			len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
			buf += len;
		}

	dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
#else
/* Without CONFIG_MMC_DEBUG the report compiles away entirely. */
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
					     u32 status)
{
}
#endif  /* CONFIG_MMC_DEBUG */

1098 1099 1100 1101 1102 1103 1104
/*
 * MMC controller internal state machines reset
 *
 * Used to reset command or data internal state machines, using respectively
 *  SRC or SRD bit of SYSCTL register
 * Can be called from interrupt context
 */
1105 1106
static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
						   unsigned long bit)
1107 1108
{
	unsigned long i = 0;
1109
	unsigned long limit = MMC_TIMEOUT_US;
1110 1111 1112 1113

	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);

1114 1115 1116 1117
	/*
	 * OMAP4 ES2 and greater has an updated reset logic.
	 * Monitor a 0->1 transition first
	 */
1118
	if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) {
1119
		while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
1120
					&& (i++ < limit))
1121
			udelay(1);
1122 1123 1124
	}
	i = 0;

1125 1126
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
		(i++ < limit))
1127
		udelay(1);
1128 1129 1130 1131 1132 1133

	if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
		dev_err(mmc_dev(host->mmc),
			"Timeout waiting on controller reset in %s\n",
			__func__);
}
1134

1135 1136
static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
					int err, int end_cmd)
1137
{
1138
	if (end_cmd) {
1139
		omap_hsmmc_reset_controller_fsm(host, SRC);
1140 1141 1142
		if (host->cmd)
			host->cmd->error = err;
	}
1143 1144 1145 1146

	if (host->data) {
		omap_hsmmc_reset_controller_fsm(host, SRD);
		omap_hsmmc_dma_cleanup(host, err);
1147 1148
	} else if (host->mrq && host->mrq->cmd)
		host->mrq->cmd->error = err;
1149 1150
}

1151
static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1152 1153
{
	struct mmc_data *data;
1154
	int end_cmd = 0, end_trans = 0;
1155
	int error = 0;
1156

1157
	data = host->data;
1158
	dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
1159

1160
	if (status & ERR_EN) {
1161
		omap_hsmmc_dbg_report_irq(host, status);
1162

1163
		if (status & (CTO_EN | CCRC_EN))
1164
			end_cmd = 1;
1165 1166 1167 1168
		if (host->data || host->response_busy) {
			end_trans = !end_cmd;
			host->response_busy = 0;
		}
1169
		if (status & (CTO_EN | DTO_EN))
1170
			hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
1171 1172
		else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
				   BADA_EN))
1173
			hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
1174

1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188
		if (status & ACE_EN) {
			u32 ac12;
			ac12 = OMAP_HSMMC_READ(host->base, AC12);
			if (!(ac12 & ACNE) && host->mrq->sbc) {
				end_cmd = 1;
				if (ac12 & ACTO)
					error =  -ETIMEDOUT;
				else if (ac12 & (ACCE | ACEB | ACIE))
					error = -EILSEQ;
				host->mrq->sbc->error = error;
				hsmmc_command_incomplete(host, error, end_cmd);
			}
			dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
		}
1189 1190
	}

1191
	OMAP_HSMMC_WRITE(host->base, STAT, status);
1192
	if (end_cmd || ((status & CC_EN) && host->cmd))
1193
		omap_hsmmc_cmd_done(host, host->cmd);
1194
	if ((end_trans || (status & TC_EN)) && host->mrq)
1195
		omap_hsmmc_xfer_done(host, data);
1196
}
1197

1198 1199 1200 1201 1202 1203 1204 1205 1206
/*
 * MMC controller IRQ handler
 */
static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;
	int status;

	status = OMAP_HSMMC_READ(host->base, STAT);
1207 1208 1209 1210 1211 1212
	while (status & (INT_EN_MASK | CIRQ_EN)) {
		if (host->req_in_progress)
			omap_hsmmc_do_irq(host, status);

		if (status & CIRQ_EN)
			mmc_signal_sdio_irq(host->mmc);
1213

1214 1215
		/* Flush posted write */
		status = OMAP_HSMMC_READ(host->base, STAT);
1216
	}
1217

1218 1219 1220
	return IRQ_HANDLED;
}

1221
static void set_sd_bus_power(struct omap_hsmmc_host *host)
A
Adrian Hunter 已提交
1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233
{
	unsigned long i;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
	for (i = 0; i < loops_per_jiffy; i++) {
		if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
			break;
		cpu_relax();
	}
}

1234
/*
1235 1236 1237 1238 1239
 * Switch MMC interface voltage ... only relevant for MMC1.
 *
 * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
 * The MMC2 transceiver controls are used instead of DAT4..DAT7.
 * Some chips, like eMMC ones, use internal transceivers.
1240
 */
1241
static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1242 1243 1244 1245 1246
{
	u32 reg_val = 0;
	int ret;

	/* Disable the clocks */
1247
	pm_runtime_put_sync(host->dev);
1248
	if (host->dbclk)
1249
		clk_disable_unprepare(host->dbclk);
1250 1251

	/* Turn the power off */
1252
	ret = omap_hsmmc_set_power(host->dev, 0, 0);
1253 1254

	/* Turn the power ON with given VDD 1.8 or 3.0v */
1255
	if (!ret)
1256
		ret = omap_hsmmc_set_power(host->dev, 1, vdd);
1257
	pm_runtime_get_sync(host->dev);
1258
	if (host->dbclk)
1259
		clk_prepare_enable(host->dbclk);
1260

1261 1262 1263 1264 1265 1266
	if (ret != 0)
		goto err;

	OMAP_HSMMC_WRITE(host->base, HCTL,
		OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
	reg_val = OMAP_HSMMC_READ(host->base, HCTL);
1267

1268 1269 1270
	/*
	 * If a MMC dual voltage card is detected, the set_ios fn calls
	 * this fn with VDD bit set for 1.8V. Upon card removal from the
1271
	 * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
1272
	 *
1273 1274 1275 1276 1277 1278 1279 1280 1281
	 * Cope with a bit of slop in the range ... per data sheets:
	 *  - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
	 *    but recommended values are 1.71V to 1.89V
	 *  - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
	 *    but recommended values are 2.7V to 3.3V
	 *
	 * Board setup code shouldn't permit anything very out-of-range.
	 * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
	 * middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
1282
	 */
1283
	if ((1 << vdd) <= MMC_VDD_23_24)
1284
		reg_val |= SDVS18;
1285 1286
	else
		reg_val |= SDVS30;
1287 1288

	OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
A
Adrian Hunter 已提交
1289
	set_sd_bus_power(host);
1290 1291 1292

	return 0;
err:
1293
	dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
1294 1295 1296
	return ret;
}

1297 1298 1299
/* Protect the card while the cover is open */
static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
{
	if (!host->get_cover_state)
		return;

	host->reqs_blocked = 0;
	if (host->get_cover_state(host->dev)) {
		if (host->protect_card) {
			dev_info(host->dev, "%s: cover is closed, "
					 "card is now accessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 0;
		}
	} else {
		if (!host->protect_card) {
			dev_info(host->dev, "%s: cover is open, "
					 "card is now inaccessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 1;
		}
	}
}

1321
/*
1322
 * irq handler when (cell-phone) cover is mounted/removed
1323
 */
1324
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id)
1325
{
1326
	struct omap_hsmmc_host *host = dev_id;
1327 1328

	sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
1329

1330 1331
	omap_hsmmc_protect_card(host);
	mmc_detect_change(host->mmc, (HZ * 200) / 1000);
1332 1333 1334
	return IRQ_HANDLED;
}

1335
static void omap_hsmmc_dma_callback(void *param)
1336
{
1337 1338
	struct omap_hsmmc_host *host = param;
	struct dma_chan *chan;
1339
	struct mmc_data *data;
1340
	int req_in_progress;
1341

1342
	spin_lock_irq(&host->irq_lock);
1343
	if (host->dma_ch < 0) {
1344
		spin_unlock_irq(&host->irq_lock);
1345
		return;
1346
	}
1347

1348
	data = host->mrq->data;
1349
	chan = omap_hsmmc_get_dma_chan(host, data);
1350
	if (!data->host_cookie)
1351 1352
		dma_unmap_sg(chan->device->dev,
			     data->sg, data->sg_len,
1353
			     omap_hsmmc_get_dma_dir(host, data));
1354 1355

	req_in_progress = host->req_in_progress;
1356
	host->dma_ch = -1;
1357
	spin_unlock_irq(&host->irq_lock);
1358 1359 1360 1361 1362 1363 1364

	/* If DMA has finished after TC, complete the request */
	if (!req_in_progress) {
		struct mmc_request *mrq = host->mrq;

		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
1365 1366
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1367
	}
1368 1369
}

1370 1371
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
				       struct mmc_data *data,
1372
				       struct omap_hsmmc_next *next,
1373
				       struct dma_chan *chan)
1374 1375 1376 1377 1378
{
	int dma_len;

	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
1379
		dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
1380 1381 1382 1383 1384 1385
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
1386
	if (next || data->host_cookie != host->next_data.cookie) {
1387
		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407
				     omap_hsmmc_get_dma_dir(host, data));

	} else {
		dma_len = host->next_data.dma_len;
		host->next_data.dma_len = 0;
	}


	if (dma_len == 0)
		return -EINVAL;

	if (next) {
		next->dma_len = dma_len;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->dma_len = dma_len;

	return 0;
}

1408 1409 1410
/*
 * Routine to configure and start DMA for the MMC card
 */
1411
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
1412
					struct mmc_request *req)
1413
{
1414 1415 1416
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *tx;
	int ret = 0, i;
1417
	struct mmc_data *data = req->data;
1418
	struct dma_chan *chan;
1419

1420
	/* Sanity check: all the SG entries must be aligned by block size. */
1421
	for (i = 0; i < data->sg_len; i++) {
1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433
		struct scatterlist *sgl;

		sgl = data->sg + i;
		if (sgl->length % data->blksz)
			return -EINVAL;
	}
	if ((data->blksz % 4) != 0)
		/* REVISIT: The MMC buffer increments only when MSB is written.
		 * Return error for blksz which is non multiple of four.
		 */
		return -EINVAL;

1434
	BUG_ON(host->dma_ch != -1);
1435

1436 1437
	chan = omap_hsmmc_get_dma_chan(host, data);

1438 1439 1440 1441 1442 1443
	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;
1444

1445 1446
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
1447
		return ret;
1448

1449
	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
1450 1451
	if (ret)
		return ret;
1452

1453 1454 1455 1456 1457 1458 1459 1460
	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		/* FIXME: cleanup */
		return -1;
	}
1461

1462 1463
	tx->callback = omap_hsmmc_dma_callback;
	tx->callback_param = host;
1464

1465 1466
	/* Does not fail */
	dmaengine_submit(tx);
1467

1468
	host->dma_ch = 1;
1469

1470 1471 1472
	return 0;
}

1473
static void set_data_timeout(struct omap_hsmmc_host *host,
1474 1475
			     unsigned int timeout_ns,
			     unsigned int timeout_clks)
1476 1477 1478 1479 1480 1481 1482 1483 1484
{
	unsigned int timeout, cycle_ns;
	uint32_t reg, clkd, dto = 0;

	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
	if (clkd == 0)
		clkd = 1;

1485
	cycle_ns = 1000000000 / (host->clk_rate / clkd);
1486 1487
	timeout = timeout_ns / cycle_ns;
	timeout += timeout_clks;
1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509
	if (timeout) {
		while ((timeout & 0x80000000) == 0) {
			dto += 1;
			timeout <<= 1;
		}
		dto = 31 - dto;
		timeout <<= 1;
		if (timeout && dto)
			dto += 1;
		if (dto >= 13)
			dto -= 13;
		else
			dto = 0;
		if (dto > 14)
			dto = 14;
	}

	reg &= ~DTO_MASK;
	reg |= dto << DTO_SHIFT;
	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}

1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524
/*
 * Program BLK (block size + count) and the data timeout for the current
 * request, then kick the previously submitted DMA descriptor. No-op for
 * requests without data.
 */
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
{
	struct mmc_request *req = host->mrq;
	struct dma_chan *chan;

	if (!req->data)
		return;
	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
				| (req->data->blocks << 16));
	set_data_timeout(host, req->data->timeout_ns,
				req->data->timeout_clks);
	chan = omap_hsmmc_get_dma_chan(host, req->data);
	dma_async_issue_pending(chan);
}

1525 1526 1527 1528
/*
 * Configure block length for MMC/SD cards and initiate the transfer.
 */
static int
1529
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
1530 1531 1532 1533 1534 1535
{
	int ret;
	host->data = req->data;

	if (req->data == NULL) {
		OMAP_HSMMC_WRITE(host->base, BLK, 0);
1536 1537 1538 1539 1540 1541
		/*
		 * Set an arbitrary 100ms data timeout for commands with
		 * busy signal.
		 */
		if (req->cmd->flags & MMC_RSP_BUSY)
			set_data_timeout(host, 100000000U, 0);
1542 1543 1544 1545
		return 0;
	}

	if (host->use_dma) {
1546
		ret = omap_hsmmc_setup_dma_transfer(host, req);
1547
		if (ret != 0) {
1548
			dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
1549 1550 1551 1552 1553 1554
			return ret;
		}
	}
	return 0;
}

1555 1556 1557 1558 1559 1560
static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

1561
	if (host->use_dma && data->host_cookie) {
1562 1563
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);

1564 1565
		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));
1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579
		data->host_cookie = 0;
	}
}

/*
 * mmc_host_ops.pre_req: map the next request's scatterlist ahead of time so
 * the DMA setup in the request path can reuse it (tracked via host_cookie).
 */
static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return ;
	}

	if (host->use_dma) {
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);

		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
						&host->next_data, c))
			mrq->data->host_cookie = 0;
	}
}

1589 1590 1591
/*
 * Request function. for read/write operation
 */
1592
static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1593
{
1594
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1595
	int err;
1596

1597 1598
	BUG_ON(host->req_in_progress);
	BUG_ON(host->dma_ch != -1);
1599
	pm_runtime_get_sync(host->dev);
1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615
	if (host->protect_card) {
		if (host->reqs_blocked < 3) {
			/*
			 * Ensure the controller is left in a consistent
			 * state by resetting the command and data state
			 * machines.
			 */
			omap_hsmmc_reset_controller_fsm(host, SRD);
			omap_hsmmc_reset_controller_fsm(host, SRC);
			host->reqs_blocked += 1;
		}
		req->cmd->error = -EBADF;
		if (req->data)
			req->data->error = -EBADF;
		req->cmd->retries = 0;
		mmc_request_done(mmc, req);
1616 1617
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1618 1619 1620
		return;
	} else if (host->reqs_blocked)
		host->reqs_blocked = 0;
1621 1622
	WARN_ON(host->mrq != NULL);
	host->mrq = req;
1623
	host->clk_rate = clk_get_rate(host->fclk);
1624
	err = omap_hsmmc_prepare_data(host, req);
1625 1626 1627 1628 1629 1630
	if (err) {
		req->cmd->error = err;
		if (req->data)
			req->data->error = err;
		host->mrq = NULL;
		mmc_request_done(mmc, req);
1631 1632
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
1633 1634
		return;
	}
1635
	if (req->sbc && !(host->flags & AUTO_CMD23)) {
1636 1637 1638
		omap_hsmmc_start_command(host, req->sbc, NULL);
		return;
	}
1639

1640
	omap_hsmmc_start_dma_transfer(host);
1641
	omap_hsmmc_start_command(host, req->cmd, req->data);
1642 1643 1644
}

/* Routine to configure clock values. Exposed API to core */
1645
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1646
{
1647
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1648
	int do_send_init_stream = 0;
1649

1650
	pm_runtime_get_sync(host->dev);
1651

1652 1653 1654
	if (ios->power_mode != host->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
1655
			omap_hsmmc_set_power(host->dev, 0, 0);
1656 1657
			break;
		case MMC_POWER_UP:
1658
			omap_hsmmc_set_power(host->dev, 1, ios->vdd);
1659 1660 1661 1662 1663 1664
			break;
		case MMC_POWER_ON:
			do_send_init_stream = 1;
			break;
		}
		host->power_mode = ios->power_mode;
1665 1666
	}

1667 1668
	/* FIXME: set registers based only on changes to ios */

1669
	omap_hsmmc_set_bus_width(host);
1670

1671
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1672 1673 1674
		/* Only MMC1 can interface at 3V without some flavor
		 * of external transceiver; but they all handle 1.8V.
		 */
1675
		if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
1676
			(ios->vdd == DUAL_VOLT_OCR_BIT)) {
1677 1678 1679 1680 1681 1682
				/*
				 * The mmc_select_voltage fn of the core does
				 * not seem to set the power_mode to
				 * MMC_POWER_UP upon recalculating the voltage.
				 * vdd 1.8v.
				 */
1683 1684
			if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
				dev_dbg(mmc_dev(host->mmc),
1685 1686 1687 1688
						"Switch operation failed\n");
		}
	}

1689
	omap_hsmmc_set_clock(host);
1690

1691
	if (do_send_init_stream)
1692 1693
		send_init_stream(host);

1694
	omap_hsmmc_set_bus_mode(host);
1695

1696
	pm_runtime_put_autosuspend(host->dev);
1697 1698 1699 1700
}

static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
1701
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1702

1703
	if (!host->card_detect)
1704
		return -ENOSYS;
1705
	return host->card_detect(host->dev);
1706 1707
}

1708 1709 1710 1711
static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

1712 1713
	if (mmc_pdata(host)->init_card)
		mmc_pdata(host)->init_card(card);
1714 1715
}

1716 1717 1718
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
1719
	u32 irq_mask, con;
1720 1721 1722 1723
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

1724
	con = OMAP_HSMMC_READ(host->base, CON);
1725 1726 1727 1728
	irq_mask = OMAP_HSMMC_READ(host->base, ISE);
	if (enable) {
		host->flags |= HSMMC_SDIO_IRQ_ENABLED;
		irq_mask |= CIRQ_EN;
1729
		con |= CTPL | CLKEXTFREE;
1730 1731 1732
	} else {
		host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
		irq_mask &= ~CIRQ_EN;
1733
		con &= ~(CTPL | CLKEXTFREE);
1734
	}
1735
	OMAP_HSMMC_WRITE(host->base, CON, con);
1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);

	/*
	 * if enable, piggy back detection on current request
	 * but always disable immediately
	 */
	if (!host->req_in_progress || !enable)
		OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* flush posted write */
	OMAP_HSMMC_READ(host->base, IE);

	spin_unlock_irqrestore(&host->irq_lock, flags);
}

static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
	int ret;

	/*
	 * For omaps with wake-up path, wakeirq will be irq from pinctrl and
	 * for other omaps, wakeirq will be from GPIO (dat line remuxed to
	 * gpio). wakeirq is needed to detect sdio irq in runtime suspend state
	 * with functional clock disabled.
	 */
	if (!host->dev->of_node || !host->wake_irq)
		return -ENODEV;

1764
	ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
1765 1766 1767 1768 1769 1770 1771 1772 1773 1774
	if (ret) {
		dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
		goto err;
	}

	/*
	 * Some omaps don't have wake-up path from deeper idle states
	 * and need to remux SDIO DAT1 to GPIO for wake-up from idle.
	 */
	if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793
		struct pinctrl *p = devm_pinctrl_get(host->dev);
		if (!p) {
			ret = -ENODEV;
			goto err_free_irq;
		}
		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
			dev_info(host->dev, "missing default pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}

		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
			dev_info(host->dev, "missing idle pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}
		devm_pinctrl_put(p);
1794 1795
	}

1796 1797
	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | IWE);
1798 1799
	return 0;

1800
err_free_irq:
1801
	dev_pm_clear_wake_irq(host->dev);
1802 1803 1804 1805 1806 1807
err:
	dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
	host->wake_irq = 0;
	return ret;
}

1808
static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
1809 1810 1811 1812
{
	u32 hctl, capa, value;

	/* Only MMC1 supports 3.0V */
1813
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827
		hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
	OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);

	value = OMAP_HSMMC_READ(host->base, CAPA);
	OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);

	/* Set SD bus power bit */
A
Adrian Hunter 已提交
1828
	set_sd_bus_power(host);
1829 1830
}

1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841
/*
 * multi_io_quirk hook installed only on controllers flagged with
 * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: forces single-block reads by
 * returning a block count of 1 for the read direction.
 */
static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
				     unsigned int direction, int blk_size)
{
	/* This controller can't do multiblock reads due to hw bugs */
	if (direction == MMC_DATA_READ)
		return 1;

	return blk_size;
}

static struct mmc_host_ops omap_hsmmc_ops = {
1842 1843
	.post_req = omap_hsmmc_post_req,
	.pre_req = omap_hsmmc_pre_req,
1844 1845
	.request = omap_hsmmc_request,
	.set_ios = omap_hsmmc_set_ios,
1846
	.get_cd = omap_hsmmc_get_cd,
1847
	.get_ro = mmc_gpio_get_ro,
1848
	.init_card = omap_hsmmc_init_card,
1849
	.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
1850 1851
};

1852 1853
#ifdef CONFIG_DEBUG_FS

/* Dump host state and key controller registers to debugfs "regs" file */
static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
{
	struct mmc_host *mmc = s->private;
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	seq_printf(s, "mmc%d:\n", mmc->index);
	seq_printf(s, "sdio irq mode\t%s\n",
		   (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");

	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		seq_printf(s, "sdio irq \t%s\n",
			   (host->flags & HSMMC_SDIO_IRQ_ENABLED) ?  "enabled"
			   : "disabled");
	}
	seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);

	/* Registers are only readable with the functional clock running */
	pm_runtime_get_sync(host->dev);
	seq_puts(s, "\nregs:\n");
	seq_printf(s, "CON:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CON));
	seq_printf(s, "PSTATE:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, PSTATE));
	seq_printf(s, "HCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, HCTL));
	seq_printf(s, "SYSCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, SYSCTL));
	seq_printf(s, "IE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, IE));
	seq_printf(s, "ISE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, ISE));
	seq_printf(s, "CAPA:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CAPA));

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;
}

static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, omap_hsmmc_regs_show, inode->i_private);
}

static const struct file_operations mmc_regs_fops = {
	.open           = omap_hsmmc_regs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
	if (mmc->debugfs_root)
		debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
			mmc, &mmc_regs_fops);
}

#else

static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}

#endif

1920
#ifdef CONFIG_OF
1921 1922 1923 1924 1925 1926 1927 1928
static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
	/* See 35xx errata 2.1.1.128 in SPRZ278F */
	.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};

static const struct omap_mmc_of_data omap4_mmc_of_data = {
	.reg_offset = 0x100,
};
1929 1930 1931 1932
static const struct omap_mmc_of_data am33xx_mmc_of_data = {
	.reg_offset = 0x100,
	.controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
};
1933 1934 1935 1936 1937

static const struct of_device_id omap_mmc_of_match[] = {
	{
		.compatible = "ti,omap2-hsmmc",
	},
1938 1939 1940 1941
	{
		.compatible = "ti,omap3-pre-es3-hsmmc",
		.data = &omap3_pre_es3_mmc_of_data,
	},
1942 1943 1944 1945 1946
	{
		.compatible = "ti,omap3-hsmmc",
	},
	{
		.compatible = "ti,omap4-hsmmc",
1947
		.data = &omap4_mmc_of_data,
1948
	},
1949 1950 1951 1952
	{
		.compatible = "ti,am33xx-hsmmc",
		.data = &am33xx_mmc_of_data,
	},
1953
	{},
1954
};
1955 1956
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);

1957
static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
1958
{
1959
	struct omap_hsmmc_platform_data *pdata;
1960 1961 1962 1963
	struct device_node *np = dev->of_node;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
1964
		return ERR_PTR(-ENOMEM); /* out of memory */
1965 1966 1967 1968

	if (of_find_property(np, "ti,dual-volt", NULL))
		pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;

1969 1970
	pdata->gpio_cd = -EINVAL;
	pdata->gpio_cod = -EINVAL;
1971
	pdata->gpio_wp = -EINVAL;
1972 1973

	if (of_find_property(np, "ti,non-removable", NULL)) {
1974 1975
		pdata->nonremovable = true;
		pdata->no_regulator_off_init = true;
1976 1977 1978
	}

	if (of_find_property(np, "ti,needs-special-reset", NULL))
1979
		pdata->features |= HSMMC_HAS_UPDATED_RESET;
1980

1981
	if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
1982
		pdata->features |= HSMMC_HAS_HSPE_SUPPORT;
1983

1984 1985 1986
	return pdata;
}
#else
1987
static inline struct omap_hsmmc_platform_data
1988 1989
			*of_get_hsmmc_pdata(struct device *dev)
{
1990
	return ERR_PTR(-EINVAL);
1991 1992 1993
}
#endif

1994
static int omap_hsmmc_probe(struct platform_device *pdev)
1995
{
1996
	struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data;
1997
	struct mmc_host *mmc;
1998
	struct omap_hsmmc_host *host = NULL;
1999
	struct resource *res;
2000
	int ret, irq;
2001
	const struct of_device_id *match;
2002 2003
	dma_cap_mask_t mask;
	unsigned tx_req, rx_req;
2004
	const struct omap_mmc_of_data *data;
2005
	void __iomem *base;
2006 2007 2008 2009

	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
	if (match) {
		pdata = of_get_hsmmc_pdata(&pdev->dev);
2010 2011 2012 2013

		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

2014
		if (match->data) {
2015 2016 2017
			data = match->data;
			pdata->reg_offset = data->reg_offset;
			pdata->controller_flags |= data->controller_flags;
2018 2019
		}
	}
2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform Data is missing\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

2031 2032 2033
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
2034

2035
	mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
2036 2037
	if (!mmc) {
		ret = -ENOMEM;
2038
		goto err;
2039 2040
	}

2041 2042 2043 2044
	ret = mmc_of_parse(mmc);
	if (ret)
		goto err1;

2045 2046 2047 2048 2049 2050 2051
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->pdata	= pdata;
	host->dev	= &pdev->dev;
	host->use_dma	= 1;
	host->dma_ch	= -1;
	host->irq	= irq;
2052
	host->mapbase	= res->start + pdata->reg_offset;
2053
	host->base	= base + pdata->reg_offset;
2054
	host->power_mode = MMC_POWER_OFF;
2055
	host->next_data.cookie = 1;
2056
	host->vqmmc_enabled = 0;
2057

2058
	ret = omap_hsmmc_gpio_init(mmc, host, pdata);
2059 2060 2061
	if (ret)
		goto err_gpio;

2062 2063
	platform_set_drvdata(pdev, host);

2064 2065 2066
	if (pdev->dev.of_node)
		host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);

2067
	mmc->ops	= &omap_hsmmc_ops;
2068

2069 2070 2071 2072
	mmc->f_min = OMAP_MMC_MIN_CLOCK;

	if (pdata->max_freq > 0)
		mmc->f_max = pdata->max_freq;
2073
	else if (mmc->f_max == 0)
2074
		mmc->f_max = OMAP_MMC_MAX_CLOCK;
2075

2076
	spin_lock_init(&host->irq_lock);
2077

2078
	host->fclk = devm_clk_get(&pdev->dev, "fck");
2079 2080 2081 2082 2083 2084
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		host->fclk = NULL;
		goto err1;
	}

2085 2086
	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
2087
		omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
2088
	}
2089

2090
	device_init_wakeup(&pdev->dev, true);
2091 2092 2093 2094
	pm_runtime_enable(host->dev);
	pm_runtime_get_sync(host->dev);
	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(host->dev);
2095

2096 2097
	omap_hsmmc_context_save(host);

2098
	host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
2099 2100 2101 2102 2103
	/*
	 * MMC can still work without debounce clock.
	 */
	if (IS_ERR(host->dbclk)) {
		host->dbclk = NULL;
2104
	} else if (clk_prepare_enable(host->dbclk) != 0) {
2105 2106
		dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
		host->dbclk = NULL;
2107
	}
2108

2109 2110
	/* Since we do only SG emulation, we can have as many segs
	 * as we want. */
2111
	mmc->max_segs = 1024;
2112

2113 2114 2115 2116 2117
	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

2118
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
2119
		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
2120

2121
	mmc->caps |= mmc_pdata(host)->caps;
2122
	if (mmc->caps & MMC_CAP_8_BIT_DATA)
2123 2124
		mmc->caps |= MMC_CAP_4_BIT_DATA;

2125
	if (mmc_pdata(host)->nonremovable)
2126 2127
		mmc->caps |= MMC_CAP_NONREMOVABLE;

2128
	mmc->pm_caps |= mmc_pdata(host)->pm_caps;
2129

2130
	omap_hsmmc_conf_bus_power(host);
2131

2132 2133 2134 2135 2136 2137 2138 2139
	if (!pdev->dev.of_node) {
		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		tx_req = res->start;
2140

2141 2142 2143 2144 2145 2146 2147
		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		rx_req = res->start;
2148
	}
2149

2150 2151 2152
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

2153 2154 2155 2156
	host->rx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &rx_req, &pdev->dev, "rx");

2157 2158
	if (!host->rx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
2159
		ret = -ENXIO;
2160 2161 2162
		goto err_irq;
	}

2163 2164 2165 2166
	host->tx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &tx_req, &pdev->dev, "tx");

2167 2168
	if (!host->tx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
2169
		ret = -ENXIO;
2170
		goto err_irq;
2171
	}
2172 2173

	/* Request IRQ for MMC operations */
2174
	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
2175 2176
			mmc_hostname(mmc), host);
	if (ret) {
2177
		dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
2178 2179 2180
		goto err_irq;
	}

2181 2182 2183
	ret = omap_hsmmc_reg_get(host);
	if (ret)
		goto err_irq;
2184

2185
	mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
2186

2187
	omap_hsmmc_disable_irq(host);
2188

2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200
	/*
	 * For now, only support SDIO interrupt if we have a separate
	 * wake-up interrupt configured from device tree. This is because
	 * the wake-up interrupt is needed for idle state and some
	 * platforms need special quirks. And we don't want to add new
	 * legacy mux platform init code callbacks any longer as we
	 * are moving to DT based booting anyways.
	 */
	ret = omap_hsmmc_configure_wake_irq(host);
	if (!ret)
		mmc->caps |= MMC_CAP_SDIO_IRQ;

2201 2202
	omap_hsmmc_protect_card(host);

2203 2204
	mmc_add_host(mmc);

2205
	if (mmc_pdata(host)->name != NULL) {
2206 2207 2208 2209
		ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
		if (ret < 0)
			goto err_slot_name;
	}
2210
	if (host->get_cover_state) {
2211
		ret = device_create_file(&mmc->class_dev,
2212
					 &dev_attr_cover_switch);
2213
		if (ret < 0)
2214
			goto err_slot_name;
2215 2216
	}

2217
	omap_hsmmc_debugfs(mmc);
2218 2219
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
2220

2221 2222 2223 2224 2225
	return 0;

err_slot_name:
	mmc_remove_host(mmc);
err_irq:
2226
	device_init_wakeup(&pdev->dev, false);
2227 2228 2229 2230
	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);
2231
	pm_runtime_put_sync(host->dev);
2232
	pm_runtime_disable(host->dev);
2233
	if (host->dbclk)
2234
		clk_disable_unprepare(host->dbclk);
2235
err1:
2236
err_gpio:
2237
	mmc_free_host(mmc);
2238 2239 2240 2241
err:
	return ret;
}

2242
static int omap_hsmmc_remove(struct platform_device *pdev)
2243
{
2244
	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2245

2246 2247
	pm_runtime_get_sync(host->dev);
	mmc_remove_host(host->mmc);
2248

2249 2250 2251 2252 2253
	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);

2254 2255
	pm_runtime_put_sync(host->dev);
	pm_runtime_disable(host->dev);
2256
	device_init_wakeup(&pdev->dev, false);
2257
	if (host->dbclk)
2258
		clk_disable_unprepare(host->dbclk);
2259

2260
	mmc_free_host(host->mmc);
2261

2262 2263 2264
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * omap_hsmmc_suspend - system-sleep suspend handler.
 *
 * Unless the core asked us to keep power for SDIO (MMC_PM_KEEP_POWER),
 * mask and clear all controller interrupts and cut SD bus power (SDBP).
 * The debounce clock is stopped in either case.  Register access is
 * bracketed by a runtime-PM get/put so the module clocks are running.
 * Always returns 0.
 */
static int omap_hsmmc_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	pm_runtime_get_sync(host->dev);

	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		/* Drop SD bus power */
		OMAP_HSMMC_WRITE(host->base, HCTL,
				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
	}

	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	pm_runtime_put_sync(host->dev);
	return 0;
}

/* Routine to resume the MMC device */
static int omap_hsmmc_resume(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	pm_runtime_get_sync(host->dev);

	if (host->dbclk)
		clk_prepare_enable(host->dbclk);

	/* Restore bus power only if it was dropped in suspend */
	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
		omap_hsmmc_conf_bus_power(host);

	omap_hsmmc_protect_card(host);
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
	return 0;
}
#endif

static int omap_hsmmc_runtime_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host;
2316
	unsigned long flags;
2317
	int ret = 0;
2318 2319 2320

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_save(host);
2321
	dev_dbg(dev, "disabled\n");
2322

2323 2324 2325 2326 2327 2328
	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* disable sdio irq handling to prevent race */
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);
2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343

		if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
			/*
			 * dat1 line low, pending sdio irq
			 * race condition: possible irq handler running on
			 * multi-core, abort
			 */
			dev_dbg(dev, "pending sdio irq, abort suspend\n");
			OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
			OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
			OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
			pm_runtime_mark_last_busy(dev);
			ret = -EBUSY;
			goto abort;
		}
2344

2345 2346 2347
		pinctrl_pm_select_idle_state(dev);
	} else {
		pinctrl_pm_select_idle_state(dev);
2348
	}
2349

2350
abort:
2351
	spin_unlock_irqrestore(&host->irq_lock, flags);
2352
	return ret;
2353 2354 2355 2356 2357
}

static int omap_hsmmc_runtime_resume(struct device *dev)
{
	struct omap_hsmmc_host *host;
2358
	unsigned long flags;
2359 2360 2361

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_restore(host);
2362
	dev_dbg(dev, "enabled\n");
2363

2364 2365 2366 2367
	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {

2368 2369 2370
		pinctrl_pm_select_default_state(host->dev);

		/* irq lost, if pinmux incorrect */
2371 2372 2373
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
		OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
2374 2375
	} else {
		pinctrl_pm_select_default_state(host->dev);
2376 2377
	}
	spin_unlock_irqrestore(&host->irq_lock, flags);
2378 2379 2380
	return 0;
}

static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2382
	SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
2383 2384
	.runtime_suspend = omap_hsmmc_runtime_suspend,
	.runtime_resume = omap_hsmmc_runtime_resume,
2385 2386 2387
};

static struct platform_driver omap_hsmmc_driver = {
2388
	.probe		= omap_hsmmc_probe,
2389
	.remove		= omap_hsmmc_remove,
2390 2391
	.driver		= {
		.name = DRIVER_NAME,
2392
		.pm = &omap_hsmmc_dev_pm_ops,
2393
		.of_match_table = of_match_ptr(omap_mmc_of_match),
2394 2395 2396
	},
};

module_platform_driver(omap_hsmmc_driver);

MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");