spi-imx.c
/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Juergen Beisert
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation
 * 51 Franklin Street, Fifth Floor
 * Boston, MA  02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>

#define DRIVER_NAME "spi_imx"

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */

/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES  (1 << 15)
#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
struct spi_imx_config {
	unsigned int speed_hz;
	unsigned int bpw;
	unsigned int mode;
	u8 cs;
};

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
	int (*config)(struct spi_imx_data *, struct spi_imx_config *);
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
	void (*reset)(struct spi_imx_data *);
	enum spi_imx_devtype devtype;
};

struct spi_imx_data {
	struct spi_bitbang bitbang;

	struct completion xfer_done;
	void __iomem *base;
	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;

	unsigned int count;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */

	/* DMA */
	unsigned int dma_is_inited;
	unsigned int dma_finished;
	bool usedma;
	u32 wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
	int chipselect[0];
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
	return is_imx51_ecspi(d) ? 64 : 8;
}

#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
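/*
 * Walk the divider table and return the smallest index whose divider
 * brings the input clock fin down to at most the requested rate fspi,
 * or max if even the largest divider is not enough; the index is what
 * gets programmed into the DR field of CSPICTRL.
 */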
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			return i;

	return max;
}

/* MX1, MX31, MX35, MX51 CSPI */
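/*
 * Return the 3-bit data-rate field for the power-of-two divider: the
 * divider applied is 2^(i + 2), so i = 0 divides by 4 and the result
 * is clamped to 7 (divide by 512).
 */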
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			return i;
		div <<= 1;
	}

	return 7;
}

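/*
 * DMA is only worth the setup cost when the channels were successfully
 * initialized and the transfer is larger than the FIFO watermark (wml)
 * worth of 32-bit words; anything smaller is handled in PIO mode.
 */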
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			 struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (spi_imx->dma_is_inited &&
	    transfer->len > spi_imx->wml * sizeof(u32))
		return true;
	return false;
}

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 <<  0)
#define MX51_ECSPI_CTRL_XCH		(1 <<  2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) +  0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) +  4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) +  8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 <<  0)
#define MX51_ECSPI_INT_RREN		(1 <<  3)

#define MX51_ECSPI_DMA      0x14
#define MX51_ECSPI_DMA_TX_WML_OFFSET	0
#define MX51_ECSPI_DMA_TX_WML_MASK	0x3F
#define MX51_ECSPI_DMA_RX_WML_OFFSET	16
#define MX51_ECSPI_DMA_RX_WML_MASK	(0x3F << 16)
#define MX51_ECSPI_DMA_RXT_WML_OFFSET	24
#define MX51_ECSPI_DMA_RXT_WML_MASK	(0x3F << 24)

#define MX51_ECSPI_DMA_TEDEN_OFFSET	7
#define MX51_ECSPI_DMA_RXDEN_OFFSET	23
#define MX51_ECSPI_DMA_RXTDEN_OFFSET	31

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 <<  3)

#define MX51_ECSPI_TESTREG	0x20
#define MX51_ECSPI_TESTREG_LBC	BIT(31)

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
				      unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
				__func__, fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}

static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

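/*
 * Kick off a transfer: in PIO mode set XCH to start the exchange
 * explicitly; in DMA mode set SMC instead, so the hardware starts
 * shifting as soon as data is written to the TX FIFO, and clear SMC
 * again once the DMA transfer has finished.
 */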
static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);

	if (!spi_imx->usedma)
		reg |= MX51_ECSPI_CTRL_XCH;
	else if (!spi_imx->dma_finished)
		reg |= MX51_ECSPI_CTRL_SMC;
	else
		reg &= ~MX51_ECSPI_CTRL_SMC;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
	u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
	u32 clk = config->speed_hz, delay, reg;

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/* set clock speed */
	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(config->cs);

	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);

	if (config->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);

	if (config->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
	}
	if (config->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);

	/* The CTRL register always goes first to bring the controller out of reset */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (config->mode & SPI_LOOP)
		reg |= MX51_ECSPI_TESTREG_LBC;
	else
		reg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(reg, spi_imx->base + MX51_ECSPI_TESTREG);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we wait two SCLK ticks just to be sure. The
	 * delay it takes for the hardware to apply the changes is
	 * noticeable if the SCLK clock runs very slowly. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO chip select might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	if (spi_imx->dma_is_inited) {
		dma = readl(spi_imx->base + MX51_ECSPI_DMA);

		rx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
		tx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
		rxt_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
		dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
			   & ~MX51_ECSPI_DMA_RX_WML_MASK
			   & ~MX51_ECSPI_DMA_RXT_WML_MASK)
			   | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
			   |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
			   |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
			   |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);

		writel(dma, spi_imx->base + MX51_ECSPI_DMA);
	}

	return 0;
}

static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX31_CSPICTRL_DR_SHIFT;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (config->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
		MX21_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (cs < 0)
		reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX1_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.config = mx1_config,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.devtype = IMX51_ECSPI,
};

static const struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};

static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

	if (!gpio_is_valid(gpio))
		return;

	gpio_set_value(gpio, dev_is_lowactive ^ active);
}

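/*
 * Fill the TX FIFO with as many words as fit (or as many as remain in
 * the transfer), then trigger the controller to start shifting them out.
 */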
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
		if (!spi_imx->count)
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	spi_imx->devtype_data->trigger(spi_imx);
}

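/*
 * PIO interrupt handler: drain whatever the controller has received,
 * refill the TX FIFO while data is left, and signal xfer_done once the
 * last word has been read back.
 */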
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}

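/*
 * Per-transfer setup: pick the 8/16/32-bit FIFO access helpers that
 * match bits_per_word and program the controller through the
 * devtype-specific config hook.
 */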
static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	struct spi_imx_config config;

	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
	config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
	config.mode = spi->mode;
	config.cs = spi->chip_select;

	if (!config.speed_hz)
		config.speed_hz = spi->max_speed_hz;
	if (!config.bpw)
		config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (config.bpw <= 8) {
		spi_imx->rx = spi_imx_buf_rx_u8;
		spi_imx->tx = spi_imx_buf_tx_u8;
	} else if (config.bpw <= 16) {
		spi_imx->rx = spi_imx_buf_rx_u16;
		spi_imx->tx = spi_imx_buf_tx_u16;
	} else {
		spi_imx->rx = spi_imx_buf_rx_u32;
		spi_imx->tx = spi_imx_buf_tx_u32;
	}

	spi_imx->devtype_data->config(spi_imx, &config);

	return 0;
}

static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
	struct spi_master *master = spi_imx->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}

	spi_imx->dma_is_inited = 0;
}

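/*
 * Request the TX and RX slave channels described in the device tree and
 * configure them for byte-wide accesses to the data registers, using the
 * FIFO watermark as the burst size. Any failure leaves the driver in
 * PIO-only operation.
 */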
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master,
			     const struct resource *res)
{
	struct dma_slave_config slave_config = {};
	int ret;

	/* use pio mode for i.mx6dl chip TKT238285 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		return 0;

	spi_imx->wml = spi_imx_get_fifosize(spi_imx) / 2;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!master->dma_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.\n");
		goto err;
	}

	/* Prepare for RX : */
	master->dma_rx = dma_request_slave_channel(dev, "rx");
	if (!master->dma_rx) {
		dev_dbg(dev, "cannot get the DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;
	spi_imx->dma_is_inited = 1;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}

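/*
 * Map the transfer's scatterlists onto the TX and RX channels, lower the
 * RXT watermark for the tail that does not fill a whole burst, start RX
 * before TX, and wait for both completions with a timeout before
 * restoring the watermark.
 */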
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	unsigned long timeout;
	u32 dma;
	int left;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					tx->sgl, tx->nents, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto no_dma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					rx->sgl, rx->nents, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto no_dma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;

	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
	/* Change RX_DMA_LENGTH trigger dma fetch tail data */
	left = transfer->len % spi_imx->wml;
	if (left)
		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
				spi_imx->base + MX51_ECSPI_DMA);
	/*
	 * Keep this order to avoid a potential RX overflow. The overflow may
	 * happen if we enable the SPI HW before starting RX DMA, due to
	 * rescheduling for another task and/or interrupt.
	 * So the RX DMA is enabled first, to make sure data is read out of the
	 * FIFO ASAP. TX DMA is enabled next, to start filling the TX FIFO with
	 * new data. And finally the SPI HW is enabled to start the actual data
	 * transfer.
	 */
	dma_async_issue_pending(master->dma_rx);
	dma_async_issue_pending(master->dma_tx);
	spi_imx->devtype_data->trigger(spi_imx);

	/* Wait for the SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						IMX_DMA_TIMEOUT);
	if (!timeout) {
		pr_warn("%s %s: I/O Error in DMA TX\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev));
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
	} else {
		timeout = wait_for_completion_timeout(
				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
		if (!timeout) {
			pr_warn("%s %s: I/O Error in DMA RX\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev));
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		}
		dma &= ~MX51_ECSPI_DMA_RXT_WML_MASK;
		writel(dma |
		       spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
		       spi_imx->base + MX51_ECSPI_DMA);
	}

	spi_imx->dma_finished = 1;
	spi_imx->devtype_data->trigger(spi_imx);

	if (!timeout)
		ret = -ETIMEDOUT;
	else
		ret = transfer->len;

	return ret;

no_dma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}

static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;

	reinit_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	wait_for_completion(&spi_imx->xfer_done);

	return transfer->len;
}

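/*
 * Transfer dispatch: use DMA when the controller and the transfer
 * qualify, and fall back to PIO when DMA is unavailable or the DMA path
 * returns -EAGAIN.
 */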
static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	int ret;
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (spi_imx->bitbang.master->can_dma &&
	    spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
		spi_imx->usedma = true;
		ret = spi_imx_dma_transfer(spi_imx, transfer);
		if (ret != -EAGAIN)
			return ret;
	}
	spi_imx->usedma = false;

	return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];

	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	if (gpio_is_valid(gpio))
		gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);

	spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);

	return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

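/*
 * The per and ipg clocks stay prepared for the lifetime of the driver
 * but are only enabled while a message is being processed, keeping them
 * gated between transfers.
 */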
static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_imx->clk_ipg);
	if (ret) {
		clk_disable(spi_imx->clk_per);
		return ret;
	}

	return 0;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return 0;
}

static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int i, ret, num_cs, irq;

	if (!np && !mxc_platform_info) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
	if (ret < 0) {
		if (mxc_platform_info)
			num_cs = mxc_platform_info->num_chipselect;
		else
			return ret;
	}

	master = spi_alloc_master(&pdev->dev,
			sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = pdev->id;
	master->num_chipselect = num_cs;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;

	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;

	for (i = 0; i < master->num_chipselect; i++) {
		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
		if (!gpio_is_valid(cs_gpio) && mxc_platform_info)
			cs_gpio = mxc_platform_info->chipselect[i];

		spi_imx->chipselect[i] = cs_gpio;
		if (!gpio_is_valid(cs_gpio))
			continue;

		ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "can't get cs gpios\n");
			goto out_master_put;
		}
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	if (is_imx51_ecspi(spi_imx))
		spi_imx->bitbang.master->mode_bits |= SPI_LOOP;

	init_completion(&spi_imx->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx6 now, can remove the constraint if validated on
	 * other chips.
	 */
	if (is_imx51_ecspi(spi_imx) &&
	    spi_imx_sdma_init(&pdev->dev, spi_imx, master, res))
		dev_err(&pdev->dev, "dma setup error, use pio instead\n");

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	dev_info(&pdev->dev, "probed\n");

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return ret;

out_clk_put:
	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	spi_bitbang_stop(&spi_imx->bitbang);

	writel(0, spi_imx->base + MXC_CSPICTRL);
	clk_unprepare(spi_imx->clk_ipg);
	clk_unprepare(spi_imx->clk_per);
	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}

static struct platform_driver spi_imx_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = spi_imx_dt_ids,
		   },
	.id_table = spi_imx_devtype,
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);