spi-imx.c 33.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Juergen Beisert
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation
 * 51 Franklin Street, Fifth Floor
 * Boston, MA  02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
R
Robin Gong 已提交
24 25
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
26 27 28 29 30 31 32 33
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
34
#include <linux/slab.h>
35 36 37
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
38 39 40
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
41

R
Robin Gong 已提交
42
#include <linux/platform_data/dma-imx.h>
43
#include <linux/platform_data/spi-imx.h>
44 45 46 47 48 49 50 51 52 53 54 55 56

#define DRIVER_NAME "spi_imx"

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */

R
Robin Gong 已提交
57 58
/* The maximum  bytes that a sdma BD can transfer.*/
#define MAX_SDMA_BD_BYTES  (1 << 15)
59
struct spi_imx_config {
60 61 62 63 64
	unsigned int speed_hz;
	unsigned int bpw;
	unsigned int mode;
};

65
enum spi_imx_devtype {
66 67 68 69 70 71
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 and later */
72 73 74 75 76 77
};

struct spi_imx_data;

struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
78
	int (*config)(struct spi_device *, struct spi_imx_config *);
79 80
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
81
	void (*reset)(struct spi_imx_data *);
82
	enum spi_imx_devtype devtype;
83 84
};

85
struct spi_imx_data {
86
	struct spi_bitbang bitbang;
87
	struct device *dev;
88 89

	struct completion xfer_done;
90
	void __iomem *base;
91 92
	unsigned long base_phys;

93 94
	struct clk *clk_per;
	struct clk *clk_ipg;
95
	unsigned long spi_clk;
96
	unsigned int spi_bus_clk;
97

98 99
	unsigned int bytes_per_word;

100
	unsigned int count;
101 102
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
103 104 105 106
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */

R
Robin Gong 已提交
107 108
	/* DMA */
	bool usedma;
109
	u32 wml;
R
Robin Gong 已提交
110 111 112
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

113
	const struct spi_imx_devtype_data *devtype_data;
114 115
};

116 117 118 119 120 121 122 123 124 125
static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

126 127 128 129 130
static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

131 132
static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
133
	return is_imx51_ecspi(d) ? 64 : 8;
134 135
}

136
#define MXC_SPI_BUF_RX(type)						\
137
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
138
{									\
139
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
140
									\
141 142 143
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
144 145 146 147
	}								\
}

#define MXC_SPI_BUF_TX(type)						\
148
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
149 150 151
{									\
	type val = 0;							\
									\
152 153 154
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
155 156
	}								\
									\
157
	spi_imx->count -= sizeof(type);					\
158
									\
159
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
176
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
177
		unsigned int fspi, unsigned int max)
178
{
179
	int i;
180 181 182 183 184 185 186 187

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			return i;

	return max;
}

188
/* MX1, MX31, MX35, MX51 CSPI */
189
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
190 191 192 193 194 195 196 197 198 199 200 201 202
		unsigned int fspi)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			return i;
		div <<= 1;
	}

	return 7;
}

203 204 205 206 207
static int spi_imx_bytes_per_word(const int bpw)
{
	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}

R
Robin Gong 已提交
208 209 210 211
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			 struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
212
	unsigned int bpw;
213 214 215 216

	if (!master->dma_rx)
		return false;

217 218 219 220
	if (!transfer)
		return false;

	bpw = transfer->bits_per_word;
221 222 223 224 225 226 227 228 229 230 231 232 233
	if (!bpw)
		bpw = spi->bits_per_word;

	bpw = spi_imx_bytes_per_word(bpw);

	if (bpw != 1 && bpw != 2 && bpw != 4)
		return false;

	if (transfer->len < spi_imx->wml * bpw)
		return false;

	if (transfer->len % (spi_imx->wml * bpw))
		return false;
R
Robin Gong 已提交
234

235
	return true;
R
Robin Gong 已提交
236 237
}

238 239 240
#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 <<  0)
#define MX51_ECSPI_CTRL_XCH		(1 <<  2)
R
Robin Gong 已提交
241
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
242 243 244 245 246 247 248 249 250 251 252
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) +  0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) +  4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) +  8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
253
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))
254 255 256 257 258

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 <<  0)
#define MX51_ECSPI_INT_RREN		(1 <<  3)

R
Robin Gong 已提交
259
#define MX51_ECSPI_DMA      0x14
260 261 262
#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)
R
Robin Gong 已提交
263

264 265 266
#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)
R
Robin Gong 已提交
267

268 269
#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 <<  3)
270

271 272 273
#define MX51_ECSPI_TESTREG	0x20
#define MX51_ECSPI_TESTREG_LBC	BIT(31)

274
/* MX51 eCSPI */
275 276
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
277 278 279 280 281 282
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
283
	unsigned int fin = spi_imx->spi_clk;
284 285 286 287 288 289 290 291 292 293 294 295

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
296 297
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
				fspi, fin);
298 299 300 301 302
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

303
	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
304
			__func__, fin, fspi, post, pre);
305 306 307 308

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

309 310
	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
311 312
}

313
static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
314 315 316 317
{
	unsigned val = 0;

	if (enable & MXC_INT_TE)
318
		val |= MX51_ECSPI_INT_TEEN;
319 320

	if (enable & MXC_INT_RR)
321
		val |= MX51_ECSPI_INT_RREN;
322

323
	writel(val, spi_imx->base + MX51_ECSPI_INT);
324 325
}

326
static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
327
{
328
	u32 reg;
R
Robin Gong 已提交
329

330 331
	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
	reg |= MX51_ECSPI_CTRL_XCH;
332
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
333 334
}

335 336
static int __maybe_unused mx51_ecspi_config(struct spi_device *spi,
					    struct spi_imx_config *config)
337
{
338
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
339
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
340
	u32 clk = config->speed_hz, delay, reg;
341
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
342

343 344 345 346 347 348 349
	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
350
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
351 352

	/* set clock speed */
353
	ctrl |= mx51_ecspi_clkdiv(spi_imx, config->speed_hz, &clk);
354
	spi_imx->spi_bus_clk = clk;
355 356

	/* set chip select to use */
357
	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
358

359
	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
360

361
	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
362 363

	if (config->mode & SPI_CPHA)
364
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
365
	else
366
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
367

368
	if (config->mode & SPI_CPOL) {
369 370
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
371
	} else {
372 373
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
374
	}
375
	if (config->mode & SPI_CS_HIGH)
376
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
377
	else
378
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
379

380 381 382
	if (spi_imx->usedma)
		ctrl |= MX51_ECSPI_CTRL_SMC;

383 384 385
	/* CTRL register always go first to bring out controller from reset */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

386 387 388 389 390 391 392
	reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (config->mode & SPI_LOOP)
		reg |= MX51_ECSPI_TESTREG_LBC;
	else
		reg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(reg, spi_imx->base + MX51_ECSPI_TESTREG);

393
	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
394

395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411
	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clock just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticable if the SCLK clock run very slow. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

R
Robin Gong 已提交
412 413 414 415
	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
416

417 418 419
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
420 421
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
R
Robin Gong 已提交
422

423 424 425
	return 0;
}

426
static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
427
{
428
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
429 430
}

431
static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
432 433
{
	/* drain receive buffer */
434
	while (mx51_ecspi_rx_available(spi_imx))
435 436 437
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460
#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
461
static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
462 463 464 465 466 467 468 469
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

470
	writel(val, spi_imx->base + MXC_CSPIINT);
471 472
}

473
static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
474 475 476
{
	unsigned int reg;

477
	reg = readl(spi_imx->base + MXC_CSPICTRL);
478
	reg |= MX31_CSPICTRL_XCH;
479
	writel(reg, spi_imx->base + MXC_CSPICTRL);
480 481
}

482 483
static int __maybe_unused mx31_config(struct spi_device *spi,
				      struct spi_imx_config *config)
484
{
485
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
486 487 488 489 490
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX31_CSPICTRL_DR_SHIFT;

491
	if (is_imx35_cspi(spi_imx)) {
492 493 494 495 496
		reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
	}
497 498 499 500 501 502 503

	if (config->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
504 505
	if (spi->cs_gpio < 0)
		reg |= (spi->cs_gpio + 32) <<
506 507
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);
508 509 510 511 512 513

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

514
static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
515
{
516
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
517 518
}

519
static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
520 521
{
	/* drain receive buffer */
522
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
523 524 525
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

526 527 528 529 530 531 532 533 534 535 536 537 538 539
#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
540 541 542 543
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
544
		val |= MX21_INTREG_TEEN;
545
	if (enable & MXC_INT_RR)
546
		val |= MX21_INTREG_RREN;
547

548
	writel(val, spi_imx->base + MXC_CSPIINT);
549 550
}

551
static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
552 553 554
{
	unsigned int reg;

555
	reg = readl(spi_imx->base + MXC_CSPICTRL);
556
	reg |= MX21_CSPICTRL_XCH;
557
	writel(reg, spi_imx->base + MXC_CSPICTRL);
558 559
}

560 561
static int __maybe_unused mx21_config(struct spi_device *spi,
				      struct spi_imx_config *config)
562
{
563
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
564
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
565
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
566

567
	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
568
		MX21_CSPICTRL_DR_SHIFT;
569 570 571
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
572
		reg |= MX21_CSPICTRL_PHA;
573
	if (config->mode & SPI_CPOL)
574
		reg |= MX21_CSPICTRL_POL;
575
	if (config->mode & SPI_CS_HIGH)
576
		reg |= MX21_CSPICTRL_SSPOL;
577 578
	if (spi->cs_gpio < 0)
		reg |= (spi->cs_gpio + 32) << MX21_CSPICTRL_CS_SHIFT;
579

580
	writel(reg, spi_imx->base + MXC_CSPICTRL);
581 582 583 584

	return 0;
}

585
static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
586
{
587
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
588 589
}

590
static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
591 592 593 594
{
	writel(1, spi_imx->base + MXC_RESET);
}

595 596 597 598 599 600 601 602 603 604 605
#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

606
static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
607 608 609 610 611 612 613 614
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

615
	writel(val, spi_imx->base + MXC_CSPIINT);
616 617
}

618
static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
619 620 621
{
	unsigned int reg;

622
	reg = readl(spi_imx->base + MXC_CSPICTRL);
623
	reg |= MX1_CSPICTRL_XCH;
624
	writel(reg, spi_imx->base + MXC_CSPICTRL);
625 626
}

627 628
static int __maybe_unused mx1_config(struct spi_device *spi,
				     struct spi_imx_config *config)
629
{
630
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
631 632
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;

633
	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
634 635 636 637 638 639 640 641
		MX1_CSPICTRL_DR_SHIFT;
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

642
	writel(reg, spi_imx->base + MXC_CSPICTRL);
643 644 645 646

	return 0;
}

647
static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
648
{
649
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
650 651
}

652 653 654 655 656
static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.config = mx1_config,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.devtype = IMX51_ECSPI,
};

713
static const struct platform_device_id spi_imx_devtype[] = {
714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
735 736
};

737 738 739 740 741 742 743 744 745
static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ /* sentinel */ }
};
746
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
747

748
static void spi_imx_chipselect(struct spi_device *spi, int is_active)
749
{
750 751
	int active = is_active != BITBANG_CS_INACTIVE;
	int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
752

753
	if (!gpio_is_valid(spi->cs_gpio))
754 755
		return;

756
	gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
757 758
}

759
static void spi_imx_push(struct spi_imx_data *spi_imx)
760
{
761
	while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
762
		if (!spi_imx->count)
763
			break;
764 765
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
766 767
	}

768
	spi_imx->devtype_data->trigger(spi_imx);
769 770
}

771
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
772
{
773
	struct spi_imx_data *spi_imx = dev_id;
774

775
	while (spi_imx->devtype_data->rx_available(spi_imx)) {
776 777
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
778 779
	}

780 781
	if (spi_imx->count) {
		spi_imx_push(spi_imx);
782 783 784
		return IRQ_HANDLED;
	}

785
	if (spi_imx->txfifo) {
786 787 788
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
789
		spi_imx->devtype_data->intctrl(
790
				spi_imx, MXC_INT_RR);
791 792 793
		return IRQ_HANDLED;
	}

794
	spi_imx->devtype_data->intctrl(spi_imx, 0);
795
	complete(&spi_imx->xfer_done);
796 797 798 799

	return IRQ_HANDLED;
}

800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850
static int spi_imx_dma_configure(struct spi_master *master,
				 int bytes_per_word)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (bytes_per_word == spi_imx->bytes_per_word)
		/* Same as last time */
		return 0;

	switch (bytes_per_word) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_tx, &tx);
	if (ret) {
		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_rx, &rx);
	if (ret) {
		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
		return ret;
	}

	spi_imx->bytes_per_word = bytes_per_word;

	return 0;
}

851
static int spi_imx_setupxfer(struct spi_device *spi,
852 853
				 struct spi_transfer *t)
{
854 855
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	struct spi_imx_config config;
856
	int ret;
857 858 859 860 861

	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
	config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
	config.mode = spi->mode;

S
Sascha Hauer 已提交
862 863 864 865 866
	if (!config.speed_hz)
		config.speed_hz = spi->max_speed_hz;
	if (!config.bpw)
		config.bpw = spi->bits_per_word;

867 868 869 870 871 872 873
	/* Initialize the functions for transfer */
	if (config.bpw <= 8) {
		spi_imx->rx = spi_imx_buf_rx_u8;
		spi_imx->tx = spi_imx_buf_tx_u8;
	} else if (config.bpw <= 16) {
		spi_imx->rx = spi_imx_buf_rx_u16;
		spi_imx->tx = spi_imx_buf_tx_u16;
874
	} else {
875 876
		spi_imx->rx = spi_imx_buf_rx_u32;
		spi_imx->tx = spi_imx_buf_tx_u32;
877
	}
878

879 880 881 882 883
	if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
		spi_imx->usedma = 1;
	else
		spi_imx->usedma = 0;

884 885 886 887 888 889 890
	if (spi_imx->usedma) {
		ret = spi_imx_dma_configure(spi->master,
					    spi_imx_bytes_per_word(config.bpw));
		if (ret)
			return ret;
	}

891
	spi_imx->devtype_data->config(spi, &config);
892 893 894 895

	return 0;
}

R
Robin Gong 已提交
896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911
static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
	struct spi_master *master = spi_imx->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}
}

static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
912
			     struct spi_master *master)
R
Robin Gong 已提交
913 914 915
{
	int ret;

R
Robin Gong 已提交
916 917 918 919
	/* use pio mode for i.mx6dl chip TKT238285 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		return 0;

920 921
	spi_imx->wml = spi_imx_get_fifosize(spi_imx) / 2;

R
Robin Gong 已提交
922
	/* Prepare for TX DMA: */
923 924 925 926 927
	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		master->dma_tx = NULL;
R
Robin Gong 已提交
928 929 930 931
		goto err;
	}

	/* Prepare for RX : */
932 933 934 935 936
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx)) {
		ret = PTR_ERR(master->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		master->dma_rx = NULL;
R
Robin Gong 已提交
937 938 939
		goto err;
	}

940
	spi_imx_dma_configure(master, 1);
R
Robin Gong 已提交
941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}

969 970 971 972 973 974 975 976 977 978 979 980 981 982
static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
{
	unsigned long timeout = 0;

	/* Time with actual data transfer and CS change delay related to HW */
	timeout = (8 + 4) * size / spi_imx->spi_bus_clk;

	/* Add extra second for scheduler related activities */
	timeout += 1;

	/* Double calculated timeout */
	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}

R
Robin Gong 已提交
983 984 985
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
986
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
987
	unsigned long transfer_timeout;
988
	unsigned long timeout;
R
Robin Gong 已提交
989 990 991
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

992 993 994 995 996 997 998 999 1000
	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;
R
Robin Gong 已提交
1001

1002 1003 1004 1005 1006
	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(master->dma_rx);
R
Robin Gong 已提交
1007

1008 1009 1010 1011 1012 1013
	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(master->dma_tx);
		return -EINVAL;
R
Robin Gong 已提交
1014 1015
	}

1016 1017 1018
	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_tx);
R
Robin Gong 已提交
1019
	reinit_completion(&spi_imx->dma_tx_completion);
1020
	dma_async_issue_pending(master->dma_tx);
R
Robin Gong 已提交
1021

1022 1023
	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

R
Robin Gong 已提交
1024
	/* Wait SDMA to finish the data transfer.*/
1025
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
1026
						transfer_timeout);
1027
	if (!timeout) {
1028
		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
R
Robin Gong 已提交
1029
		dmaengine_terminate_all(master->dma_tx);
1030
		dmaengine_terminate_all(master->dma_rx);
1031
		return -ETIMEDOUT;
R
Robin Gong 已提交
1032 1033
	}

1034 1035 1036 1037 1038 1039 1040 1041
	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&master->dev, "I/O Error in DMA RX\n");
		spi_imx->devtype_data->reset(spi_imx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}
R
Robin Gong 已提交
1042

1043
	return transfer->len;
R
Robin Gong 已提交
1044 1045 1046
}

static int spi_imx_pio_transfer(struct spi_device *spi,
1047 1048
				struct spi_transfer *transfer)
{
1049
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1050

1051 1052 1053 1054
	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
1055

1056
	reinit_completion(&spi_imx->xfer_done);
1057

1058
	spi_imx_push(spi_imx);
1059

1060
	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
1061

1062
	wait_for_completion(&spi_imx->xfer_done);
1063 1064 1065 1066

	return transfer->len;
}

R
Robin Gong 已提交
1067 1068 1069 1070 1071
static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

1072
	if (spi_imx->usedma)
S
Sascha Hauer 已提交
1073
		return spi_imx_dma_transfer(spi_imx, transfer);
1074 1075
	else
		return spi_imx_pio_transfer(spi, transfer);
R
Robin Gong 已提交
1076 1077
}

1078
static int spi_imx_setup(struct spi_device *spi)
1079
{
1080
	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
1081 1082
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

1083 1084 1085
	if (gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio,
				      spi->mode & SPI_CS_HIGH ? 0 : 1);
1086

1087
	spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
1088 1089 1090 1091

	return 0;
}

1092
static void spi_imx_cleanup(struct spi_device *spi)
1093 1094 1095
{
}

1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124
static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = clk_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_enable(spi_imx->clk_ipg);
	if (ret) {
		clk_disable(spi_imx->clk_per);
		return ret;
	}

	return 0;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
	return 0;
}

1125
static int spi_imx_probe(struct platform_device *pdev)
1126
{
1127 1128 1129 1130 1131
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
1132
	struct spi_master *master;
1133
	struct spi_imx_data *spi_imx;
1134
	struct resource *res;
1135
	int i, ret, irq;
1136

1137
	if (!np && !mxc_platform_info) {
1138 1139 1140 1141
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

1142
	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
1143 1144 1145 1146 1147
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

1148
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
1149
	master->bus_num = np ? -1 : pdev->id;
1150

1151
	spi_imx = spi_master_get_devdata(master);
1152
	spi_imx->bitbang.master = master;
1153
	spi_imx->dev = &pdev->dev;
1154

1155 1156 1157
	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;

1158 1159 1160 1161 1162 1163
	if (mxc_platform_info) {
		master->num_chipselect = mxc_platform_info->num_chipselect;
		master->cs_gpios = devm_kzalloc(&master->dev,
			sizeof(int) * master->num_chipselect, GFP_KERNEL);
		if (!master->cs_gpios)
			return -ENOMEM;
1164

1165 1166 1167
		for (i = 0; i < master->num_chipselect; i++)
			master->cs_gpios[i] = mxc_platform_info->chipselect[i];
 	}
1168

1169 1170 1171 1172 1173
	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
1174 1175
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
1176 1177 1178
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	if (is_imx51_ecspi(spi_imx))
		spi_imx->bitbang.master->mode_bits |= SPI_LOOP;
1179

1180
	init_completion(&spi_imx->xfer_done);
1181 1182

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
F
Fabio Estevam 已提交
1183 1184 1185 1186
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
1187
	}
1188
	spi_imx->base_phys = res->start;
1189

1190 1191 1192
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
F
Fabio Estevam 已提交
1193
		goto out_master_put;
1194 1195
	}

1196
	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
1197
			       dev_name(&pdev->dev), spi_imx);
1198
	if (ret) {
1199
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
F
Fabio Estevam 已提交
1200
		goto out_master_put;
1201 1202
	}

1203 1204 1205
	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
F
Fabio Estevam 已提交
1206
		goto out_master_put;
1207 1208
	}

1209 1210 1211
	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
F
Fabio Estevam 已提交
1212
		goto out_master_put;
1213 1214
	}

1215 1216 1217 1218 1219 1220 1221
	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;
1222 1223

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
R
Robin Gong 已提交
1224 1225 1226 1227
	/*
	 * Only validated on i.mx6 now, can remove the constrain if validated on
	 * other chips.
	 */
1228
	if (is_imx51_ecspi(spi_imx)) {
1229
		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
1230 1231 1232
		if (ret == -EPROBE_DEFER)
			goto out_clk_put;

1233 1234 1235 1236
		if (ret < 0)
			dev_err(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}
1237

1238
	spi_imx->devtype_data->reset(spi_imx);
1239

1240
	spi_imx->devtype_data->intctrl(spi_imx, 0);
1241

1242
	master->dev.of_node = pdev->dev.of_node;
1243
	ret = spi_bitbang_start(&spi_imx->bitbang);
1244 1245 1246 1247 1248
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261
	for (i = 0; i < master->num_chipselect; i++) {
		if (!gpio_is_valid(master->cs_gpios[i]))
			continue;

		ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
					DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
				master->cs_gpios[i]);
			goto out_clk_put;
		}
	}

1262 1263
	dev_info(&pdev->dev, "probed\n");

1264 1265
	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
1266 1267 1268
	return ret;

out_clk_put:
1269
	clk_disable_unprepare(spi_imx->clk_ipg);
1270 1271
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
F
Fabio Estevam 已提交
1272
out_master_put:
1273
	spi_master_put(master);
F
Fabio Estevam 已提交
1274

1275 1276 1277
	return ret;
}

1278
static int spi_imx_remove(struct platform_device *pdev)
1279 1280
{
	struct spi_master *master = platform_get_drvdata(pdev);
1281
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1282

1283
	spi_bitbang_stop(&spi_imx->bitbang);
1284

1285
	writel(0, spi_imx->base + MXC_CSPICTRL);
1286 1287
	clk_unprepare(spi_imx->clk_ipg);
	clk_unprepare(spi_imx->clk_per);
R
Robin Gong 已提交
1288
	spi_imx_sdma_exit(spi_imx);
1289 1290 1291 1292 1293
	spi_master_put(master);

	return 0;
}

1294
static struct platform_driver spi_imx_driver = {
1295 1296
	.driver = {
		   .name = DRIVER_NAME,
1297
		   .of_match_table = spi_imx_dt_ids,
1298
		   },
1299
	.id_table = spi_imx_devtype,
1300
	.probe = spi_imx_probe,
1301
	.remove = spi_imx_remove,
1302
};
1303
module_platform_driver(spi_imx_driver);
1304 1305 1306 1307

MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
F
Fabio Estevam 已提交
1308
MODULE_ALIAS("platform:" DRIVER_NAME);