spi-s3c64xx.c 37.7 KB
Newer Older
G
Grant Likely 已提交
1
/*
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
M
Mark Brown 已提交
22
#include <linux/interrupt.h>
23 24 25
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
26
#include <linux/dmaengine.h>
27
#include <linux/platform_device.h>
28
#include <linux/pm_runtime.h>
29
#include <linux/spi/spi.h>
30
#include <linux/gpio.h>
31 32
#include <linux/of.h>
#include <linux/of_gpio.h>
33

34
#include <linux/platform_data/spi-s3c64xx.h>
35

36
#define MAX_SPI_PORTS		3
37
#define S3C64XX_SPI_QUIRK_POLL		(1 << 0)
38

39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64
/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG	0x08
#define S3C64XX_SPI_SLAVE_SEL	0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT	0x20
#define S3C64XX_SPI_PENDING_CLR	0x24
#define S3C64XX_SPI_SWAP_CFG	0x28
#define S3C64XX_SPI_FB_CLK		0x2C

#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
65
#define S3C64XX_SPI_PSR_MASK		0xff
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115

#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)

#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR	(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR	(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK		(3<<0)

116 117 118 119 120 121
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))
122 123 124 125 126 127 128

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
129
#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
130 131 132 133

#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

B
Boojin Kim 已提交
134
/* Per-direction (rx or tx) DMA state for one SPI controller instance. */
struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;			/* dmaengine channel, acquired in prepare_transfer */
	enum dma_transfer_direction direction;	/* DMA_DEV_TO_MEM (rx) or DMA_MEM_TO_DEV (tx) */
	unsigned int dmach;			/* platform DMA request/channel number */
};

140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157
/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: Bitmask of S3C64XX_SPI_QUIRK_* flags for this IP revision.
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 *
 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
 * differ in some aspects such as the size of the fifo and spi bus clock
 * setup. Such differences are specified to the driver using this structure
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
};

163 164 165
/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @regs: Pointer to ioremap'ed controller registers.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @pdev: Platform device this driver is bound to.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @lock: Controller specific lock.
 * @sfr_start: BUS address of SPI controller regs.
 * @xfer_completion: To indicate completion of xfer task.
 * @state: Set of FLAGS (RXBUSY/TXBUSY) to indicate status.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 * @rx_dma: DMA state for the receive direction.
 * @tx_dma: DMA state for the transmit direction.
 * @port_conf: Hardware description of this IP revision.
 * @port_id: Index of this port among the SoC's SPI controllers.
 */
struct s3c64xx_spi_driver_data {
	void __iomem                    *regs;
	struct clk                      *clk;
	struct clk                      *src_clk;
	struct platform_device          *pdev;
	struct spi_master               *master;
	struct s3c64xx_spi_info  *cntrlr_info;
	struct spi_device               *tgl_spi;
	spinlock_t                      lock;
	unsigned long                   sfr_start;
	struct completion               xfer_completion;
	unsigned                        state;
	unsigned                        cur_mode, cur_bpw;
	unsigned                        cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
	struct s3c64xx_spi_port_config	*port_conf;
	unsigned int			port_id;
};

/*
 * flush_fifo - Drain and reset the controller's TX/RX FIFOs.
 * @sdd: Driver data of the controller to flush.
 *
 * Stops both channels, asserts a software reset (which empties the TX
 * FIFO), manually drains any residual RX data, then releases the reset
 * and clears the DMA-mode bits so the next transfer starts clean.
 */
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	unsigned long loops;
	u32 val;

	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Stop both Rx and Tx channels before resetting */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Assert software reset; this also drops high-speed mode */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val |= S3C64XX_SPI_CH_SW_RST;
	val &= ~S3C64XX_SPI_CH_HS_EN;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Flush TxFIFO*/
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
	} while (TX_FIFO_LVL(val, sdd) && loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");

	/* Flush RxFIFO*/
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
		if (RX_FIFO_LVL(val, sdd))
			readl(regs + S3C64XX_SPI_RX_DATA);	/* discard stale data */
		else
			break;
	} while (loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");

	/* Release the software reset */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~S3C64XX_SPI_CH_SW_RST;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Leave DMA mode off; enable_datapath() re-enables it per-xfer */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);
}

B
Boojin Kim 已提交
250
static void s3c64xx_spi_dmacb(void *data)
251
{
B
Boojin Kim 已提交
252 253
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_dma_data *dma = data;
254 255
	unsigned long flags;

256
	if (dma->direction == DMA_DEV_TO_MEM)
B
Boojin Kim 已提交
257 258 259 260 261 262
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, rx_dma);
	else
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, tx_dma);

263 264
	spin_lock_irqsave(&sdd->lock, flags);

265
	if (dma->direction == DMA_DEV_TO_MEM) {
B
Boojin Kim 已提交
266 267 268 269 270 271 272 273
		sdd->state &= ~RXBUSY;
		if (!(sdd->state & TXBUSY))
			complete(&sdd->xfer_completion);
	} else {
		sdd->state &= ~TXBUSY;
		if (!(sdd->state & RXBUSY))
			complete(&sdd->xfer_completion);
	}
274 275 276 277

	spin_unlock_irqrestore(&sdd->lock, flags);
}

278
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
279
			struct sg_table *sgt)
280 281 282 283 284
{
	struct s3c64xx_spi_driver_data *sdd;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc;

285 286
	memset(&config, 0, sizeof(config));

287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, rx_dma);
		config.direction = dma->direction;
		config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
		config.src_addr_width = sdd->cur_bpw / 8;
		config.src_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	} else {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, tx_dma);
		config.direction = dma->direction;
		config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
		config.dst_addr_width = sdd->cur_bpw / 8;
		config.dst_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	}

305 306
	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
				       dma->direction, DMA_PREP_INTERRUPT);
307 308 309 310 311 312 313 314 315 316 317 318 319 320

	desc->callback = s3c64xx_spi_dmacb;
	desc->callback_param = dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(dma->ch);
}

static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
	dma_filter_fn filter = sdd->cntrlr_info->filter;
	struct device *dev = &sdd->pdev->dev;
	dma_cap_mask_t mask;
321
	int ret;
322

323 324 325 326 327 328 329 330 331 332 333 334
	if (!is_polling(sdd)) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* Acquire DMA channels */
		sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				   (void *)sdd->rx_dma.dmach, dev, "rx");
		if (!sdd->rx_dma.ch) {
			dev_err(dev, "Failed to get RX DMA channel\n");
			ret = -EBUSY;
			goto out;
		}
335
		spi->dma_rx = sdd->rx_dma.ch;
336

337 338 339 340 341 342 343
		sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				   (void *)sdd->tx_dma.dmach, dev, "tx");
		if (!sdd->tx_dma.ch) {
			dev_err(dev, "Failed to get TX DMA channel\n");
			ret = -EBUSY;
			goto out_rx;
		}
344
		spi->dma_tx = sdd->tx_dma.ch;
345 346 347
	}

	ret = pm_runtime_get_sync(&sdd->pdev->dev);
348
	if (ret < 0) {
349 350 351
		dev_err(dev, "Failed to enable device: %d\n", ret);
		goto out_tx;
	}
352 353

	return 0;
354 355 356 357 358 359 360

out_tx:
	dma_release_channel(sdd->tx_dma.ch);
out_rx:
	dma_release_channel(sdd->rx_dma.ch);
out:
	return ret;
361 362 363 364 365 366 367
}

static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/* Free DMA channels */
368 369 370 371
	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}
372 373 374 375 376

	pm_runtime_put(&sdd->pdev->dev);
	return 0;
}

377 378 379 380 381 382 383 384 385
/*
 * DMA is worthwhile only when the transfer is larger than what a
 * single FIFO fill can hold; smaller transfers use PIO.
 */
static bool s3c64xx_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	unsigned int fifo_depth = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	return xfer->len > fifo_depth;
}

386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416
/*
 * enable_datapath - Program the controller for one transfer.
 * @sdd: Driver data of the controller.
 * @spi: Slave device of the current message.
 * @xfer: Transfer to execute.
 * @dma_mode: Non-zero to use DMA, zero for PIO.
 *
 * Sets the channel and DMA mode bits, primes the packet counter, and
 * for TX either submits a DMA descriptor or stuffs the FIFO directly.
 * Called under sdd->lock from s3c64xx_spi_transfer_one().
 */
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_device *spi,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
		} else {
			/* PIO: write the whole buffer into the TX FIFO using
			 * the word width matching the current bits-per-word */
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		/* High-speed receive mode only without CPHA, per the
		 * controller's capability flag */
		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}

457
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475
					int timeout_ms)
{
	void __iomem *regs = sdd->regs;
	unsigned long val = 1;
	u32 status;

	/* max fifo depth available */
	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	if (timeout_ms)
		val = msecs_to_loops(timeout_ms);

	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);

	/* return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
476 477
}

478 479
/*
 * wait_for_dma - Wait for a DMA-driven transfer to complete.
 * @sdd: Driver data of the controller.
 * @xfer: The transfer being executed.
 *
 * Waits on the completion signalled by s3c64xx_spi_dmacb(), then for
 * Tx-only transfers additionally busy-waits until the TX FIFO has
 * drained onto the bus. Returns 0 on success, -EIO on timeout.
 */
static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer was completed within timeout, then
	 * proceed further else return -EIO.
	 * DmaTx returns after simply writing data in the FIFO,
	 * w/o waiting for real transmission on the bus to finish.
	 * DmaRx returns only after Dma read data from FIFO which
	 * needs bus transmission to finish, so we don't worry if
	 * Xfer involved Rx(with or without Tx).
	 */
	if (val && !xfer->rx_buf) {
		/* Busy-wait for the TX FIFO to empty and TX_DONE to assert */
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}

	}

	/* If timed out while checking rx/tx status return error */
	if (!val)
		return -EIO;

	return 0;
}
520

521 522 523 524 525 526 527 528 529 530
/*
 * wait_for_pio - Complete a transfer in polled (PIO) mode.
 * @sdd: Driver data of the controller.
 * @xfer: The transfer being executed.
 *
 * Busy-waits until the RX FIFO reports xfer->len bytes (or the time
 * budget expires), then drains the FIFO into xfer->rx_buf in chunks of
 * at most the FIFO depth. Always returns 0.
 */
static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

	/* NOTE(review): a timeout here (val == 0) is not reported as an
	 * error; the code proceeds with whatever data arrived - confirm
	 * whether that is intentional. */

	/* If it was only Tx */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed atleast once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		/* Drain cpy_len bytes using the configured word width */
		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}

/*
 * s3c64xx_spi_config - Apply cur_mode/cur_bpw/cur_speed to the hardware.
 * @sdd: Driver data holding the desired configuration.
 *
 * Disables the SPI clock, programs polarity/phase and the channel/bus
 * transfer widths, then reconfigures and re-enables the clock. On SoCs
 * fed from the CMU the rate is set on the source clock itself;
 * otherwise the controller's internal prescaler is used.
 */
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}
}

#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)

658 659
static int s3c64xx_spi_prepare_message(struct spi_master *master,
				       struct spi_message *msg)
660
{
661
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;

	/* If Master's(controller) state differs from that needed by Slave */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

678 679
	return 0;
}
680

681 682 683
/*
 * s3c64xx_spi_transfer_one - Execute one spi_transfer.
 * @master: The SPI master.
 * @spi: Device the transfer is addressed to.
 * @xfer: The transfer to run.
 *
 * Reconfigures speed/bpw if they changed, picks DMA vs PIO, programs
 * the datapath under the lock, waits for completion and on error
 * terminates any in-flight DMA. Returns 0 or a negative errno.
 */
static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;
	int use_dma;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz ? : spi->max_speed_hz;

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		s3c64xx_spi_config(sdd);
	}

	/* Polling method for xfers not bigger than FIFO capacity */
	use_dma = 0;
	if (!is_polling(sdd) &&
	    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
	     (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
		use_dma = 1;

	spin_lock_irqsave(&sdd->lock, flags);

	/* Pending only which is to be done */
	sdd->state &= ~RXBUSY;
	sdd->state &= ~TXBUSY;

	enable_datapath(sdd, spi, xfer, use_dma);

	/* Start the signals */
	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	spin_unlock_irqrestore(&sdd->lock, flags);

	if (use_dma)
		status = wait_for_dma(sdd, xfer);
	else
		status = wait_for_pio(sdd, xfer);

	if (status) {
		/* 'f' = that direction finished, 'p' = still pending */
		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
			xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
			(sdd->state & RXBUSY) ? 'f' : 'p',
			(sdd->state & TXBUSY) ? 'f' : 'p',
			xfer->len);

		if (use_dma) {
			/* Cancel whichever DMA directions never completed */
			if (xfer->tx_buf != NULL
			    && (sdd->state & TXBUSY))
				dmaengine_terminate_all(sdd->tx_dma.ch);
			if (xfer->rx_buf != NULL
			    && (sdd->state & RXBUSY))
				dmaengine_terminate_all(sdd->rx_dma.ch);
		}
	} else {
		flush_fifo(sdd);
	}

	return status;
}

751 752 753 754
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
				struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;
755
	struct device_node *slave_np, *data_np = NULL;
756 757 758 759 760 761 762 763
	u32 fb_delay = 0;

	slave_np = spi->dev.of_node;
	if (!slave_np) {
		dev_err(&spi->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

764
	data_np = of_get_child_by_name(slave_np, "controller-data");
765 766 767 768 769 770 771
	if (!data_np) {
		dev_err(&spi->dev, "child node 'controller-data' not found\n");
		return ERR_PTR(-EINVAL);
	}

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs) {
772
		of_node_put(data_np);
773 774 775 776 777
		return ERR_PTR(-ENOMEM);
	}

	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
	cs->fb_delay = fb_delay;
778
	of_node_put(data_np);
779 780 781
	return cs;
}

782 783 784 785 786 787 788 789 790 791
/*
 * Here we only check the validity of requested configuration
 * and save the configuration in a local data-structure.
 * The controller is actually configured only just before we
 * get a message to transfer.
 *
 * On DT platforms the per-CS data is parsed from the device node; on
 * non-DT platforms it comes from controller_data and carries the CS
 * GPIO number. The GPIO is requested only on the first setup() call
 * for a device (when no ctldata is attached yet).
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	} else if (cs) {
		/* On non-DT platforms the SPI core will set spi->cs_gpio
		 * to -ENOENT. The GPIO pin used to drive the chip select
		 * is defined by using platform data so spi->cs_gpio value
		 * has to be override to have the proper GPIO pin number.
		 */
		spi->cs_gpio = cs->line;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	/* First setup() for this device: claim the CS GPIO */
	if (!spi_get_ctldata(spi)) {
		if (gpio_is_valid(spi->cs_gpio)) {
			err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
					       dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					spi->cs_gpio, err);
				goto err_gpio_req;
			}
		}

		spi_set_ctldata(spi, cs);
	}

	sci = sdd->cntrlr_info;

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		/* Round the prescaler so the achieved rate never exceeds
		 * the requested one */
		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_put(&sdd->pdev->dev);
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	return 0;

setup_exit:
	pm_runtime_put(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	if (gpio_is_valid(spi->cs_gpio))
		gpio_free(spi->cs_gpio);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	/* DT path allocated cs in s3c64xx_get_slave_ctrldata() */
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}

888 889 890 891
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);

892
	if (gpio_is_valid(spi->cs_gpio)) {
M
Mark Brown 已提交
893
		gpio_free(spi->cs_gpio);
894 895
		if (spi->dev.of_node)
			kfree(cs);
896 897 898 899 900 901 902 903
		else {
			/* On non-DT platforms, the SPI core sets
			 * spi->cs_gpio to -ENOENT and .setup()
			 * overrides it with the GPIO pin value
			 * passed using platform data.
			 */
			spi->cs_gpio = -ENOENT;
		}
904
	}
905

906 907 908
	spi_set_ctldata(spi, NULL);
}

M
Mark Brown 已提交
909 910 911 912
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
913
	unsigned int val, clr = 0;
M
Mark Brown 已提交
914

915
	val = readl(sdd->regs + S3C64XX_SPI_STATUS);
M
Mark Brown 已提交
916

917 918
	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
M
Mark Brown 已提交
919
		dev_err(&spi->dev, "RX overrun\n");
920 921 922
	}
	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
M
Mark Brown 已提交
923
		dev_err(&spi->dev, "RX underrun\n");
924 925 926
	}
	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
M
Mark Brown 已提交
927
		dev_err(&spi->dev, "TX overrun\n");
928 929 930
	}
	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
M
Mark Brown 已提交
931
		dev_err(&spi->dev, "TX underrun\n");
932 933 934 935 936
	}

	/* Clear the pending irq by setting and then clearing it */
	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
M
Mark Brown 已提交
937 938 939 940

	return IRQ_HANDLED;
}

941 942
/*
 * s3c64xx_spi_hwinit - Reset the controller to a known idle state.
 * @sdd: Driver data of the controller.
 * @channel: Port index. NOTE(review): appears unused in this body -
 *	confirm against callers.
 *
 * De-asserts chip select, masks all interrupts, selects the parent
 * clock (when not CMU-fed), clears pending irq bits and the swap
 * config, programs the trailing-byte count, and flushes the FIFOs.
 */
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	/* Force a reconfiguration on the next transfer */
	sdd->cur_speed = 0;

	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	/* Disable 4-burst mode and program the trailing byte count */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);
}

979
#ifdef CONFIG_OF
980
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
981 982 983 984 985
{
	struct s3c64xx_spi_info *sci;
	u32 temp;

	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
986
	if (!sci)
987 988 989
		return ERR_PTR(-ENOMEM);

	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
990
		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
991 992 993 994 995 996
		sci->src_clk_nr = 0;
	} else {
		sci->src_clk_nr = temp;
	}

	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
997
		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
998 999 1000 1001 1002 1003 1004 1005 1006 1007
		sci->num_cs = 1;
	} else {
		sci->num_cs = temp;
	}

	return sci;
}
#else
/* Without CONFIG_OF the configuration comes from board platform data. */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
#endif

static const struct of_device_id s3c64xx_spi_dt_match[];

1014 1015 1016
/*
 * Select the per-SoC port configuration: from the OF match table when the
 * device was probed via device tree, otherwise from the platform device id.
 */
static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
						struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
		/* cast drops const: the data points at the static configs below */
		return (struct s3c64xx_spi_port_config *)match->data;
	}
#endif
	return (struct s3c64xx_spi_port_config *)
			 platform_get_device_id(pdev)->driver_data;
}

1028
static int s3c64xx_spi_probe(struct platform_device *pdev)
1029
{
1030
	struct resource	*mem_res;
1031
	struct resource	*res;
1032
	struct s3c64xx_spi_driver_data *sdd;
J
Jingoo Han 已提交
1033
	struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
1034
	struct spi_master *master;
M
Mark Brown 已提交
1035
	int ret, irq;
1036
	char clk_name[16];
1037

1038 1039 1040 1041
	if (!sci && pdev->dev.of_node) {
		sci = s3c64xx_spi_parse_dt(&pdev->dev);
		if (IS_ERR(sci))
			return PTR_ERR(sci);
1042 1043
	}

1044
	if (!sci) {
1045 1046 1047 1048 1049 1050 1051 1052 1053 1054
		dev_err(&pdev->dev, "platform_data missing!\n");
		return -ENODEV;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
		return -ENXIO;
	}

M
Mark Brown 已提交
1055 1056 1057 1058 1059 1060
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;
	}

1061 1062 1063 1064 1065 1066 1067 1068 1069 1070
	master = spi_alloc_master(&pdev->dev,
				sizeof(struct s3c64xx_spi_driver_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);

	sdd = spi_master_get_devdata(master);
1071
	sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1072 1073 1074 1075
	sdd->master = master;
	sdd->cntrlr_info = sci;
	sdd->pdev = pdev;
	sdd->sfr_start = mem_res->start;
1076 1077 1078
	if (pdev->dev.of_node) {
		ret = of_alias_get_id(pdev->dev.of_node, "spi");
		if (ret < 0) {
1079 1080
			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
				ret);
1081 1082 1083 1084 1085 1086
			goto err0;
		}
		sdd->port_id = ret;
	} else {
		sdd->port_id = pdev->id;
	}
1087 1088 1089

	sdd->cur_bpw = 8;

1090 1091 1092
	if (!sdd->pdev->dev.of_node) {
		res = platform_get_resource(pdev, IORESOURCE_DMA,  0);
		if (!res) {
1093
			dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
1094 1095 1096
			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
		} else
			sdd->tx_dma.dmach = res->start;
1097 1098 1099

		res = platform_get_resource(pdev, IORESOURCE_DMA,  1);
		if (!res) {
1100
			dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
1101 1102 1103
			sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
		} else
			sdd->rx_dma.dmach = res->start;
1104
	}
1105

1106 1107
	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
	sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1108 1109

	master->dev.of_node = pdev->dev.of_node;
1110
	master->bus_num = sdd->port_id;
1111
	master->setup = s3c64xx_spi_setup;
1112
	master->cleanup = s3c64xx_spi_cleanup;
1113
	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1114
	master->prepare_message = s3c64xx_spi_prepare_message;
1115
	master->transfer_one = s3c64xx_spi_transfer_one;
1116
	master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
1117 1118
	master->num_chipselect = sci->num_cs;
	master->dma_alignment = 8;
1119 1120
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
					SPI_BPW_MASK(8);
1121 1122
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1123
	master->auto_runtime_pm = true;
1124 1125
	if (!is_polling(sdd))
		master->can_dma = s3c64xx_spi_can_dma;
1126

1127 1128 1129
	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sdd->regs)) {
		ret = PTR_ERR(sdd->regs);
1130
		goto err0;
1131 1132
	}

1133
	if (sci->cfg_gpio && sci->cfg_gpio()) {
1134 1135
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
1136
		goto err0;
1137 1138 1139
	}

	/* Setup clocks */
1140
	sdd->clk = devm_clk_get(&pdev->dev, "spi");
1141 1142 1143
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
1144
		goto err0;
1145 1146
	}

1147
	if (clk_prepare_enable(sdd->clk)) {
1148 1149
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		ret = -EBUSY;
1150
		goto err0;
1151 1152
	}

1153
	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1154
	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1155
	if (IS_ERR(sdd->src_clk)) {
1156
		dev_err(&pdev->dev,
1157
			"Unable to acquire clock '%s'\n", clk_name);
1158
		ret = PTR_ERR(sdd->src_clk);
1159
		goto err2;
1160 1161
	}

1162
	if (clk_prepare_enable(sdd->src_clk)) {
1163
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1164
		ret = -EBUSY;
1165
		goto err2;
1166 1167 1168
	}

	/* Setup Deufult Mode */
1169
	s3c64xx_spi_hwinit(sdd, sdd->port_id);
1170 1171 1172 1173

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);

1174 1175
	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
				"spi-s3c64xx", sdd);
M
Mark Brown 已提交
1176 1177 1178
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
1179
		goto err3;
M
Mark Brown 已提交
1180 1181 1182 1183 1184 1185
	}

	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

1186
	pm_runtime_set_active(&pdev->dev);
1187 1188
	pm_runtime_enable(&pdev->dev);

1189 1190 1191
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret != 0) {
		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
1192
		goto err3;
1193 1194
	}

1195
	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1196
					sdd->port_id, master->num_chipselect);
J
Jingoo Han 已提交
1197 1198
	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
					mem_res,
B
Boojin Kim 已提交
1199
					sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1200 1201 1202

	return 0;

1203
err3:
1204
	clk_disable_unprepare(sdd->src_clk);
1205
err2:
1206
	clk_disable_unprepare(sdd->clk);
1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217
err0:
	spi_master_put(master);

	return ret;
}

/*
 * s3c64xx_spi_remove - undo probe: stop runtime PM, mask controller
 * interrupts and release both clocks.  The IRQ, register mapping and the
 * master registration are devm-managed and torn down by the core.
 *
 * NOTE(review): the reference taken by spi_master_get() here is never
 * dropped — verify this matches the core's expectations.
 */
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	/* mask all controller interrupts before the clocks go away */
	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	return 0;
}

1229
#ifdef CONFIG_PM_SLEEP
M
Mark Brown 已提交
1230
static int s3c64xx_spi_suspend(struct device *dev)
1231
{
1232
	struct spi_master *master = dev_get_drvdata(dev);
1233 1234
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

1235 1236 1237
	int ret = spi_master_suspend(master);
	if (ret)
		return ret;
1238

1239 1240 1241 1242
	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(sdd->clk);
		clk_disable_unprepare(sdd->src_clk);
	}
1243 1244 1245 1246 1247 1248

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}

M
Mark Brown 已提交
1249
/*
 * System resume: reconfigure pins via the board callback, re-enable the
 * clocks that s3c64xx_spi_suspend() gated, reprogram the controller and
 * hand back to the SPI core.
 */
static int s3c64xx_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	if (sci->cfg_gpio)
		sci->cfg_gpio();

	/* mirror of the conditional gating done in the suspend path */
	if (!pm_runtime_suspended(dev)) {
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);
	}

	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	return spi_master_resume(master);
}
1267
#endif /* CONFIG_PM_SLEEP */
1268

1269 1270 1271
#ifdef CONFIG_PM_RUNTIME
/* Runtime PM: gate both the bus and source clocks while the device idles. */
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);

	return 0;
}

static int s3c64xx_spi_runtime_resume(struct device *dev)
{
1283
	struct spi_master *master = dev_get_drvdata(dev);
1284
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1285
	int ret;
1286

1287 1288 1289 1290 1291 1292 1293 1294 1295
	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		return ret;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0) {
		clk_disable_unprepare(sdd->src_clk);
		return ret;
	}
1296 1297 1298 1299 1300

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

M
Mark Brown 已提交
1301 1302
/* PM callbacks: system sleep plus runtime clock gating. */
static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};

1307
/* S3C2443: one port (single fifo_lvl_mask entry), high-speed capable. */
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,	/* status register bit positions — per-SoC */
	.tx_st_done	= 21,
	.high_speed	= true,
};

1314
/* S3C6410: two ports, 7-bit FIFO level masks. */
static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

1320
/* S5P64x0: 9-bit FIFO level mask on port 0, 7-bit on port 1. */
static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
};

1326
/* S5PC100: two ports, high-speed capable. */
static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

1333
/* S5PV210: two ports, wider FIFO level field on port 0, high-speed capable. */
static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

1340
/*
 * Exynos4: three ports; clk_from_cmu makes hwinit skip programming
 * S3C64XX_SPI_CLK_CFG (the clock is managed outside this IP block).
 */
static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

1348 1349 1350 1351 1352 1353 1354 1355 1356
/*
 * Exynos5440: single port; the POLL quirk disables DMA (probe only sets
 * master->can_dma when !is_polling(sdd)).
 */
static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_POLL,
};

1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379
/*
 * Legacy (non-DT) platform device names, each mapped to the matching
 * per-SoC port configuration via driver_data.
 */
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	}, {
		.name		= "s5p64x0-spi",
		.driver_data	= (kernel_ulong_t)&s5p64x0_spi_port_config,
	}, {
		.name		= "s5pc100-spi",
		.driver_data	= (kernel_ulong_t)&s5pc100_spi_port_config,
	}, {
		.name		= "s5pv210-spi",
		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
	}, {
		.name		= "exynos4210-spi",
		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
	},
	{ },
};

1380
/*
 * Device-tree compatibles, each mapped to the matching per-SoC port
 * configuration (looked up in s3c64xx_spi_get_port_config()).
 */
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pc100-spi",
			.data = (void *)&s5pc100_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos5440-spi",
			.data = (void *)&exynos5440_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);

1403 1404 1405 1406
/* Platform driver glue: matched either by DT compatible or legacy name. */
static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.owner = THIS_MODULE,
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

/* Registers the driver at module init and unregisters it at exit. */
module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");