/*
 * SPI bus driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>

#define DRIVER_NAME "sirfsoc_spi"

#define SIRFSOC_SPI_CTRL		0x0000
#define SIRFSOC_SPI_CMD			0x0004
#define SIRFSOC_SPI_TX_RX_EN		0x0008
#define SIRFSOC_SPI_INT_EN		0x000C
#define SIRFSOC_SPI_INT_STATUS		0x0010
#define SIRFSOC_SPI_TX_DMA_IO_CTRL	0x0100
#define SIRFSOC_SPI_TX_DMA_IO_LEN	0x0104
#define SIRFSOC_SPI_TXFIFO_CTRL		0x0108
#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK	0x010C
#define SIRFSOC_SPI_TXFIFO_OP		0x0110
#define SIRFSOC_SPI_TXFIFO_STATUS	0x0114
#define SIRFSOC_SPI_TXFIFO_DATA		0x0118
#define SIRFSOC_SPI_RX_DMA_IO_CTRL	0x0120
#define SIRFSOC_SPI_RX_DMA_IO_LEN	0x0124
#define SIRFSOC_SPI_RXFIFO_CTRL		0x0128
#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK	0x012C
#define SIRFSOC_SPI_RXFIFO_OP		0x0130
#define SIRFSOC_SPI_RXFIFO_STATUS	0x0134
#define SIRFSOC_SPI_RXFIFO_DATA		0x0138
#define SIRFSOC_SPI_DUMMY_DELAY_CTL	0x0144

/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE		BIT(16)
#define SIRFSOC_SPI_CMD_MODE		BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT		BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE		BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT	BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT	BIT(21)
#define SIRFSOC_SPI_TRAN_MSB		BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE	BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME	BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE	BIT(25)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8	(0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12	(1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16	(2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32	(3 << 26)
#define SIRFSOC_SPI_CMD_BYTE_NUM(x)		((x & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR		BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE		BIT(31)

/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN		BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN		BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN		BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN		BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN	BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN	BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN	BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN	BIT(10)

#define SIRFSOC_SPI_INT_MASK_ALL		0x1FFF

/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE		BIT(0)
#define SIRFSOC_SPI_TX_DONE		BIT(1)
#define SIRFSOC_SPI_RX_OFLOW		BIT(2)
#define SIRFSOC_SPI_TX_UFLOW		BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA		BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH	BIT(9)
#define SIRFSOC_SPI_FRM_END		BIT(10)

/* TX RX enable */
#define SIRFSOC_SPI_RX_EN		BIT(0)
#define SIRFSOC_SPI_TX_EN		BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN		BIT(2)

#define SIRFSOC_SPI_IO_MODE_SEL		BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH	BIT(2)

/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET		BIT(0)
#define SIRFSOC_SPI_FIFO_START		BIT(1)

/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE	(0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD	(1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD	(2 << 0)

/* FIFO Status */
#define	SIRFSOC_SPI_FIFO_LEVEL_MASK	0xFF
#define SIRFSOC_SPI_FIFO_FULL		BIT(8)
#define SIRFSOC_SPI_FIFO_EMPTY		BIT(9)

/* 256 bytes rx/tx FIFO */
#define SIRFSOC_SPI_FIFO_SIZE		256
#define SIRFSOC_SPI_DAT_FRM_LEN_MAX	(64 * 1024)

#define SIRFSOC_SPI_FIFO_SC(x)		((x) & 0x3F)
#define SIRFSOC_SPI_FIFO_LC(x)		(((x) & 0x3F) << 10)
#define SIRFSOC_SPI_FIFO_HC(x)		(((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x)		(((x) & 0xFF) << 2)

/*
 * Use DMA only when the rx/tx buffers and the transfer length are
 * 4-byte aligned, due to a limitation of the DMA controller.
 */

#define ALIGNED(x) (!((u32)x & 0x3))
#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
	ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))

#define SIRFSOC_MAX_CMD_BYTES	4

struct sirfsoc_spi {
	struct spi_bitbang bitbang;
	struct completion rx_done;
	struct completion tx_done;

	void __iomem *base;
	u32 ctrl_freq;  /* SPI controller clock speed */
	struct clk *clk;

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	/* place received word into rx buffer */
	void (*rx_word) (struct sirfsoc_spi *);
	/* get word from tx buffer for sending */
	void (*tx_word) (struct sirfsoc_spi *);

	/* number of words left to be transmitted/received */
	unsigned int left_tx_word;
	unsigned int left_rx_word;

	/* rx & tx DMA channels */
	struct dma_chan *rx_chan;
	struct dma_chan *tx_chan;
	dma_addr_t src_start;
	dma_addr_t dst_start;
	void *dummypage;
	int word_width; /* in bytes */

	/*
	 * if the tx size is no more than 4 bytes and there is no rx
	 * buffer, use command mode
	 */
	bool	tx_by_cmd;
	bool	hw_cs;
};

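/*
 * PIO helpers: move one word between the spi_transfer buffers and the
 * rx/tx FIFO data registers, in the frame width selected by
 * spi_sirfsoc_setup_transfer() (8-bit, 12/16-bit or 32-bit).
 */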
static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data;
	u8 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u8) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u8 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data;
	u16 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u16) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u16 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data;
	u32 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u32) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;

}

static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u32 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

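/*
 * Interrupt handler: completes tx_done/rx_done on command-mode frame end,
 * on FIFO overflow/underflow errors and on normal end of transfer, then
 * masks and clears all SPI interrupts.
 */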
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
	struct sirfsoc_spi *sspi = dev_id;
	u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
	if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
		complete(&sspi->tx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
				sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}

	/* Error Conditions */
	if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
			spi_stat & SIRFSOC_SPI_TX_UFLOW) {
		complete(&sspi->tx_done);
		complete(&sspi->rx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
				sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}
	if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
		complete(&sspi->tx_done);
	while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
		SIRFSOC_SPI_RX_IO_DMA))
		cpu_relax();
	complete(&sspi->rx_done);
	writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);

	return IRQ_HANDLED;
}

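/* DMA completion callback: just signal the waiting transfer routine */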
static void spi_sirfsoc_dma_fini_callback(void *data)
{
	struct completion *dma_complete = data;

	complete(dma_complete);
}

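/*
 * Command-mode transfer: pack up to 4 tx bytes into the CMD register
 * (re-ordered for MSB-first transmission when needed), enable the
 * frame-end interrupt and wait for the frame to go out. Returns the
 * number of bytes sent, or 0 on timeout.
 */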
static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;
	u32 cmd;

	sspi = spi_master_get_devdata(spi->master);
	memcpy(&cmd, sspi->tx, t->len);
	if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
		cmd = cpu_to_be32(cmd) >>
			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
	if (sspi->word_width == 2 && t->len == 4 &&
			(!(spi->mode & SPI_LSB_FIRST)))
		cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
	writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
	writel(SIRFSOC_SPI_FRM_END_INT_EN,
		sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_CMD_TX_EN,
		sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "cmd transfer timeout\n");
		return 0;
	}

	return t->len;
}

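/*
 * DMA transfer: reset and start both FIFOs, program the DMA I/O lengths,
 * map the rx/tx buffers and queue one slave descriptor per direction,
 * then wait for the rx and tx completions before unmapping the buffers
 * and stopping the FIFOs.
 */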
static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
	if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(sspi->left_tx_word - 1,
				sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(sspi->left_tx_word - 1,
				sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	} else {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	}
	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
	rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	rx_desc->callback = spi_sirfsoc_dma_fini_callback;
	rx_desc->callback_param = &sspi->rx_done;

	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
		sspi->src_start, t->len, DMA_MEM_TO_DEV,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_desc->callback = spi_sirfsoc_dma_fini_callback;
	tx_desc->callback_param = &sspi->tx_done;

	dmaengine_submit(tx_desc);
	dmaengine_submit(rx_desc);
	dma_async_issue_pending(sspi->tx_chan);
	dma_async_issue_pending(sspi->rx_chan);
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;
	/*
	 * Only wait for the tx-done event when transferring by DMA. For
	 * PIO, rx data is clocked in by writing tx data, so once rx is
	 * done, tx has finished already.
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->tx_chan);
	}
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
		writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
}

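/*
 * PIO transfer: loop in chunks of at most one FIFO (256 bytes), filling
 * the tx FIFO by hand, waiting for the tx-empty/rx interrupts, then
 * draining the rx FIFO, until all words have been transferred.
 */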
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;

	sspi = spi_master_get_devdata(spi->master);
	do {
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
				- 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
				- 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
		while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
			& SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN,
			sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
			!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			break;
		}
		while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
			& SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
			sspi->rx_word(sspi);
		writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}

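/*
 * Top-level transfer hook for spi_bitbang: set up the buffers and word
 * counts, then dispatch to command-mode, DMA or PIO transfer.
 */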
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	sspi = spi_master_get_devdata(spi->master);

	sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
	sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
	reinit_completion(&sspi->rx_done);
	reinit_completion(&sspi->tx_done);
	/*
	 * If the data is sent through the command register (tx_by_cmd set,
	 * no rx buffer), just write it into the command register and wait
	 * for completion.
	 */
	if (sspi->tx_by_cmd)
		spi_sirfsoc_cmd_transfer(spi, t);
	else if (IS_DMA_VALID(t))
		spi_sirfsoc_dma_transfer(spi, t);
	else
		spi_sirfsoc_pio_transfer(spi, t);

	return t->len - sspi->left_rx_word * sspi->word_width;
}

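/*
 * Drive chip select either through the controller's CS_IO_OUT bit
 * (hardware CS pin) or through the GPIO given in spi->cs_gpio,
 * honouring SPI_CS_HIGH.
 */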
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
	struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);

	if (sspi->hw_cs) {
		u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
		switch (value) {
		case BITBANG_CS_ACTIVE:
			if (spi->mode & SPI_CS_HIGH)
				regval |= SIRFSOC_SPI_CS_IO_OUT;
			else
				regval &= ~SIRFSOC_SPI_CS_IO_OUT;
			break;
		case BITBANG_CS_INACTIVE:
			if (spi->mode & SPI_CS_HIGH)
				regval &= ~SIRFSOC_SPI_CS_IO_OUT;
			else
				regval |= SIRFSOC_SPI_CS_IO_OUT;
			break;
		}
		writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
	} else {
		switch (value) {
		case BITBANG_CS_ACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 1 : 0);
			break;
		case BITBANG_CS_INACTIVE:
			gpio_direction_output(spi->cs_gpio,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			break;
		}
	}
}

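/*
 * Per-transfer setup: program the clock divider, frame format, SPI mode
 * bits and FIFO thresholds, decide between command mode and normal
 * (DMA or IO) mode, and pick the matching rx_word/tx_word helpers.
 */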
static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	u8 bits_per_word = 0;
	int hz = 0;
	u32 regval;
	u32 txfifo_ctrl, rxfifo_ctrl;
	u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;

	sspi = spi_master_get_devdata(spi->master);

	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
	hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;

	regval = (sspi->ctrl_freq / (2 * hz)) - 1;
	if (regval > 0xFFFF || regval < 0) {
		dev_err(&spi->dev, "Speed %d not supported\n", hz);
		return -EINVAL;
	}

	switch (bits_per_word) {
	case 8:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
		sspi->rx_word = spi_sirfsoc_rx_word_u8;
		sspi->tx_word = spi_sirfsoc_tx_word_u8;
		break;
	case 12:
	case 16:
		regval |= (bits_per_word ==  12) ?
			SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
			SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
		sspi->rx_word = spi_sirfsoc_rx_word_u16;
		sspi->tx_word = spi_sirfsoc_tx_word_u16;
		break;
	case 32:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
		sspi->rx_word = spi_sirfsoc_rx_word_u32;
		sspi->tx_word = spi_sirfsoc_tx_word_u32;
		break;
	default:
		BUG();
	}

	sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
	txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
					   sspi->word_width;
	rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
					   sspi->word_width;

	if (!(spi->mode & SPI_CS_HIGH))
		regval |= SIRFSOC_SPI_CS_IDLE_STAT;
	if (!(spi->mode & SPI_LSB_FIRST))
		regval |= SIRFSOC_SPI_TRAN_MSB;
	if (spi->mode & SPI_CPOL)
		regval |= SIRFSOC_SPI_CLK_IDLE_STAT;

	/*
	 * Data should be driven at least 1/2 cycle before the fetch edge
	 * to make sure that data gets stable at the fetch edge.
	 */
	if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
	    (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
		regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
	else
		regval |= SIRFSOC_SPI_DRV_POS_EDGE;

	writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
			SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
			SIRFSOC_SPI_FIFO_HC(2),
		sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
	writel(SIRFSOC_SPI_FIFO_SC(2) |
			SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
			SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
		sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
	writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
	writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);

	if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
		regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
				SIRFSOC_SPI_CMD_MODE);
		sspi->tx_by_cmd = true;
	} else {
		regval &= ~SIRFSOC_SPI_CMD_MODE;
		sspi->tx_by_cmd = false;
	}
	/*
	 * Never switch to hardware CS mode here: in hardware CS mode the
	 * CS signal cannot be controlled by the driver.
	 */
	regval |= SIRFSOC_SPI_CS_IO_MODE;
	writel(regval, sspi->base + SIRFSOC_SPI_CTRL);

	if (IS_DMA_VALID(t)) {
		/* Enable DMA mode for RX, TX */
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
		writel(SIRFSOC_SPI_RX_DMA_FLUSH,
			sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
	} else {
		/* Enable IO mode for RX, TX */
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
	}

	return 0;
}

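/*
 * Per-device setup: pick GPIO or hardware chip select and apply the
 * default transfer parameters.
 */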
static int spi_sirfsoc_setup(struct spi_device *spi)
{
	struct sirfsoc_spi *sspi;

	if (!spi->max_speed_hz)
		return -EINVAL;

	sspi = spi_master_get_devdata(spi->master);

	if (spi->cs_gpio == -ENOENT)
		sspi->hw_cs = true;
	else
		sspi->hw_cs = false;
	return spi_sirfsoc_setup_transfer(spi, NULL);
}

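/*
 * Probe: map the registers, request the IRQ and the "rx"/"tx" DMA
 * channels, enable the clock, reset the FIFOs, request any chip-select
 * GPIOs and register the bitbang master.
 */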
static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	struct resource *mem_res;
	int irq;
	int i, ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
				DRIVER_NAME, sspi);
	if (ret)
		goto free_master;

	sspi->bitbang.master = master;
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
					SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

	/* request DMA channels */
	sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!sspi->rx_chan) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto free_master;
	}
	sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!sspi->tx_chan) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = -ENODEV;
		goto free_rx_dma;
	}

	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);

	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);

	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	/* We are not using dummy delay between command and data */
	writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);

	sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
	if (!sspi->dummypage) {
		ret = -ENOMEM;
		goto free_clk;
	}

	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_dummypage;
	for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) {
		if (master->cs_gpios[i] == -ENOENT)
			continue;
		if (!gpio_is_valid(master->cs_gpios[i])) {
			dev_err(&pdev->dev, "no valid gpio\n");
			ret = -EINVAL;
			goto free_dummypage;
		}
		ret = devm_gpio_request(&pdev->dev,
				master->cs_gpios[i], DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "failed to request gpio\n");
			goto free_dummypage;
		}
	}
	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);

	return 0;
free_dummypage:
	kfree(sspi->dummypage);
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);

	return ret;
}

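/* Remove: tear everything down in reverse order of probe */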
static int  spi_sirfsoc_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sirfsoc_spi *sspi;

	master = platform_get_drvdata(pdev);
	sspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&sspi->bitbang);
	kfree(sspi->dummypage);
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
	dma_release_channel(sspi->rx_chan);
	dma_release_channel(sspi->tx_chan);
	spi_master_put(master);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	clk_disable(sspi->clk);
	return 0;
}

static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);

	return spi_master_resume(master);
}
#endif

static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
			 spi_sirfsoc_resume);

static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", },
	{ .compatible = "sirf,marco-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm     = &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_LICENSE("GPL v2");