// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     https://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

/* SSP register offsets and bit definitions (see EP93xx User's Guide) */
#define SSPCR0			0x0000
#define SSPCR0_SPO		BIT(6)
#define SSPCR0_SPH		BIT(7)
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)

#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @clk: clock for the controller
71
 * @mmio: pointer to ioremap()'d registers
M
Mika Westerberg 已提交
72
 * @sspdr_phys: physical address of the SSPDR register
73 74 75 76
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
M
Mika Westerberg 已提交
77 78 79 80 81 82 83 84
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
85 86 87
 */
struct ep93xx_spi {
	struct clk			*clk;
88
	void __iomem			*mmio;
M
Mika Westerberg 已提交
89
	unsigned long			sspdr_phys;
90 91 92
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
M
Mika Westerberg 已提交
93 94 95 96 97 98 99
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
100 101 102 103 104 105 106
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
107
 * @master: SPI master
108
 * @rate: desired SPI output clock rate
109 110
 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
 * @div_scr: pointer to return the scr divider
111
 */
112
static int ep93xx_spi_calc_divisors(struct spi_master *master,
113
				    u32 rate, u8 *div_cpsr, u8 *div_scr)
114
{
115
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
116 117 118 119 120
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that max value is between values supported by the
121
	 * controller.
122
	 */
123
	rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
124 125 126 127 128 129 130 131 132 133 134 135

	/*
	 * Calculate divisors so that we can get speed according the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be even number and starts from 2, scr can be any number
	 * between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
136 137
				*div_scr = (u8)scr;
				*div_cpsr = (u8)cpsr;
138 139 140 141 142 143 144 145
				return 0;
			}
		}
	}

	return -EINVAL;
}

146
static int ep93xx_spi_chip_setup(struct spi_master *master,
147 148
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
149
{
150
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
151
	u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
152 153
	u8 div_cpsr = 0;
	u8 div_scr = 0;
154
	u16 cr0;
155 156
	int err;

157
	err = ep93xx_spi_calc_divisors(master, xfer->speed_hz,
158
				       &div_cpsr, &div_scr);
159 160
	if (err)
		return err;
161

162
	cr0 = div_scr << SSPCR0_SCR_SHIFT;
163 164 165 166
	if (spi->mode & SPI_CPOL)
		cr0 |= SSPCR0_SPO;
	if (spi->mode & SPI_CPHA)
		cr0 |= SSPCR0_SPH;
167
	cr0 |= dss;
168

169
	dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
170
		spi->mode, div_cpsr, div_scr, dss);
171
	dev_dbg(&master->dev, "setup: cr0 %#x\n", cr0);
172

173 174
	writel(div_cpsr, espi->mmio + SSPCPSR);
	writel(cr0, espi->mmio + SSPCR0);
175 176

	return 0;
177 178
}

179
static void ep93xx_do_write(struct spi_master *master)
180
{
181 182
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *xfer = master->cur_msg->state;
183
	u32 val = 0;
184

185 186 187
	if (xfer->bits_per_word > 8) {
		if (xfer->tx_buf)
			val = ((u16 *)xfer->tx_buf)[espi->tx];
188
		espi->tx += 2;
189
	} else {
190 191
		if (xfer->tx_buf)
			val = ((u8 *)xfer->tx_buf)[espi->tx];
192
		espi->tx += 1;
193
	}
194
	writel(val, espi->mmio + SSPDR);
195 196
}

197
static void ep93xx_do_read(struct spi_master *master)
198
{
199 200
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct spi_transfer *xfer = master->cur_msg->state;
201
	u32 val;
202

203
	val = readl(espi->mmio + SSPDR);
204 205 206
	if (xfer->bits_per_word > 8) {
		if (xfer->rx_buf)
			((u16 *)xfer->rx_buf)[espi->rx] = val;
207
		espi->rx += 2;
208
	} else {
209 210
		if (xfer->rx_buf)
			((u8 *)xfer->rx_buf)[espi->rx] = val;
211
		espi->rx += 1;
212 213 214 215 216
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
L
Lee Jones 已提交
217
 * @master: SPI master
218 219 220 221 222 223 224 225
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should be
 * full.
 */
226
static int ep93xx_spi_read_write(struct spi_master *master)
227
{
228
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
229
	struct spi_transfer *xfer = master->cur_msg->state;
230 231

	/* read as long as RX FIFO has frames in it */
232
	while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
233
		ep93xx_do_read(master);
234 235 236 237
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
238 239
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) {
		ep93xx_do_write(master);
240 241 242
		espi->fifo_level++;
	}

243
	if (espi->rx == xfer->len)
244 245 246 247 248
		return 0;

	return -EINPROGRESS;
}

249 250 251 252 253 254 255 256 257 258 259 260 261
/*
 * Map a dma_data_direction (memory-mapping view) onto the corresponding
 * dmaengine transfer direction; anything else maps to DMA_TRANS_NONE.
 */
static enum dma_transfer_direction
ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		return DMA_MEM_TO_DEV;
	if (dir == DMA_FROM_DEVICE)
		return DMA_DEV_TO_MEM;

	return DMA_TRANS_NONE;
}

M
Mika Westerberg 已提交
262 263
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
264
 * @master: SPI master
M
Mika Westerberg 已提交
265 266 267 268 269 270 271
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
272
ep93xx_spi_dma_prepare(struct spi_master *master,
273
		       enum dma_data_direction dir)
M
Mika Westerberg 已提交
274
{
275
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
276
	struct spi_transfer *xfer = master->cur_msg->state;
M
Mika Westerberg 已提交
277 278 279 280 281 282 283
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
284
	size_t len = xfer->len;
M
Mika Westerberg 已提交
285 286
	int i, ret, nents;

287
	if (xfer->bits_per_word > 8)
M
Mika Westerberg 已提交
288 289 290 291 292
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
293
	conf.direction = ep93xx_dma_data_to_trans_dir(dir);
M
Mika Westerberg 已提交
294

295
	if (dir == DMA_FROM_DEVICE) {
M
Mika Westerberg 已提交
296
		chan = espi->dma_rx;
297
		buf = xfer->rx_buf;
M
Mika Westerberg 已提交
298 299 300 301 302 303
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
304
		buf = xfer->tx_buf;
M
Mika Westerberg 已提交
305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
351
		dev_warn(&master->dev, "len = %zu expected 0!\n", len);
M
Mika Westerberg 已提交
352 353 354 355 356 357 358
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

359 360
	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
				      DMA_CTRL_ACK);
M
Mika Westerberg 已提交
361 362 363 364 365 366 367 368 369
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
370
 * @master: SPI master
M
Mika Westerberg 已提交
371 372 373 374 375
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
376
static void ep93xx_spi_dma_finish(struct spi_master *master,
377
				  enum dma_data_direction dir)
M
Mika Westerberg 已提交
378
{
379
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
M
Mika Westerberg 已提交
380 381 382
	struct dma_chan *chan;
	struct sg_table *sgt;

383
	if (dir == DMA_FROM_DEVICE) {
M
Mika Westerberg 已提交
384 385 386 387 388 389 390 391 392 393 394 395
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
396 397
	struct spi_master *master = callback_param;

398 399
	ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
	ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
400 401

	spi_finalize_current_transfer(master);
M
Mika Westerberg 已提交
402 403
}

404
static int ep93xx_spi_dma_transfer(struct spi_master *master)
M
Mika Westerberg 已提交
405
{
406
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
M
Mika Westerberg 已提交
407 408
	struct dma_async_tx_descriptor *rxd, *txd;

409
	rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
M
Mika Westerberg 已提交
410
	if (IS_ERR(rxd)) {
411
		dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
412
		return PTR_ERR(rxd);
M
Mika Westerberg 已提交
413 414
	}

415
	txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
M
Mika Westerberg 已提交
416
	if (IS_ERR(txd)) {
417
		ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
418
		dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
419
		return PTR_ERR(txd);
M
Mika Westerberg 已提交
420 421 422 423
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
424
	rxd->callback_param = master;
M
Mika Westerberg 已提交
425

426
	/* Now submit both descriptors and start DMA */
M
Mika Westerberg 已提交
427 428 429 430 431 432
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

433 434
	/* signal that we need to wait for completion */
	return 1;
435 436 437 438
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
439 440
	struct spi_master *master = dev_id;
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
441
	u32 val;
442 443 444 445 446

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
447
	if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) {
448
		/* clear the overrun interrupt */
449
		writel(0, espi->mmio + SSPICR);
450
		dev_warn(&master->dev,
451
			 "receive overrun, aborting the message\n");
452
		master->cur_msg->status = -EIO;
453 454 455 456 457
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
458
		if (ep93xx_spi_read_write(master)) {
459 460 461 462 463 464 465 466 467 468 469 470 471 472
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
473 474 475 476
	val = readl(espi->mmio + SSPCR1);
	val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	writel(val, espi->mmio + SSPCR1);

477
	spi_finalize_current_transfer(master);
478

479 480 481
	return IRQ_HANDLED;
}

482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546
/*
 * ep93xx_spi_transfer_one() - spi_master ->transfer_one hook.
 *
 * Programs the controller for @xfer, then starts the transfer either via
 * DMA (large transfers, when a DMA channel is available) or via PIO with
 * FIFO interrupts. Returns 1 to tell the SPI core to wait for completion
 * (signalled later through spi_finalize_current_transfer()), or a negative
 * errno on setup failure.
 */
static int ep93xx_spi_transfer_one(struct spi_master *master,
				   struct spi_device *spi,
				   struct spi_transfer *xfer)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;
	int ret;

	/* Program divisors, mode and word size before touching the FIFOs */
	ret = ep93xx_spi_chip_setup(master, spi, xfer);
	if (ret) {
		dev_err(&master->dev, "failed to setup chip for transfer\n");
		return ret;
	}

	/* Stash the active transfer; the IRQ/DMA paths read it from here */
	master->cur_msg->state = xfer;
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE)
		return ep93xx_spi_dma_transfer(master);

	/* Using PIO so prime the TX FIFO and enable interrupts */
	ep93xx_spi_read_write(master);

	val = readl(espi->mmio + SSPCR1);
	val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	writel(val, espi->mmio + SSPCR1);

	/* signal that we need to wait for completion */
	return 1;
}

/*
 * ->prepare_message hook: drain any stale frames from the RX FIFO and reset
 * the software FIFO-level tracking before a message starts.
 */
static int ep93xx_spi_prepare_message(struct spi_master *master,
				      struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	unsigned long deadline = jiffies + msecs_to_jiffies(SPI_TIMEOUT);

	/* Just to be sure: flush any data from RX FIFO, bounded in time. */
	while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, deadline)) {
			dev_warn(&master->dev,
				 "timeout while flushing RX FIFO\n");
			return -ETIMEDOUT;
		}
		readl(espi->mmio + SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	return 0;
}

547 548 549 550 551 552
/*
 * ->prepare_transfer_hardware hook: enable the controller clock and the SSP
 * block (SSE bit).
 */
static int ep93xx_spi_prepare_hardware(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;
	int ret;

	ret = clk_prepare_enable(espi->clk);
	if (ret)
		return ret;

	val = readl(espi->mmio + SSPCR1);
	val |= SSPCR1_SSE;
	writel(val, espi->mmio + SSPCR1);

	return 0;
}

/*
 * ->unprepare_transfer_hardware hook: disable the SSP block and gate the
 * controller clock.
 */
static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	u32 val;

	val = readl(espi->mmio + SSPCR1);
	val &= ~SSPCR1_SSE;
	writel(val, espi->mmio + SSPCR1);

	clk_disable_unprepare(espi->clk);

	return 0;
}

M
Mika Westerberg 已提交
578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599
/*
 * dmaengine filter: accept only M2M-capable channels (reject M2P ones) and
 * attach our channel parameters to the accepted channel.
 */
static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	bool usable = !ep93xx_dma_chan_is_m2p(chan);

	if (usable)
		chan->private = filter_param;

	return usable;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
600
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
M
Mika Westerberg 已提交
601 602 603 604 605 606 607 608 609 610
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
611
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
M
Mika Westerberg 已提交
612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

/*
 * Release whatever DMA resources were acquired by ep93xx_spi_setup_dma():
 * channels, their sg tables, and the shared zero page.
 */
static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}

647
static int ep93xx_spi_probe(struct platform_device *pdev)
648 649 650 651 652
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
653
	int irq;
654 655
	int error;

J
Jingoo Han 已提交
656
	info = dev_get_platdata(&pdev->dev);
657 658 659 660
	if (!info) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -EINVAL;
	}
661

662
	irq = platform_get_irq(pdev, 0);
663
	if (irq < 0)
664 665 666 667 668 669 670 671
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		return -ENODEV;
	}

672
	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
673
	if (!master)
674 675
		return -ENOMEM;

676
	master->use_gpio_descriptors = true;
677 678
	master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
	master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
679 680
	master->prepare_message = ep93xx_spi_prepare_message;
	master->transfer_one = ep93xx_spi_transfer_one;
681 682
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
683
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
684 685 686 687 688
	/*
	 * The SPI core will count the number of GPIO descriptors to figure
	 * out the number of chip selects available on the platform.
	 */
	master->num_chipselect = 0;
689

690 691 692 693
	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

694
	espi->clk = devm_clk_get(&pdev->dev, NULL);
695 696 697 698 699 700 701 702 703 704
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
705 706
	master->max_speed_hz = clk_get_rate(espi->clk) / 2;
	master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
707

M
Mika Westerberg 已提交
708
	espi->sspdr_phys = res->start + SSPDR;
709

710 711 712
	espi->mmio = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(espi->mmio)) {
		error = PTR_ERR(espi->mmio);
713
		goto fail_release_master;
714 715
	}

716
	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
717
				0, "ep93xx-spi", master);
718 719
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
720
		goto fail_release_master;
721 722
	}

M
Mika Westerberg 已提交
723 724 725
	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

726
	/* make sure that the hardware is disabled */
727
	writel(0, espi->mmio + SSPCR1);
728

729
	error = devm_spi_register_master(&pdev->dev, master);
730 731
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
732
		goto fail_free_dma;
733 734 735
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
736
		 (unsigned long)res->start, irq);
737 738 739

	return 0;

M
Mika Westerberg 已提交
740 741
fail_free_dma:
	ep93xx_spi_release_dma(espi);
742 743 744 745 746 747
fail_release_master:
	spi_master_put(master);

	return error;
}

748
/*
 * ep93xx_spi_remove() - platform remove: DMA resources are the only thing
 * not covered by devm, so release them here.
 */
static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	ep93xx_spi_release_dma(espi);

	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
	},
762
	.probe		= ep93xx_spi_probe,
763
	.remove		= ep93xx_spi_remove,
764
};
765
module_platform_driver(ep93xx_spi_driver);
766 767 768 769 770

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");