/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define RX_BUSY		0
#define TX_BUSY		1

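/*
 * Slave parameters for the shared Medfield DMA controller: hardware
 * request line 1 feeds the SPI TX FIFO, request line 0 drains the RX FIFO.
 */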
static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };

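/*
 * dmaengine channel filter: accept only channels belonging to the DMA
 * device found in mid_spi_dma_init() and stash the slave parameters in
 * chan->private for the DMA driver to pick up.
 */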
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

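/*
 * Find the Medfield DMA controller on the PCI bus, request one RX and one
 * TX slave channel from it, and publish them via master->dma_rx/dma_tx so
 * the SPI core can map transfer buffers for DMA.
 */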
static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct pci_dev *dma_dev;
	struct dw_dma_slave *tx = dws->dma_tx;
	struct dw_dma_slave *rx = dws->dma_rx;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;
	dws->master->dma_rx = dws->rxchan;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;
	dws->master->dma_tx = dws->txchan;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

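/* Terminate anything still in flight on both channels and release them. */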
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (!dws->dma_inited)
		return;

	dmaengine_terminate_sync(dws->txchan);
	dma_release_channel(dws->txchan);

	dmaengine_terminate_sync(dws->rxchan);
	dma_release_channel(dws->rxchan);
}

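/*
 * Interrupt handler used while DMA is in flight: the only unmasked sources
 * are the FIFO overrun/underrun errors, so any interrupt seen here means
 * the transfer failed; reset the chip and finalize the message with -EIO.
 */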
static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
	return IRQ_HANDLED;
}

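/* Only use DMA for transfers that do not fit entirely in the FIFO. */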
static bool mid_spi_can_dma(struct spi_master *master, struct spi_device *spi,
		struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_master_get_devdata(master);

	if (!dws->dma_inited)
		return false;

	return xfer->len > dws->fifo_len;
}

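/* Map the transfer width in bytes to the corresponding dmaengine bus width. */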
static enum dma_slave_buswidth convert_dma_width(u32 dma_width)
{
	if (dma_width == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (dma_width == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the TX channel clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}

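/*
 * Configure the TX channel for memory-to-device transfers and build a
 * descriptor from the transfer's pre-mapped scatterlist;
 * dw_spi_dma_prepare_rx() below is the mirror image for the receive side.
 */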
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = 16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->dma_width);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the callback
 * for the RX channel clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = 16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->dma_width);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

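/*
 * Per-transfer DMA setup: program the FIFO watermarks to match the 16-word
 * DMA bursts, enable the DMA handshake only for the directions in use,
 * and unmask the FIFO error interrupts handled by dma_transfer().
 */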
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, 0xf);
	dw_writel(dws, DW_SPI_DMATDLR, 0x10);

	if (xfer->tx_buf)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);

	dws->transfer_handler = dma_transfer;

	return 0;
}

static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/*
	 * rx must be started before tx: the controller clocks data in as
	 * soon as it starts transmitting, so the rx channel has to be armed
	 * first or incoming data would be lost.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	return 0;
}

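/* Terminate whichever channels are still marked busy, e.g. on an aborted transfer. */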
static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_all(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_all(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_setup	= mid_spi_dma_setup,
	.can_dma	= mid_spi_can_dma,
	.dma_transfer	= mid_spi_dma_transfer,
	.dma_stop	= mid_spi_dma_stop,
};
#endif

/* Some specific info for SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, 32b reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100m */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

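/*
 * Read the per-controller clock divider from the MRST clock control unit
 * to derive the controller's maximum bus frequency, and hook up the MID
 * DMA operations when CONFIG_SPI_DW_MID_DMA is enabled.
 */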
int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_tx = &mid_dma_tx;
	dws->dma_rx = &mid_dma_rx;
	dws->dma_ops = &mid_dma_ops;
#endif
	return 0;
}