Commit 8275c642 authored by Vitaly Wool, committed by Greg Kroah-Hartman

[PATCH] spi: use linked lists rather than an array

This makes the SPI core and its users access the transfers in the SPI message
structure as a linked list rather than as an array, as discussed on LKML.

From: David Brownell <dbrownell@users.sourceforge.net>

  Updates including doc, bugfixes to the list code, add
  spi_message_add_tail().  Plus, initialize things _before_ grabbing the
  locks in some cases (in case it grows more expensive).  This also merges
  some bitbang updates of mine that didn't yet make it into the mm tree.
Signed-off-by: Vitaly Wool <vwool@ru.mvista.com>
Signed-off-by: Dmitry Pervushin <dpervushin@gmail.com>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Parent 2f9f7628
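For protocol drivers, the conversion amounts to replacing the old transfers/n_transfer array fields with spi_message_init() plus spi_message_add_tail(). A minimal sketch of the new idiom (assuming <linux/spi/spi.h>; the function name and buffers are illustrative placeholders, not part of this patch):

/* Sketch of the list-based idiom introduced by this patch; everything
 * except the spi_* calls is an illustrative placeholder.
 */
static int example_cmd_then_read(struct spi_device *spi,
				 const u8 *cmd, size_t cmd_len,
				 u8 *data, size_t data_len)
{
	struct spi_transfer t[2];
	struct spi_message m;

	spi_message_init(&m);		/* zero the message, init its transfer list */
	memset(t, 0, sizeof t);

	t[0].tx_buf = cmd;
	t[0].len = cmd_len;
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = data;
	t[1].len = data_len;
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);	/* blocks until the whole message completes */
}
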
@@ -155,10 +155,13 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL);
int status;
int sample;
int i;
if (!req)
return -ENOMEM;
INIT_LIST_HEAD(&req->msg.transfers);
/* activate reference, so it has time to settle; */
req->xfer[0].tx_buf = &ref_on;
req->xfer[0].len = 1;
@@ -192,8 +195,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
/* group all the transfers together, so we can't interfere with
* reading touchscreen state; disable penirq while sampling
*/
req->msg.transfers = req->xfer;
req->msg.n_transfer = 6;
for (i = 0; i < 6; i++)
spi_message_add_tail(&req->xfer[i], &req->msg);
disable_irq(spi->irq);
status = spi_sync(spi, &req->msg);
@@ -398,6 +401,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
struct ads7846 *ts;
struct ads7846_platform_data *pdata = spi->dev.platform_data;
struct spi_transfer *x;
int i;
if (!spi->irq) {
dev_dbg(&spi->dev, "no IRQ?\n");
@@ -500,8 +504,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
CS_CHANGE(x[-1]);
ts->msg.transfers = ts->xfer;
ts->msg.n_transfer = x - ts->xfer;
for (i = 0; i < x - ts->xfer; i++)
spi_message_add_tail(&ts->xfer[i], &ts->msg);
ts->msg.complete = ads7846_rx;
ts->msg.context = ts;
......
@@ -245,6 +245,21 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
if (from + len > flash->mtd.size)
return -EINVAL;
spi_message_init(&m);
memset(t, 0, (sizeof t));
t[0].tx_buf = flash->command;
t[0].len = sizeof(flash->command);
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = len;
spi_message_add_tail(&t[1], &m);
/* Byte count starts at zero. */
if (retlen)
*retlen = 0;
down(&flash->lock);
/* Wait till previous write/erase is done. */
@@ -254,8 +269,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
return 1;
}
memset(t, 0, (sizeof t));
/* NOTE: OPCODE_FAST_READ (if available) is faster... */
/* Set up the write data buffer. */
@@ -264,19 +277,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
flash->command[2] = from >> 8;
flash->command[3] = from;
/* Byte count starts at zero. */
if (retlen)
*retlen = 0;
t[0].tx_buf = flash->command;
t[0].len = sizeof(flash->command);
t[1].rx_buf = buf;
t[1].len = len;
m.transfers = t;
m.n_transfer = 2;
spi_sync(flash->spi, &m);
*retlen = m.actual_length - sizeof(flash->command);
@@ -313,6 +313,16 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to + len > flash->mtd.size)
return -EINVAL;
spi_message_init(&m);
memset(t, 0, (sizeof t));
t[0].tx_buf = flash->command;
t[0].len = sizeof(flash->command);
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
spi_message_add_tail(&t[1], &m);
down(&flash->lock);
/* Wait until finished previous write command. */
@@ -321,26 +331,17 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
write_enable(flash);
memset(t, 0, (sizeof t));
/* Set up the opcode in the write buffer. */
flash->command[0] = OPCODE_PP;
flash->command[1] = to >> 16;
flash->command[2] = to >> 8;
flash->command[3] = to;
t[0].tx_buf = flash->command;
t[0].len = sizeof(flash->command);
m.transfers = t;
m.n_transfer = 2;
/* what page do we start with? */
page_offset = to % FLASH_PAGESIZE;
/* do all the bytes fit onto one page? */
if (page_offset + len <= FLASH_PAGESIZE) {
t[1].tx_buf = buf;
t[1].len = len;
spi_sync(flash->spi, &m);
@@ -352,7 +353,6 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
/* the size of data remaining on the first page */
page_size = FLASH_PAGESIZE - page_offset;
t[1].tx_buf = buf;
t[1].len = page_size;
spi_sync(flash->spi, &m);
......
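The write path above reuses a single message across page-sized chunks, changing only the data transfer between spi_sync() calls. Isolated from the driver, the pattern looks roughly like this (a sketch, assuming <linux/spi/spi.h> and <linux/kernel.h>; chunking and command encoding are placeholders, and a real driver re-encodes the command buffer for every chunk):

static int chunked_write_sketch(struct spi_device *spi, u8 *cmd, size_t cmd_len,
				const u8 *buf, size_t len, size_t chunk)
{
	struct spi_transfer t[2];
	struct spi_message m;
	size_t done = 0;
	int status = 0;

	spi_message_init(&m);
	memset(t, 0, sizeof t);

	t[0].tx_buf = cmd;			/* opcode + address buffer */
	t[0].len = cmd_len;
	spi_message_add_tail(&t[0], &m);

	spi_message_add_tail(&t[1], &m);	/* data transfer, filled in below */

	while (done < len && status == 0) {
		t[1].tx_buf = buf + done;
		t[1].len = min_t(size_t, chunk, len - done);
		status = spi_sync(spi, &m);	/* transfers stay linked between calls */
		done += t[1].len;
	}
	return status;
}
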
@@ -147,7 +147,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct dataflash *priv = (struct dataflash *)mtd->priv;
struct spi_device *spi = priv->spi;
struct spi_transfer x[1] = { { .tx_dma = 0, }, };
struct spi_transfer x = { .tx_dma = 0, };
struct spi_message msg;
unsigned blocksize = priv->page_size << 3;
u8 *command;
@@ -162,10 +162,11 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
|| (instr->addr % priv->page_size) != 0)
return -EINVAL;
x[0].tx_buf = command = priv->command;
x[0].len = 4;
msg.transfers = x;
msg.n_transfer = 1;
spi_message_init(&msg);
x.tx_buf = command = priv->command;
x.len = 4;
spi_message_add_tail(&x, &msg);
down(&priv->lock);
while (instr->len > 0) {
@@ -256,12 +257,15 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
spi_message_init(&msg);
x[0].tx_buf = command;
x[0].len = 8;
spi_message_add_tail(&x[0], &msg);
x[1].rx_buf = buf;
x[1].len = len;
msg.transfers = x;
msg.n_transfer = 2;
spi_message_add_tail(&x[1], &msg);
down(&priv->lock);
@@ -320,9 +324,11 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
if ((to + len) > mtd->size)
return -EINVAL;
spi_message_init(&msg);
x[0].tx_buf = command = priv->command;
x[0].len = 4;
msg.transfers = x;
spi_message_add_tail(&x[0], &msg);
pageaddr = ((unsigned)to / priv->page_size);
offset = ((unsigned)to % priv->page_size);
@@ -364,7 +370,6 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
msg.n_transfer = 1;
status = spi_sync(spi, &msg);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
@@ -385,14 +390,16 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
x[1].tx_buf = writebuf;
x[1].len = writelen;
msg.n_transfer = 2;
spi_message_add_tail(x + 1, &msg);
status = spi_sync(spi, &msg);
spi_transfer_del(x + 1);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
spi->dev.bus_id, addr, writelen, status);
(void) dataflash_waitready(priv->spi);
#ifdef CONFIG_DATAFLASH_WRITE_VERIFY
/* (3) Compare to Buffer1 */
@@ -405,7 +412,6 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
msg.n_transfer = 1;
status = spi_sync(spi, &msg);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
......
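The dataflash write path relies on the new spi_transfer_del() helper: a data transfer can be appended to an already built command message, submitted, then unlinked so the message can be reused. In isolation (a sketch with placeholder names; the command length of 4 is illustrative):

static int cmd_then_optional_data(struct spi_device *spi,
				  u8 *cmd, const u8 *data, size_t data_len)
{
	struct spi_transfer cmd_x = { .tx_buf = cmd, .len = 4, };
	struct spi_transfer data_x = { .tx_buf = data, .len = data_len, };
	struct spi_message msg;
	int status;

	spi_message_init(&msg);
	spi_message_add_tail(&cmd_x, &msg);

	/* command-only pass: one transfer on the list */
	status = spi_sync(spi, &msg);
	if (status < 0)
		return status;

	/* command + data pass: append, run, then unlink so msg stays reusable */
	spi_message_add_tail(&data_x, &msg);
	status = spi_sync(spi, &msg);
	spi_transfer_del(&data_x);
	return status;
}
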
@@ -557,6 +557,17 @@ int spi_write_then_read(struct spi_device *spi,
if ((n_tx + n_rx) > SPI_BUFSIZ)
return -EINVAL;
spi_message_init(&message);
memset(x, 0, sizeof x);
if (n_tx) {
x[0].len = n_tx;
spi_message_add_tail(&x[0], &message);
}
if (n_rx) {
x[1].len = n_rx;
spi_message_add_tail(&x[1], &message);
}
/* ... unless someone else is using the pre-allocated buffer */
if (down_trylock(&lock)) {
local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
@@ -565,18 +576,11 @@ int spi_write_then_read(struct spi_device *spi,
} else
local_buf = buf;
memset(x, 0, sizeof x);
memcpy(local_buf, txbuf, n_tx);
x[0].tx_buf = local_buf;
x[0].len = n_tx;
x[1].rx_buf = local_buf + n_tx;
x[1].len = n_rx;
/* do the i/o */
message.transfers = x;
message.n_transfer = ARRAY_SIZE(x);
status = spi_sync(spi, &message);
if (status == 0) {
memcpy(rxbuf, x[1].rx_buf, n_rx);
......
@@ -146,6 +146,9 @@ int spi_bitbang_setup(struct spi_device *spi)
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
if (!spi->max_speed_hz)
return -EINVAL;
if (!cs) {
cs = kzalloc(sizeof *cs, SLAB_KERNEL);
if (!cs)
@@ -172,13 +175,8 @@ int spi_bitbang_setup(struct spi_device *spi)
if (!cs->txrx_word)
return -EINVAL;
if (!spi->max_speed_hz)
spi->max_speed_hz = 500 * 1000;
/* nsecs = max(50, (clock period)/2), be optimistic */
/* nsecs = (clock period)/2 */
cs->nsecs = (1000000000/2) / (spi->max_speed_hz);
if (cs->nsecs < 50)
cs->nsecs = 50;
if (cs->nsecs > MAX_UDELAY_MS * 1000)
return -EINVAL;
@@ -194,7 +192,7 @@ int spi_bitbang_setup(struct spi_device *spi)
/* deselect chip (low or high) */
spin_lock(&bitbang->lock);
if (!bitbang->busy) {
bitbang->chipselect(spi, 0);
bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(cs->nsecs);
}
spin_unlock(&bitbang->lock);
@@ -244,9 +242,9 @@ static void bitbang_work(void *_bitbang)
struct spi_message *m;
struct spi_device *spi;
unsigned nsecs;
struct spi_transfer *t;
struct spi_transfer *t = NULL;
unsigned tmp;
unsigned chipselect;
unsigned cs_change;
int status;
m = container_of(bitbang->queue.next, struct spi_message,
@@ -254,37 +252,49 @@ static void bitbang_work(void *_bitbang)
list_del_init(&m->queue);
spin_unlock_irqrestore(&bitbang->lock, flags);
// FIXME this is made-up
nsecs = 100;
/* FIXME this is made-up ... the correct value is known to
* word-at-a-time bitbang code, and presumably chipselect()
* should enforce these requirements too?
*/
nsecs = 100;
spi = m->spi;
t = m->transfers;
tmp = 0;
chipselect = 0;
cs_change = 1;
status = 0;
for (;;t++) {
list_for_each_entry (t, &m->transfers, transfer_list) {
if (bitbang->shutdown) {
status = -ESHUTDOWN;
break;
}
/* set up default clock polarity, and activate chip */
if (!chipselect) {
bitbang->chipselect(spi, 1);
/* set up default clock polarity, and activate chip;
* this implicitly updates clock and spi modes as
* previously recorded for this device via setup().
* (and also deselects any other chip that might be
* selected ...)
*/
if (cs_change) {
bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
ndelay(nsecs);
}
cs_change = t->cs_change;
if (!t->tx_buf && !t->rx_buf && t->len) {
status = -EINVAL;
break;
}
/* transfer data */
/* transfer data. the lower level code handles any
* new dma mappings it needs. our caller always gave
* us dma-safe buffers.
*/
if (t->len) {
/* FIXME if bitbang->use_dma, dma_map_single()
* before the transfer, and dma_unmap_single()
* afterwards, for either or both buffers...
/* REVISIT dma API still needs a designated
* DMA_ADDR_INVALID; ~0 might be better.
*/
if (!m->is_dma_mapped)
t->rx_dma = t->tx_dma = 0;
status = bitbang->txrx_bufs(spi, t);
}
if (status != t->len) {
@@ -299,29 +309,31 @@ nsecs = 100;
if (t->delay_usecs)
udelay(t->delay_usecs);
tmp++;
if (tmp >= m->n_transfer)
break;
chipselect = !t->cs_change;
if (chipselect)
if (!cs_change)
continue;
if (t->transfer_list.next == &m->transfers)
break;
bitbang->chipselect(spi, 0);
/* REVISIT do we want the udelay here instead? */
msleep(1);
/* sometimes a short mid-message deselect of the chip
* may be needed to terminate a mode or command
*/
ndelay(nsecs);
bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(nsecs);
}
tmp = m->n_transfer - 1;
tmp = m->transfers[tmp].cs_change;
m->status = status;
m->complete(m->context);
ndelay(2 * nsecs);
bitbang->chipselect(spi, status == 0 && tmp);
ndelay(nsecs);
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
* be for this chip too.
*/
if (!(status == 0 && cs_change)) {
ndelay(nsecs);
bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(nsecs);
}
spin_lock_irqsave(&bitbang->lock, flags);
}
......
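On the controller side, drivers now iterate the message's transfer list with list_for_each_entry() and detect the final transfer by peeking at the circular list, as bitbang_work() does above. A minimal sketch (hardware programming elided; the function name is a placeholder):

static void walk_transfers_sketch(struct spi_message *m)
{
	struct spi_transfer *t;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		/* program the controller for t->tx_buf / t->rx_buf / t->len here */

		/* the list is circular and headed at m->transfers, so this
		 * test is true only for the last transfer in the message
		 */
		if (t->transfer_list.next == &m->transfers)
			break;
	}
}
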
@@ -263,15 +263,16 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
/**
* struct spi_transfer - a read/write buffer pair
* @tx_buf: data to be written (dma-safe address), or NULL
* @rx_buf: data to be read (dma-safe address), or NULL
* @tx_dma: DMA address of buffer, if spi_message.is_dma_mapped
* @rx_dma: DMA address of buffer, if spi_message.is_dma_mapped
* @tx_buf: data to be written (dma-safe memory), or NULL
* @rx_buf: data to be read (dma-safe memory), or NULL
* @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
* @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
* @len: size of rx and tx buffers (in bytes)
* @cs_change: affects chipselect after this transfer completes
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this spi_message.
* @transfer_list: transfers are sequenced through spi_message.transfers
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide rx_buf and/or tx_buf.
@@ -279,11 +280,16 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* the data being transferred; that may reduce overhead, when the
* underlying driver uses dma.
*
* All SPI transfers start with the relevant chipselect active. Drivers
* can change behavior of the chipselect after the transfer finishes
* (including any mandatory delay). The normal behavior is to leave it
* selected, except for the last transfer in a message. Setting cs_change
* allows two additional behavior options:
* If the transmit buffer is null, undefined data will be shifted out
* while filling rx_buf. If the receive buffer is null, the data
* shifted in will be discarded. Only "len" bytes shift out (or in).
* It's an error to try to shift out a partial word. (For example, by
* shifting out three bytes with word size of sixteen or twenty bits;
* the former uses two bytes per word, the latter uses four bytes.)
*
* All SPI transfers start with the relevant chipselect active. Normally
* it stays selected until after the last transfer in a message. Drivers
* can affect the chipselect signal using cs_change:
*
* (i) If the transfer isn't the last one in the message, this flag is
* used to make the chipselect briefly go inactive in the middle of the
@@ -299,7 +305,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
* insulate against future API updates.
* insulate against future API updates. After you submit a message
* and its transfers, ignore them until its completion callback.
*/
struct spi_transfer {
/* it's ok if tx_buf == rx_buf (right?)
@@ -316,12 +323,13 @@ struct spi_transfer {
unsigned cs_change:1;
u16 delay_usecs;
struct list_head transfer_list;
};
/**
* struct spi_message - one multi-segment SPI transaction
* @transfers: the segements of the transaction
* @n_transfer: how many segments
* @transfers: list of transfer segments in this transaction
* @spi: SPI device to which the transaction is queued
* @is_dma_mapped: if true, the caller provided both dma and cpu virtual
* addresses for each transfer buffer
@@ -333,14 +341,22 @@ struct spi_transfer {
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
*
* An spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
* in the sense that no other spi_message may use that SPI bus until that
* sequence completes. On some systems, many such sequences can execute as
* a single programmed DMA transfer. On all systems, these messages are
* queued, and might complete after transactions to other devices. Messages
* sent to a given spi_device are always executed in FIFO order.
*
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
* insulate against future API updates.
* insulate against future API updates. After you submit a message
* and its transfers, ignore them until its completion callback.
*/
struct spi_message {
struct spi_transfer *transfers;
unsigned n_transfer;
struct list_head transfers;
struct spi_device *spi;
@@ -371,6 +387,24 @@ struct spi_message {
void *state;
};
static inline void spi_message_init(struct spi_message *m)
{
memset(m, 0, sizeof *m);
INIT_LIST_HEAD(&m->transfers);
}
static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
list_add_tail(&t->transfer_list, &m->transfers);
}
static inline void
spi_transfer_del(struct spi_transfer *t)
{
list_del(&t->transfer_list);
}
/* It's fine to embed message and transaction structures in other data
* structures so long as you don't free them while they're in use.
*/
@@ -383,8 +417,12 @@ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags
+ ntrans * sizeof(struct spi_transfer),
flags);
if (m) {
m->transfers = (void *)(m + 1);
m->n_transfer = ntrans;
int i;
struct spi_transfer *t = (struct spi_transfer *)(m + 1);
INIT_LIST_HEAD(&m->transfers);
for (i = 0; i < ntrans; i++, t++)
spi_message_add_tail(t, m);
}
return m;
}
@@ -402,6 +440,8 @@ static inline void spi_message_free(struct spi_message *m)
* device doesn't work with the mode 0 default. They may likewise need
* to update clock rates or word sizes from initial values. This function
* changes those settings, and must be called from a context that can sleep.
* The changes take effect the next time the device is selected and data
* is transferred to or from it.
*/
static inline int
spi_setup(struct spi_device *spi)
@@ -468,15 +508,12 @@ spi_write(struct spi_device *spi, const u8 *buf, size_t len)
{
struct spi_transfer t = {
.tx_buf = buf,
.rx_buf = NULL,
.len = len,
.cs_change = 0,
};
struct spi_message m = {
.transfers = &t,
.n_transfer = 1,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
return spi_sync(spi, &m);
}
@@ -493,16 +530,13 @@ static inline int
spi_read(struct spi_device *spi, u8 *buf, size_t len)
{
struct spi_transfer t = {
.tx_buf = NULL,
.rx_buf = buf,
.len = len,
.cs_change = 0,
};
struct spi_message m = {
.transfers = &t,
.n_transfer = 1,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
return spi_sync(spi, &m);
}
......
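To make the cs_change and delay_usecs semantics documented above concrete, a message that briefly deselects the chip between a command and its response could be built as follows (a sketch; the one-byte command, the 10 us delay, and the function name are invented, and the buffers must be dma-safe as the documentation requires):

static int cmd_with_mid_message_deselect(struct spi_device *spi,
					 const u8 *cmd, u8 *resp, size_t resp_len)
{
	struct spi_transfer t[2];
	struct spi_message m;

	spi_message_init(&m);
	memset(t, 0, sizeof t);

	t[0].tx_buf = cmd;		/* one-byte command from a dma-safe buffer */
	t[0].len = 1;
	t[0].delay_usecs = 10;		/* invented settle time before CS toggles */
	t[0].cs_change = 1;		/* mid-message: briefly drop chipselect */
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = resp;
	t[1].len = resp_len;
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);
}
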
@@ -31,8 +31,15 @@ struct spi_bitbang {
struct spi_master *master;
void (*chipselect)(struct spi_device *spi, int is_on);
#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */
#define BITBANG_CS_INACTIVE 0
/* txrx_bufs() may handle dma mapping for transfers that don't
* already have one (transfer.{tx,rx}_dma is zero), or use PIO
*/
int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
/* txrx_word[SPI_MODE_*]() just looks like a shift register */
u32 (*txrx_word[4])(struct spi_device *spi,
unsigned nsecs,
u32 word, u8 bits);
......
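A board-specific chipselect hook for the bitbang framework would then be written against the BITBANG_CS_* values rather than a raw line level. A sketch, assuming an active-low chip select; the GPIO helper is a placeholder, not a real kernel API:

static void myboard_chipselect(struct spi_device *spi, int is_active)
{
	/* active-low nCS: drive the line low for BITBANG_CS_ACTIVE */
	int level = (is_active == BITBANG_CS_ACTIVE) ? 0 : 1;

	myboard_gpio_set_value(spi->chip_select, level);	/* placeholder helper */
}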