Commit 711924b1 authored by Shannon Nelson, committed by Linus Torvalds

I/OAT: fixups from code comments

A few fixups from Andrew's code comments.
  - removed "static inline" forward-declares
  - changed use of min() to min_t() (see the sketch after this list)
  - removed some unnecessary NULL initializations
  - removed a couple of BUG() calls
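
A word on the min() -> min_t() item: the kernel's min() macro type-checks its two arguments and warns when they differ, so comparing a size_t length against a u32 register value needs either an explicit cast (which can truncate) or min_t(), which casts both operands to one named type. A minimal sketch, not part of the patch (the helper name clamp_to_xfercap is made up for illustration):

#include <linux/kernel.h>	/* min(), min_t() */
#include <linux/types.h>	/* size_t, u32 */

/* Hypothetical helper, not driver code: clamp a request length to the
 * channel's transfer capability without losing size_t range. */
static size_t clamp_to_xfercap(size_t len, size_t xfercap)
{
	/* min_t(size_t, ...) compares both values as size_t; the old
	 * "min((u32) len, ...)" form would silently truncate a large len. */
	return min_t(size_t, len, xfercap);
}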

Fixes this:

drivers/dma/ioat_dma.c: In function `ioat1_tx_submit':
drivers/dma/ioat_dma.c:177: sorry, unimplemented: inlining failed in call to '__ioat1_dma_memcpy_issue_pending': function body not available
drivers/dma/ioat_dma.c:268: sorry, unimplemented: called from here
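
The error comes from the "static inline" forward declarations: a caller appears before the function body, and this gcc then gives up on inlining ("function body not available"). The patch avoids the problem by defining the helpers above their first caller. A self-contained illustration of the fixed pattern, with made-up names (fake_chan, fake_issue_pending), not the driver's own code:

/* Broken pattern (what triggered the error above):
 *	static inline void fake_issue_pending(struct fake_chan *c);
 *	... caller uses fake_issue_pending() ...   <- "function body not available"
 *	static inline void fake_issue_pending(struct fake_chan *c) { ... }
 */

struct fake_chan {
	int pending;
};

/* Fixed pattern: the full definition precedes its first caller. */
static inline void fake_issue_pending(struct fake_chan *c)
{
	c->pending = 0;
}

static void fake_submit(struct fake_chan *c)
{
	if (c->pending)
		fake_issue_pending(c);	/* body already visible, inlines fine */
}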
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Cc: "Williams, Dan J" <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 7c9e70ef
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -173,10 +173,47 @@ static void ioat_set_dest(dma_addr_t addr,
 	tx_to_ioat_desc(tx)->dst = addr;
 }
 
-static inline void __ioat1_dma_memcpy_issue_pending(
-					struct ioat_dma_chan *ioat_chan);
-static inline void __ioat2_dma_memcpy_issue_pending(
-					struct ioat_dma_chan *ioat_chan);
+/**
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ *				   descriptors to hw
+ * @chan: DMA channel handle
+ */
+static inline void __ioat1_dma_memcpy_issue_pending(
+					struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (ioat_chan->pending != 0) {
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat1_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
+	}
+}
+
+static inline void __ioat2_dma_memcpy_issue_pending(
+					struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (ioat_chan->pending != 0) {
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
+	}
+}
 
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
@@ -203,7 +240,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	prev = to_ioat_desc(ioat_chan->used_desc.prev);
 	prefetch(prev->hw);
 	do {
-		copy = min((u32) len, ioat_chan->xfercap);
+		copy = min_t(size_t, len, ioat_chan->xfercap);
 
 		new->async_tx.ack = 1;
@@ -291,10 +328,12 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	orig_ack = first->async_tx.ack;
 	new = first;
 
-	/* ioat_chan->desc_lock is still in force in version 2 path */
+	/*
+	 * ioat_chan->desc_lock is still in force in version 2 path
+	 * it gets unlocked at end of this function
+	 */
 	do {
-		copy = min((u32) len, ioat_chan->xfercap);
+		copy = min_t(size_t, len, ioat_chan->xfercap);
 
 		new->async_tx.ack = 1;
@@ -432,7 +471,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioat_desc_sw *desc = NULL;
+	struct ioat_desc_sw *desc;
 	u16 chanctrl;
 	u32 chanerr;
 	int i;
@@ -575,7 +614,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 static struct ioat_desc_sw *
 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new = NULL;
+	struct ioat_desc_sw *new;
 
 	if (!list_empty(&ioat_chan->free_desc)) {
 		new = to_ioat_desc(ioat_chan->free_desc.next);
@@ -583,9 +622,11 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 	} else {
 		/* try to get another desc */
 		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-		/* will this ever happen? */
-		/* TODO add upper limit on these */
-		BUG_ON(!new);
+		if (!new) {
+			dev_err(&ioat_chan->device->pdev->dev,
+				"alloc failed\n");
+			return NULL;
+		}
 	}
 
 	prefetch(new->hw);
@@ -595,7 +636,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 static struct ioat_desc_sw *
 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new = NULL;
+	struct ioat_desc_sw *new;
 
 	/*
 	 * used.prev points to where to start processing
@@ -609,8 +650,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 	if (ioat_chan->used_desc.prev &&
 	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
-		struct ioat_desc_sw *desc = NULL;
-		struct ioat_desc_sw *noop_desc = NULL;
+		struct ioat_desc_sw *desc;
+		struct ioat_desc_sw *noop_desc;
 		int i;
 
 		/* set up the noop descriptor */
@@ -624,10 +665,14 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 		ioat_chan->pending++;
 		ioat_chan->dmacount++;
 
-		/* get a few more descriptors */
+		/* try to get a few more descriptors */
 		for (i = 16; i; i--) {
 			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-			BUG_ON(!desc);
+			if (!desc) {
+				dev_err(&ioat_chan->device->pdev->dev,
+					"alloc failed\n");
+				break;
+			}
 			list_add_tail(&desc->node, ioat_chan->used_desc.next);
 
 			desc->hw->next
@@ -677,10 +722,13 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 	spin_lock_bh(&ioat_chan->desc_lock);
 	new = ioat_dma_get_next_descriptor(ioat_chan);
-	new->len = len;
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
-	return new ? &new->async_tx : NULL;
+	if (new) {
+		new->len = len;
+		return &new->async_tx;
+	} else
+		return NULL;
 }
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -693,53 +741,17 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 	spin_lock_bh(&ioat_chan->desc_lock);
 	new = ioat2_dma_get_next_descriptor(ioat_chan);
-	new->len = len;
 
-	/* leave ioat_chan->desc_lock set in version 2 path */
-	return new ? &new->async_tx : NULL;
-}
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- *				   descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void __ioat1_dma_memcpy_issue_pending(
-					struct ioat_dma_chan *ioat_chan)
-{
-	ioat_chan->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-	if (ioat_chan->pending != 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
-	}
-}
-
-static inline void __ioat2_dma_memcpy_issue_pending(
-					struct ioat_dma_chan *ioat_chan)
-{
-	ioat_chan->pending = 0;
-	writew(ioat_chan->dmacount,
-	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-}
-
-static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-	if (ioat_chan->pending != 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat2_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
-	}
-}
+	/*
+	 * leave ioat_chan->desc_lock set in ioat 2 path
+	 * it will get unlocked at end of tx_submit
+	 */
+	if (new) {
+		new->len = len;
+		return &new->async_tx;
+	} else
+		return NULL;
 }
 
 static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -1019,7 +1031,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 static void ioat_dma_test_callback(void *dma_async_param)
 {
 	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
-			dma_async_param);
+		dma_async_param);
 }
 
 /**
@@ -1032,7 +1044,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	u8 *src;
 	u8 *dest;
 	struct dma_chan *dma_chan;
-	struct dma_async_tx_descriptor *tx = NULL;
+	struct dma_async_tx_descriptor *tx;
 	dma_addr_t addr;
 	dma_cookie_t cookie;
 	int err = 0;
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -76,7 +76,7 @@ struct ioat_dma_chan {
 	dma_cookie_t completed_cookie;
 	unsigned long last_completion;
-	u32 xfercap;	/* XFERCAP register value expanded out */
+	size_t xfercap;	/* XFERCAP register value expanded out */
 
 	spinlock_t cleanup_lock;
 	spinlock_t desc_lock;