Commit 17375d25 authored by Matt Carlson, committed by David S. Miller

tg3: Convert napi handlers to use tnapi

This patch converts the napi interrupt handler functions to accept and
use tg3_napi structures.
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 09943a18
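For context, the conversion pattern is the same throughout the patch: each handler that used to take the device-private struct tg3 * now takes a per-vector struct tg3_napi * and recovers the device pointer through tnapi->tp. The sketch below illustrates that shape only; the field set is simplified, and the real struct tg3_napi in the driver carries additional per-vector state.

#include <linux/netdevice.h>	/* struct napi_struct */

struct tg3;			/* device-private struct, opaque in this sketch */

struct tg3_napi {
	struct napi_struct napi;	/* per-vector NAPI context */
	struct tg3 *tp;			/* back-pointer to the owning device */
};

/* Before: static int tg3_rx(struct tg3 *tp, int budget);
 * After:  the handler receives the tg3_napi instance that was scheduled,
 *         so per-vector state can later live in tnapi rather than in tp.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;	/* device-wide state is still reached via tp */

	/* ... per-vector RX processing, bounded by budget ... */
	return 0;
}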
@@ -643,8 +643,9 @@ static void tg3_enable_ints(struct tg3 *tp)
 	tg3_cond_int(tp);
 }
 
-static inline unsigned int tg3_has_work(struct tg3 *tp)
+static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
 {
+	struct tg3 *tp = tnapi->tp;
 	struct tg3_hw_status *sblk = tp->hw_status;
 	unsigned int work_exists = 0;
 
@@ -663,13 +664,15 @@ static inline unsigned int tg3_has_work(struct tg3 *tp)
 	return work_exists;
 }
 
-/* tg3_restart_ints
+/* tg3_int_reenable
  * similar to tg3_enable_ints, but it accurately determines whether there
  * is new work pending and can return without flushing the PIO write
  * which reenables interrupts
  */
-static void tg3_restart_ints(struct tg3 *tp)
+static void tg3_int_reenable(struct tg3_napi *tnapi)
 {
+	struct tg3 *tp = tnapi->tp;
+
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 		     tp->last_tag << 24);
 	mmiowb();
@@ -679,7 +682,7 @@ static void tg3_restart_ints(struct tg3 *tp)
 	 * work we've completed.
 	 */
 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
-	    tg3_has_work(tp))
+	    tg3_has_work(tnapi))
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
 		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -4278,8 +4281,9 @@ static inline u32 tg3_tx_avail(struct tg3 *tp)
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
  */
-static void tg3_tx(struct tg3 *tp)
+static void tg3_tx(struct tg3_napi *tnapi)
 {
+	struct tg3 *tp = tnapi->tp;
 	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
 	u32 sw_idx = tp->tx_cons;
 
@@ -4344,9 +4348,10 @@ static void tg3_tx(struct tg3 *tp)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
+static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 			    int src_idx, u32 dest_idx_unmasked)
 {
+	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *desc;
 	struct ring_info *map, *src_map;
 	struct sk_buff *skb;
@@ -4409,9 +4414,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
  * members of the RX descriptor are invariant.  See notes above
  * tg3_alloc_rx_skb for full details.
  */
-static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
+static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
 			   int src_idx, u32 dest_idx_unmasked)
 {
+	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 	struct ring_info *src_map, *dest_map;
 	int dest_idx;
@@ -4471,8 +4477,9 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
  * If both the host and chip were to write into the same ring, cache line
  * eviction could occur since both entities want it in an exclusive state.
  */
-static int tg3_rx(struct tg3 *tp, int budget)
+static int tg3_rx(struct tg3_napi *tnapi, int budget)
 {
+	struct tg3 *tp = tnapi->tp;
 	u32 work_mask, rx_std_posted = 0;
 	u32 sw_idx = tp->rx_rcb_ptr;
 	u16 hw_idx;
@@ -4515,7 +4522,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
 		drop_it:
-			tg3_recycle_rx(tp, opaque_key,
+			tg3_recycle_rx(tnapi, opaque_key,
 				       desc_idx, *post_ptr);
 		drop_it_no_recycle:
 			/* Other statistics kept track of by card. */
@@ -4535,7 +4542,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
 		    ) {
 			int skb_size;
 
-			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
+			skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
 						    desc_idx, *post_ptr);
 			if (skb_size < 0)
 				goto drop_it;
@@ -4547,7 +4554,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
 		} else {
 			struct sk_buff *copy_skb;
 
-			tg3_recycle_rx(tp, opaque_key,
+			tg3_recycle_rx(tnapi, opaque_key,
 				       desc_idx, *post_ptr);
 
 			copy_skb = netdev_alloc_skb(tp->dev,
@@ -4584,11 +4591,11 @@ static int tg3_rx(struct tg3 *tp, int budget)
 #if TG3_VLAN_TAG_USED
 		if (tp->vlgrp != NULL &&
 		    desc->type_flags & RXD_FLAG_VLAN) {
-			vlan_gro_receive(&tp->napi[0].napi, tp->vlgrp,
+			vlan_gro_receive(&tnapi->napi, tp->vlgrp,
 					 desc->err_vlan & RXD_VLAN_MASK, skb);
 		} else
 #endif
-			napi_gro_receive(&tp->napi[0].napi, skb);
+			napi_gro_receive(&tnapi->napi, skb);
 
 		received++;
 		budget--;
@@ -4635,8 +4642,9 @@ static int tg3_rx(struct tg3 *tp, int budget)
 	return received;
 }
 
-static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 {
+	struct tg3 *tp = tnapi->tp;
 	struct tg3_hw_status *sblk = tp->hw_status;
 
 	/* handle link change and other phy events */
@@ -4662,7 +4670,7 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
 
 	/* run TX completion thread */
 	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
-		tg3_tx(tp);
+		tg3_tx(tnapi);
 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
 			return work_done;
 	}
@@ -4672,7 +4680,7 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
 	 * code synchronizes with tg3->napi.poll()
 	 */
 	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
-		work_done += tg3_rx(tp, budget - work_done);
+		work_done += tg3_rx(tnapi, budget - work_done);
 
 	return work_done;
 }
@@ -4685,7 +4693,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
 	struct tg3_hw_status *sblk = tp->hw_status;
 
 	while (1) {
-		work_done = tg3_poll_work(tp, work_done, budget);
+		work_done = tg3_poll_work(tnapi, work_done, budget);
 
 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
 			goto tx_recovery;
@@ -4694,7 +4702,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
 			break;
 
 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
-			/* tp->last_tag is used in tg3_restart_ints() below
+			/* tp->last_tag is used in tg3_int_reenable() below
 			 * to tell the hw how much work has been processed,
 			 * so we must read it before checking for more work.
 			 */
@@ -4704,9 +4712,9 @@ static int tg3_poll(struct napi_struct *napi, int budget)
 		} else
 			sblk->status &= ~SD_STATUS_UPDATED;
 
-		if (likely(!tg3_has_work(tp))) {
+		if (likely(!tg3_has_work(tnapi))) {
 			napi_complete(napi);
-			tg3_restart_ints(tp);
+			tg3_int_reenable(tnapi);
 			break;
 		}
 	}
@@ -4829,7 +4837,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
 	if (tg3_irq_sync(tp))
 		goto out;
 	sblk->status &= ~SD_STATUS_UPDATED;
-	if (likely(tg3_has_work(tp))) {
+	if (likely(tg3_has_work(tnapi))) {
 		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
 		napi_schedule(&tnapi->napi);
 	} else {
@@ -5560,6 +5568,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 				 struct tg3_rx_prodring_set *tpr)
 {
 	u32 i, rx_pkt_dma_sz;
+	struct tg3_napi *tnapi = &tp->napi[0];
 
 	/* Zero out all descriptors. */
 	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5586,7 +5595,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+		if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
 			printk(KERN_WARNING PFX
 			       "%s: Using a smaller RX standard ring, "
 			       "only %d out of %d buffers were allocated "
@@ -5617,7 +5626,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	}
 
 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
+		if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
 				     -1, i) < 0) {
 			printk(KERN_WARNING PFX
 			       "%s: Using a smaller RX jumbo ring, "
......