Commit 21f581a5 — Author: Matt Carlson; Committer: David S. Miller

tg3: Create a new prodring_set structure

This patch migrates most of the rx producer ring variables to a new
tg3_rx_prodring_set structure and modifies the code accordingly.
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: cf7a7298
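The shape of the change, in miniature: per-ring producer state that previously lived as loose fields of struct tg3 (rx_std, rx_std_buffers, rx_std_mapping, rx_std_ptr, and the jumbo equivalents) now lives in one struct tg3_rx_prodring_set, reached through a local pointer such as tpr = &tp->prodring[0], and the prodring helpers take that set as an explicit argument. The standalone sketch below only models this pattern; the types and helper names are simplified stand-ins, not driver code.

/*
 * Minimal, standalone model of the refactor (not tg3 driver code).
 * Producer-ring bookkeeping is grouped into one set structure and
 * helpers receive a pointer to the set instead of reaching into the
 * device-private struct. All types here are simplified stand-ins.
 */
#include <stdio.h>

struct rx_desc { unsigned int idx_len; };     /* stand-in for tg3_rx_buffer_desc */
struct ring_inf { void *skb; };               /* stand-in for ring_info */

struct rx_prodring_set {                      /* modeled on tg3_rx_prodring_set */
	unsigned int rx_std_ptr;              /* standard-ring producer index */
	unsigned int rx_jmb_ptr;              /* jumbo-ring producer index */
	struct rx_desc *rx_std;
	struct rx_desc *rx_jmb;
	struct ring_inf *rx_std_buffers;
	struct ring_inf *rx_jmb_buffers;
};

struct device_priv {                          /* stand-in for struct tg3 */
	struct rx_prodring_set prodring[1];   /* single-element array, mirroring prodring[1] */
};

/* Helpers now receive the set explicitly, as in tg3_rx_prodring_free(tp, tpr). */
static void prodring_reset(struct rx_prodring_set *tpr)
{
	tpr->rx_std_ptr = 0;
	tpr->rx_jmb_ptr = 0;
}

int main(void)
{
	struct device_priv dev = { 0 };
	struct rx_prodring_set *tpr = &dev.prodring[0];  /* the patch's access pattern */

	prodring_reset(tpr);
	printf("std_ptr=%u jmb_ptr=%u\n", tpr->rx_std_ptr, tpr->rx_jmb_ptr);
	return 0;
}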
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4352,24 +4352,25 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	struct sk_buff *skb;
 	dma_addr_t mapping;
 	int skb_size, dest_idx;
+	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	src_map = NULL;
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-		desc = &tp->rx_std[dest_idx];
-		map = &tp->rx_std_buffers[dest_idx];
+		desc = &tpr->rx_std[dest_idx];
+		map = &tpr->rx_std_buffers[dest_idx];
 		if (src_idx >= 0)
-			src_map = &tp->rx_std_buffers[src_idx];
+			src_map = &tpr->rx_std_buffers[src_idx];
 		skb_size = tp->rx_pkt_map_sz;
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-		desc = &tp->rx_jumbo[dest_idx];
-		map = &tp->rx_jumbo_buffers[dest_idx];
+		desc = &tpr->rx_jmb[dest_idx];
+		map = &tpr->rx_jmb_buffers[dest_idx];
 		if (src_idx >= 0)
-			src_map = &tp->rx_jumbo_buffers[src_idx];
+			src_map = &tpr->rx_jmb_buffers[src_idx];
 		skb_size = TG3_RX_JMB_MAP_SZ;
 		break;
@@ -4414,22 +4415,23 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 	struct ring_info *src_map, *dest_map;
 	int dest_idx;
+	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-		dest_desc = &tp->rx_std[dest_idx];
-		dest_map = &tp->rx_std_buffers[dest_idx];
-		src_desc = &tp->rx_std[src_idx];
-		src_map = &tp->rx_std_buffers[src_idx];
+		dest_desc = &tpr->rx_std[dest_idx];
+		dest_map = &tpr->rx_std_buffers[dest_idx];
+		src_desc = &tpr->rx_std[src_idx];
+		src_map = &tpr->rx_std_buffers[src_idx];
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-		dest_desc = &tp->rx_jumbo[dest_idx];
-		dest_map = &tp->rx_jumbo_buffers[dest_idx];
-		src_desc = &tp->rx_jumbo[src_idx];
-		src_map = &tp->rx_jumbo_buffers[src_idx];
+		dest_desc = &tpr->rx_jmb[dest_idx];
+		dest_map = &tpr->rx_jmb_buffers[dest_idx];
+		src_desc = &tpr->rx_jmb[src_idx];
+		src_map = &tpr->rx_jmb_buffers[src_idx];
 		break;
 
 	default:
@@ -4482,6 +4484,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
 	u32 sw_idx = tp->rx_rcb_ptr;
 	u16 hw_idx;
 	int received;
+	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	hw_idx = tp->hw_status->idx[0].rx_producer;
 	/*
@@ -4501,20 +4504,18 @@ static int tg3_rx(struct tg3 *tp, int budget)
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
-			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
-						  mapping);
-			skb = tp->rx_std_buffers[desc_idx].skb;
-			post_ptr = &tp->rx_std_ptr;
+			struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
+			dma_addr = pci_unmap_addr(ri, mapping);
+			skb = ri->skb;
+			post_ptr = &tpr->rx_std_ptr;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
-						  mapping);
-			skb = tp->rx_jumbo_buffers[desc_idx].skb;
-			post_ptr = &tp->rx_jumbo_ptr;
-		}
-		else {
+			struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
+			dma_addr = pci_unmap_addr(ri, mapping);
+			skb = ri->skb;
+			post_ptr = &tpr->rx_jmb_ptr;
+		} else
 			goto next_pkt_nopost;
-		}
 
 		work_mask |= opaque_key;
@@ -4627,12 +4628,12 @@ static int tg3_rx(struct tg3 *tp, int budget)
 
 	/* Refill RX ring(s). */
 	if (work_mask & RXD_OPAQUE_RING_STD) {
-		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
+		sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
 		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
 			     sw_idx);
 	}
 	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
+		sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
 		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
 			     sw_idx);
 	}
@@ -5517,13 +5518,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	return err;
 }
 
-static void tg3_rx_prodring_free(struct tg3 *tp)
+static void tg3_rx_prodring_free(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
 {
 	struct ring_info *rxp;
 	int i;
 
 	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
-		rxp = &tp->rx_std_buffers[i];
+		rxp = &tpr->rx_std_buffers[i];
 
 		if (rxp->skb == NULL)
 			continue;
@@ -5538,7 +5540,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp)
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
 		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
-			rxp = &tp->rx_jumbo_buffers[i];
+			rxp = &tpr->rx_jmb_buffers[i];
 
 			if (rxp->skb == NULL)
 				continue;
@@ -5560,12 +5562,13 @@ static void tg3_rx_prodring_free(struct tg3 *tp)
  * end up in the driver. tp->{tx,}lock are held and thus
  * we may not sleep.
  */
-static int tg3_rx_prodring_alloc(struct tg3 *tp)
+static int tg3_rx_prodring_alloc(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
 {
 	u32 i, rx_pkt_dma_sz;
 
 	/* Zero out all descriptors. */
-	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
+	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
 
 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
@@ -5580,7 +5583,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp)
 	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
 		struct tg3_rx_buffer_desc *rxd;
 
-		rxd = &tp->rx_std[i];
+		rxd = &tpr->rx_std[i];
 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
 		rxd->opaque = (RXD_OPAQUE_RING_STD |
@@ -5605,13 +5608,13 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp)
 	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
 		goto done;
 
-	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
+	memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
 		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
 			struct tg3_rx_buffer_desc *rxd;
 
-			rxd = &tp->rx_jumbo[i];
+			rxd = &tpr->rx_jmb[i];
 			rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
 			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
 					  RXD_FLAG_JUMBO;
@@ -5639,58 +5642,60 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp)
 	return 0;
 
 initfail:
-	tg3_rx_prodring_free(tp);
+	tg3_rx_prodring_free(tp, tpr);
 	return -ENOMEM;
 }
 
-static void tg3_rx_prodring_fini(struct tg3 *tp)
+static void tg3_rx_prodring_fini(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
 {
-	kfree(tp->rx_std_buffers);
-	tp->rx_std_buffers = NULL;
-	kfree(tp->rx_jumbo_buffers);
-	tp->rx_jumbo_buffers = NULL;
-	if (tp->rx_std) {
+	kfree(tpr->rx_std_buffers);
+	tpr->rx_std_buffers = NULL;
+	kfree(tpr->rx_jmb_buffers);
+	tpr->rx_jmb_buffers = NULL;
+	if (tpr->rx_std) {
 		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
-				    tp->rx_std, tp->rx_std_mapping);
-		tp->rx_std = NULL;
+				    tpr->rx_std, tpr->rx_std_mapping);
+		tpr->rx_std = NULL;
 	}
-	if (tp->rx_jumbo) {
+	if (tpr->rx_jmb) {
 		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
-				    tp->rx_jumbo, tp->rx_jumbo_mapping);
-		tp->rx_jumbo = NULL;
+				    tpr->rx_jmb, tpr->rx_jmb_mapping);
+		tpr->rx_jmb = NULL;
 	}
 }
 
-static int tg3_rx_prodring_init(struct tg3 *tp)
+static int tg3_rx_prodring_init(struct tg3 *tp,
+				struct tg3_rx_prodring_set *tpr)
 {
-	tp->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
+	tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
 				     TG3_RX_RING_SIZE, GFP_KERNEL);
-	if (!tp->rx_std_buffers)
+	if (!tpr->rx_std_buffers)
 		return -ENOMEM;
 
-	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
-					  &tp->rx_std_mapping);
-	if (!tp->rx_std)
+	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
+					  &tpr->rx_std_mapping);
+	if (!tpr->rx_std)
 		goto err_out;
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		tp->rx_jumbo_buffers = kzalloc(sizeof(struct ring_info) *
+		tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
 					       TG3_RX_JUMBO_RING_SIZE,
 					       GFP_KERNEL);
-		if (!tp->rx_jumbo_buffers)
+		if (!tpr->rx_jmb_buffers)
 			goto err_out;
 
-		tp->rx_jumbo = pci_alloc_consistent(tp->pdev,
+		tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
 						    TG3_RX_JUMBO_RING_BYTES,
-						    &tp->rx_jumbo_mapping);
-		if (!tp->rx_jumbo)
+						    &tpr->rx_jmb_mapping);
+		if (!tpr->rx_jmb)
 			goto err_out;
 	}
 
 	return 0;
 
 err_out:
-	tg3_rx_prodring_fini(tp);
+	tg3_rx_prodring_fini(tp, tpr);
 	return -ENOMEM;
 }
@@ -5726,7 +5731,7 @@ static void tg3_free_rings(struct tg3 *tp)
 		dev_kfree_skb_any(skb);
 	}
 
-	tg3_rx_prodring_free(tp);
+	tg3_rx_prodring_free(tp, &tp->prodring[0]);
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -5745,7 +5750,7 @@ static int tg3_init_rings(struct tg3 *tp)
 	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
 
-	return tg3_rx_prodring_alloc(tp);
+	return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
 }
 
 /*
@@ -5776,7 +5781,7 @@ static void tg3_free_consistent(struct tg3 *tp)
 				    tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
 
-	tg3_rx_prodring_fini(tp);
+	tg3_rx_prodring_fini(tp, &tp->prodring[0]);
 }
 
 /*
@@ -5785,7 +5790,7 @@ static void tg3_free_consistent(struct tg3 *tp)
  */
 static int tg3_alloc_consistent(struct tg3 *tp)
 {
-	if (tg3_rx_prodring_init(tp))
+	if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
 		return -ENOMEM;
 
 	tp->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
@@ -6794,6 +6799,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 {
 	u32 val, rdmac_mode;
 	int i, err, limit;
+	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	tg3_disable_ints(tp);
@@ -7022,9 +7028,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	 * configurable.
 	 */
 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
-	     ((u64) tp->rx_std_mapping >> 32));
+	     ((u64) tpr->rx_std_mapping >> 32));
 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
-	     ((u64) tp->rx_std_mapping & 0xffffffff));
+	     ((u64) tpr->rx_std_mapping & 0xffffffff));
 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
 	     NIC_SRAM_RX_BUFFER_DESC);
@@ -7043,9 +7049,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
 		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
-		     ((u64) tp->rx_jumbo_mapping >> 32));
+		     ((u64) tpr->rx_jmb_mapping >> 32));
 		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
-		     ((u64) tp->rx_jumbo_mapping & 0xffffffff));
+		     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
 		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 		     RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
 		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
@@ -7102,14 +7108,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		       BDINFO_FLAGS_MAXLEN_SHIFT),
 		      0);
 
-	tp->rx_std_ptr = tp->rx_pending;
+	tpr->rx_std_ptr = tp->rx_pending;
 	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-		     tp->rx_std_ptr);
+		     tpr->rx_std_ptr);
 
-	tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
+	tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
 			  tp->rx_jumbo_pending : 0;
 	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-		     tp->rx_jumbo_ptr);
+		     tpr->rx_jmb_ptr);
 
 	/* Initialize MAC address and backoff seed. */
 	__tg3_set_mac_addr(tp, 0);
@@ -9815,6 +9821,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 	dma_addr_t map;
 	int num_pkts, tx_len, rx_len, i, err;
 	struct tg3_rx_buffer_desc *desc;
+	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	if (loopback_mode == TG3_MAC_LOOPBACK) {
 		/* HW errata - mac loopback fails in some cases on 5780.
@@ -9949,9 +9956,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	if (rx_len != tx_len)
 		goto out;
 
-	rx_skb = tp->rx_std_buffers[desc_idx].skb;
-	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
+	rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+	map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
 	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
 
 	for (i = 14; i < tx_len; i++) {

--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2476,6 +2476,17 @@ struct tg3_ethtool_stats {
 	u64 nic_tx_threshold_hit;
 };
 
+struct tg3_rx_prodring_set {
+	u32 rx_std_ptr;
+	u32 rx_jmb_ptr;
+	struct tg3_rx_buffer_desc *rx_std;
+	struct tg3_rx_buffer_desc *rx_jmb;
+	struct ring_info *rx_std_buffers;
+	struct ring_info *rx_jmb_buffers;
+	dma_addr_t rx_std_mapping;
+	dma_addr_t rx_jmb_mapping;
+};
+
 struct tg3 {
 	/* begin "general, frequently-used members" cacheline section */
@@ -2551,27 +2562,19 @@ struct tg3 {
 	void (*write32_rx_mbox) (struct tg3 *, u32,
 				 u32);
 
 	u32 rx_rcb_ptr;
-	u32 rx_std_ptr;
-	u32 rx_jumbo_ptr;
-
 	u32 rx_pending;
 	u32 rx_jumbo_pending;
+	u32 rx_std_max_post;
+	u32 rx_pkt_map_sz;
 #if TG3_VLAN_TAG_USED
 	struct vlan_group *vlgrp;
 #endif
 
-	struct tg3_rx_buffer_desc *rx_std;
-	struct ring_info *rx_std_buffers;
-	dma_addr_t rx_std_mapping;
-	u32 rx_std_max_post;
-
-	struct tg3_rx_buffer_desc *rx_jumbo;
-	struct ring_info *rx_jumbo_buffers;
-	dma_addr_t rx_jumbo_mapping;
-
 	struct tg3_rx_buffer_desc *rx_rcb;
 	dma_addr_t rx_rcb_mapping;
-	u32 rx_pkt_map_sz;
+	struct tg3_rx_prodring_set prodring[1];
 
 	/* begin "everything else" cacheline(s) section */
 	struct net_device_stats net_stats;
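Read together, the tg3.c hunks pair the helpers into a simple lifecycle: tg3_alloc_consistent()/tg3_free_consistent() create and destroy the ring memory through tg3_rx_prodring_init()/_fini(), while tg3_init_rings()/tg3_free_rings() populate and drain it through tg3_rx_prodring_alloc()/_free(), each call now passing &tp->prodring[0]. A compressed, standalone model of that init/alloc-failure/fini ordering follows; plain calloc/free stand in for the kernel allocators and the names are illustrative, not the driver's.

/* Standalone model of the init -> alloc -> free -> fini pairing used by the
 * patch. calloc/free stand in for kzalloc/pci_alloc_consistent; the error
 * path mirrors the driver's "goto err_out" that unwinds via the fini helper. */
#include <stdio.h>
#include <stdlib.h>

struct prodring_set {
	void *rx_std;           /* stand-in for the descriptor ring memory */
	void *rx_std_buffers;   /* stand-in for the per-slot ring_info array */
};

static void prodring_fini(struct prodring_set *tpr)
{
	free(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	free(tpr->rx_std);
	tpr->rx_std = NULL;
}

static int prodring_init(struct prodring_set *tpr)
{
	tpr->rx_std_buffers = calloc(512, sizeof(void *));
	if (!tpr->rx_std_buffers)
		return -1;
	tpr->rx_std = calloc(512, 32);
	if (!tpr->rx_std)
		goto err_out;
	return 0;

err_out:
	prodring_fini(tpr);      /* unwind, like tg3_rx_prodring_fini(tp, tpr) */
	return -1;
}

int main(void)
{
	struct prodring_set set = { 0 };

	if (prodring_init(&set) != 0)
		return 1;
	/* ... fill and drain the ring here (prodring_alloc/_free in the driver) ... */
	prodring_fini(&set);
	puts("ok");
	return 0;
}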