Commit 49a359e3 authored by Michael Chan, committed by David S. Miller

tg3: Introduce separate functions to allocate/free RX/TX rings.

This is preparation work to allow the number of RX and TX rings to be
configured separately.
Reviewed-by: Nithin Nayak Sujir <nsujir@broadcom.com>
Reviewed-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 188c517a
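
At a glance, the patch moves TX/RX ring memory management out of tg3_alloc_consistent()/tg3_free_consistent() and into four dedicated helpers. A condensed sketch of the resulting flow (simplified from the diff below, not the literal driver code; error handling abbreviated):

/* Sketch only: how the new helpers compose after this patch. */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* ... per-vector status blocks and producer-index pointers
	 * are set up first, as before ...
	 */

	/* Ring memory now comes from dedicated helpers, sized by the
	 * new tp->txq_cnt and tp->rxq_cnt fields.
	 */
	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	/* tg3_free_consistent() now calls tg3_mem_rx_release() and
	 * tg3_mem_tx_release(), which also unwind partial allocations.
	 */
	tg3_free_consistent(tp);
	return -ENOMEM;
}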
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7607,15 +7607,11 @@ static int tg3_init_rings(struct tg3 *tp)
 	return 0;
 }
 
-/*
- * Must not be invoked with interrupt sources disabled and
- * the hardware shutdown down.
- */
-static void tg3_free_consistent(struct tg3 *tp)
+static void tg3_mem_tx_release(struct tg3 *tp)
 {
 	int i;
 
-	for (i = 0; i < tp->irq_cnt; i++) {
+	for (i = 0; i < tp->irq_max; i++) {
 		struct tg3_napi *tnapi = &tp->napi[i];
 
 		if (tnapi->tx_ring) {
@@ -7626,16 +7622,113 @@ static void tg3_free_consistent(struct tg3 *tp)
 
 		kfree(tnapi->tx_buffers);
 		tnapi->tx_buffers = NULL;
+	}
+}
 
-		if (tnapi->rx_rcb) {
-			dma_free_coherent(&tp->pdev->dev,
-					  TG3_RX_RCB_RING_BYTES(tp),
-					  tnapi->rx_rcb,
-					  tnapi->rx_rcb_mapping);
-			tnapi->rx_rcb = NULL;
-		}
+static int tg3_mem_tx_acquire(struct tg3 *tp)
+{
+	int i;
+	struct tg3_napi *tnapi = &tp->napi[0];
+
+	/* If multivector TSS is enabled, vector 0 does not handle
+	 * tx interrupts.  Don't allocate any resources for it.
+	 */
+	if (tg3_flag(tp, ENABLE_TSS))
+		tnapi++;
+
+	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
+		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
+					    TG3_TX_RING_SIZE, GFP_KERNEL);
+		if (!tnapi->tx_buffers)
+			goto err_out;
+
+		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+						    TG3_TX_RING_BYTES,
+						    &tnapi->tx_desc_mapping,
+						    GFP_KERNEL);
+		if (!tnapi->tx_ring)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	tg3_mem_tx_release(tp);
+	return -ENOMEM;
+}
+
+static void tg3_mem_rx_release(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_max; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
 
 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
 
+		if (!tnapi->rx_rcb)
+			continue;
+
+		dma_free_coherent(&tp->pdev->dev,
+				  TG3_RX_RCB_RING_BYTES(tp),
+				  tnapi->rx_rcb,
+				  tnapi->rx_rcb_mapping);
+		tnapi->rx_rcb = NULL;
+	}
+}
+
+static int tg3_mem_rx_acquire(struct tg3 *tp)
+{
+	unsigned int i, limit;
+
+	limit = tp->rxq_cnt;
+
+	/* If RSS is enabled, we need a (dummy) producer ring
+	 * set on vector zero.  This is the true hw prodring.
+	 */
+	if (tg3_flag(tp, ENABLE_RSS))
+		limit++;
+
+	for (i = 0; i < limit; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+			goto err_out;
+
+		/* If multivector RSS is enabled, vector 0
+		 * does not handle rx or tx interrupts.
+		 * Don't allocate any resources for it.
+		 */
+		if (!i && tg3_flag(tp, ENABLE_RSS))
+			continue;
+
+		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+						   TG3_RX_RCB_RING_BYTES(tp),
+						   &tnapi->rx_rcb_mapping,
+						   GFP_KERNEL);
+		if (!tnapi->rx_rcb)
+			goto err_out;
+
+		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+	}
+
+	return 0;
+
+err_out:
+	tg3_mem_rx_release(tp);
+	return -ENOMEM;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shutdown down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
 		if (tnapi->hw_status) {
 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
@@ -7645,6 +7738,9 @@ static void tg3_free_consistent(struct tg3 *tp)
 		}
 	}
 
+	tg3_mem_rx_release(tp);
+	tg3_mem_tx_release(tp);
+
 	if (tp->hw_stats) {
 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 				  tp->hw_stats, tp->stats_mapping);
@@ -7683,27 +7779,8 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 		sblk = tnapi->hw_status;
 
-		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
-			goto err_out;
-
-		/* If multivector TSS is enabled, vector 0 does not handle
-		 * tx interrupts.  Don't allocate any resources for it.
-		 */
-		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
-		    (i && tg3_flag(tp, ENABLE_TSS))) {
-			tnapi->tx_buffers = kzalloc(
-					       sizeof(struct tg3_tx_ring_info) *
-					       TG3_TX_RING_SIZE, GFP_KERNEL);
-			if (!tnapi->tx_buffers)
-				goto err_out;
-
-			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
-							    TG3_TX_RING_BYTES,
-							    &tnapi->tx_desc_mapping,
-							    GFP_KERNEL);
-			if (!tnapi->tx_ring)
-				goto err_out;
-		}
+		if (tg3_flag(tp, ENABLE_RSS)) {
+			u16 *prodptr = 0;
 
 		/*
 		 * When RSS is enabled, the status block format changes
@@ -7712,43 +7789,28 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 		 * other three rx return ring producer indexes.
 		 */
-		switch (i) {
-		default:
-			if (tg3_flag(tp, ENABLE_RSS)) {
-				tnapi->rx_rcb_prod_idx = NULL;
-				break;
-			}
-			/* Fall through */
-		case 1:
-			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
-			break;
-		case 2:
-			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
-			break;
-		case 3:
-			tnapi->rx_rcb_prod_idx = &sblk->reserved;
-			break;
-		case 4:
-			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
-			break;
-		}
-
-		/*
-		 * If multivector RSS is enabled, vector 0 does not handle
-		 * rx or tx interrupts.  Don't allocate any resources for it.
-		 */
-		if (!i && tg3_flag(tp, ENABLE_RSS))
-			continue;
-
-		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
-						   TG3_RX_RCB_RING_BYTES(tp),
-						   &tnapi->rx_rcb_mapping,
-						   GFP_KERNEL);
-		if (!tnapi->rx_rcb)
-			goto err_out;
-
-		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+			switch (i) {
+			case 1:
+				prodptr = &sblk->idx[0].rx_producer;
+				break;
+			case 2:
+				prodptr = &sblk->rx_jumbo_consumer;
+				break;
+			case 3:
+				prodptr = &sblk->reserved;
+				break;
+			case 4:
+				prodptr = &sblk->rx_mini_consumer;
+				break;
+			}
+			tnapi->rx_rcb_prod_idx = prodptr;
+		} else {
+			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+		}
 	}
 
+	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
+		goto err_out;
+
 	return 0;
 
 err_out:
@@ -10154,6 +10216,7 @@ static bool tg3_enable_msix(struct tg3 *tp)
 		 * one to the number of vectors we are requesting.
 		 */
 		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+		tp->rxq_cnt = tp->irq_cnt - 1;
 	}
 
 	for (i = 0; i < tp->irq_max; i++) {
@@ -10170,14 +10233,13 @@ static bool tg3_enable_msix(struct tg3 *tp)
 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
 			      tp->irq_cnt, rc);
 		tp->irq_cnt = rc;
+		tp->rxq_cnt = max(rc - 1, 1);
 	}
 
 	for (i = 0; i < tp->irq_max; i++)
 		tp->napi[i].irq_vec = msix_ent[i].vector;
 
-	netif_set_real_num_tx_queues(tp->dev, 1);
-	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
-	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
 		pci_disable_msix(tp->pdev);
 		return false;
 	}
@@ -10188,7 +10250,8 @@ static bool tg3_enable_msix(struct tg3 *tp)
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 			tg3_flag_set(tp, ENABLE_TSS);
-			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+			tp->txq_cnt = tp->rxq_cnt;
+			netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
 		}
 	}
 
@@ -10224,6 +10287,11 @@ static void tg3_ints_init(struct tg3 *tp)
 	if (!tg3_flag(tp, USING_MSIX)) {
 		tp->irq_cnt = 1;
 		tp->napi[0].irq_vec = tp->pdev->irq;
+	}
+
+	if (tp->irq_cnt == 1) {
+		tp->txq_cnt = 1;
+		tp->rxq_cnt = 1;
 		netif_set_real_num_tx_queues(tp->dev, 1);
 		netif_set_real_num_rx_queues(tp->dev, 1);
 	}
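
The MSI-X hunks above reduce the queue accounting to a few rules. A minimal restatement follows (the helper name and its vectors_granted parameter are illustrative, not part of the patch):

/* Illustrative summary of the rxq_cnt/txq_cnt rules established above. */
static void tg3_queue_counts_sketch(struct tg3 *tp, int vectors_granted)
{
	/* One MSI-X vector is reserved for link and error events, so the
	 * RX queue count is one less than the vector count, never below 1.
	 */
	tp->rxq_cnt = max(vectors_granted - 1, 1);

	/* Only 5719/5720 enable multivector TX (TSS); they pair one TX
	 * queue with each RX queue.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tp->txq_cnt = tp->rxq_cnt;

	/* With a single vector (INTx or MSI), both counts collapse to 1. */
	if (tp->irq_cnt == 1)
		tp->txq_cnt = tp->rxq_cnt = 1;
}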
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3037,6 +3037,7 @@ struct tg3 {
 	void				(*write32_tx_mbox) (struct tg3 *, u32,
 						    u32);
 	u32				dma_limit;
+	u32				txq_cnt;
 
 	/* begin "rx thread" cacheline section */
 	struct tg3_napi			napi[TG3_IRQ_MAX_VECS];
@@ -3051,6 +3052,7 @@ struct tg3 {
 	u32				rx_std_max_post;
 	u32				rx_offset;
 	u32				rx_pkt_map_sz;
+	u32				rxq_cnt;
 	bool				rx_refill;
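
Finally, the vector-0 special cases encoded in tg3_mem_tx_acquire() and tg3_mem_rx_acquire() can be summarized with two hypothetical predicates (written here for illustration only; they do not exist in the driver):

/* Vector 0 carries only link/error status under multivector RSS/TSS.
 * Under RSS it still gets a (dummy) producer ring -- the true hw
 * prodring -- but no RX return ring (rx_rcb); under TSS it gets no
 * TX ring either.
 */
static bool tg3_vector_has_rx_rcb(struct tg3 *tp, unsigned int i)
{
	unsigned int limit = tp->rxq_cnt;

	if (tg3_flag(tp, ENABLE_RSS))
		limit++;	/* slot 0 holds the dummy prodring */

	return i < limit && !(i == 0 && tg3_flag(tp, ENABLE_RSS));
}

static bool tg3_vector_has_tx_ring(struct tg3 *tp, unsigned int i)
{
	if (tg3_flag(tp, ENABLE_TSS))	/* vector 0 skipped, rings shift up */
		return i >= 1 && i <= tp->txq_cnt;

	return i < tp->txq_cnt;		/* typically just vector 0 */
}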