Commit 2f1860b8, authored by Alexander Duyck, committed by David S. Miller

ixgbe: pull all Tx init into ixgbe_configure_tx

The Tx init was spread out over ixgbe_configure, ixgbe_configure_tx, and
ixgbe_up_complete.  This change combines all of that into the
ixgbe_configure_tx function in order to simplify the Tx init path.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Parent: a34bcfff
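For orientation, this is the consolidated Tx init path that results from the patch, condensed from the hunks below into a single listing (a sketch only; the per-ring programming shown in the first two hunks lives in ixgbe_configure_tx_ring):

	static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
	{
		struct ixgbe_hw *hw = &adapter->hw;
		u32 dmatxctl;
		u32 i;

		/* step 1: program Tx multiqueue config (MTQC) first */
		ixgbe_setup_mtqc(adapter);

		if (hw->mac.type != ixgbe_mac_82598EB) {
			/* step 2: global DMA Tx enable; DMATXCTL.EN must be
			 * set before the individual Tx queues are enabled
			 * (the check excludes 82598, which takes the
			 * link-down path in the per-ring poll instead)
			 */
			dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
			dmatxctl |= IXGBE_DMATXCTL_TE;
			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		}

		/* step 3: per ring - disable, program base/len/head/tail,
		 * set thresholds, re-enable, then poll TXDCTL until the
		 * enable bit reads back
		 */
		for (i = 0; i < adapter->num_tx_queues; i++)
			ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
	}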
@@ -2436,8 +2436,16 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl;
 	u16 reg_idx = ring->reg_idx;
 
+	/* disable queue to avoid issues while updating state */
+	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
+			txdctl & ~IXGBE_TXDCTL_ENABLE);
+	IXGBE_WRITE_FLUSH(hw);
+
 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
 			(tdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
@@ -2448,6 +2456,38 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 	ring->head = IXGBE_TDH(reg_idx);
 	ring->tail = IXGBE_TDT(reg_idx);
 
+	/* configure fetching thresholds */
+	if (adapter->rx_itr_setting == 0) {
+		/* cannot set wthresh when itr==0 */
+		txdctl &= ~0x007F0000;
+	} else {
+		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
+		txdctl |= (8 << 16);
+	}
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		/* PThresh workaround for Tx hang with DFP enabled. */
+		txdctl |= 32;
+	}
+
+	/* reinitialize flowdirector state */
+	set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+
+	/* enable queue */
+	txdctl |= IXGBE_TXDCTL_ENABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
+
+	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* poll to verify queue is enabled */
+	do {
+		msleep(1);
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+	if (!wait_loop)
+		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
 }
 
 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
@@ -2497,13 +2537,22 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 dmatxctl;
 	u32 i;
 
+	ixgbe_setup_mtqc(adapter);
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		/* DMATXCTL.EN must be before Tx queues are enabled */
+		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+		dmatxctl |= IXGBE_DMATXCTL_TE;
+		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+	}
+
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
-
-	ixgbe_setup_mtqc(adapter);
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
@@ -3416,44 +3465,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	int i, j = 0;
 	int num_rx_rings = adapter->num_rx_queues;
 	int err;
-	u32 txdctl, rxdctl;
-	u32 dmatxctl;
+	u32 rxdctl;
 	u32 ctrl_ext;
 
 	ixgbe_get_hw_control(adapter);
 	ixgbe_setup_gpie(adapter);
 
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-		/* DMATXCTL.EN must be set after all Tx queue config is done */
-		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-		dmatxctl |= IXGBE_DMATXCTL_TE;
-		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
-	}
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i]->reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-		if (adapter->rx_itr_setting == 0) {
-			/* cannot set wthresh when itr==0 */
-			txdctl &= ~0x007F0000;
-		} else {
-			/* enable WTHRESH=8 descriptors, to encourage burst writeback */
-			txdctl |= (8 << 16);
-		}
-		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			int wait_loop = 10;
-			/* poll for Tx Enable ready */
-			do {
-				msleep(1);
-				txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-			} while (--wait_loop &&
-				 !(txdctl & IXGBE_TXDCTL_ENABLE));
-			if (!wait_loop)
-				e_err(drv, "Could not enable Tx Queue %d\n", j);
-		}
-	}
-
 	for (i = 0; i < num_rx_rings; i++) {
 		j = adapter->rx_ring[i]->reg_idx;
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3530,10 +3547,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		e_err(probe, "link_config FAILED %d\n", err);
 	}
 
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		set_bit(__IXGBE_FDIR_INIT_DONE,
-			&(adapter->tx_ring[i]->reinit_state));
-
 	/* enable transmits */
 	netif_tx_start_all_queues(adapter->netdev);
...