Commit f7cbdb7d authored by David S. Miller

Merge branch 'ixgbe-next'

Aaron Brown says:

====================
Intel Wired LAN Driver Updates

This series contains updates to ixgbe and ixgbevf.

John adds rtnl lock / unlock semantics for ixgbe_reinit_locked()
which was being called without the rtnl lock being held.

Jacob corrects an issue where the ixgbevf_qv_disable() function does not
set the disabled bit correctly.

From the community, Wei corrects the struct type used for the pci
driver-specific data in ixgbevf_suspend().

Don changes the way ring arrays are stored so that multiple queues on
multiple nodes can be supported, and creates new ring initialization
functions for work previously spread across multiple functions, making
the code closer to ixgbe and hopefully more readable. He also fixes
incorrect fiber eeprom write logic.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
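
Wei's ixgbevf_suspend() change is not among the hunks quoted below. As a
rough, hedged sketch of what "pci driver-specific data" refers to here, the
usual retrieval pattern in a netdev driver looks like the following; the
suspend signature and body are generic boilerplate, not the actual patch,
while pci_get_drvdata() and netdev_priv() are the standard kernel helpers:

/*
 * Generic sketch, not the ixgbevf patch itself: probe() stores the
 * net_device as the PCI drvdata, so suspend() reads it back as a
 * net_device and reaches the driver-private adapter via netdev_priv().
 */
#include <linux/pci.h>
#include <linux/netdevice.h>

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* ... quiesce rings and disable interrupts using 'adapter' ... */
	(void)adapter;
	return 0;
}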
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
 		goto out;
 	}
 
-	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
 
 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
...
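
The one-character change above restores the usual clear-then-set
(read-modify-write) idiom: the old code masked the value twice and could
only ever clear bits, so the requested rate-select setting was lost. A
minimal, self-contained sketch of the idiom, with the mask value and helper
name assumed purely for illustration:

/* Illustration only: IXGBE_SFF_SOFT_RS_SELECT_MASK's real value lives in
 * the driver headers; 0x8 here is an assumed stand-in. */
#include <stdint.h>

#define IXGBE_SFF_SOFT_RS_SELECT_MASK	0x8	/* assumed value */

static uint8_t set_rate_select(uint8_t eeprom_data, uint8_t rs)
{
	eeprom_data &= ~IXGBE_SFF_SOFT_RS_SELECT_MASK;	/* clear the field */
	eeprom_data |= rs;				/* then set the new bits */
	return eeprom_data;
}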
@@ -6392,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
 	netdev_err(adapter->netdev, "Reset adapter\n");
 	adapter->tx_timeout_count++;
 
+	rtnl_lock();
 	ixgbe_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
...
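
ixgbe_reinit_locked() expects the RTNL lock to be held, as it typically
already is on the ethtool and netdev-ops call paths; the reset work item
above was a caller that ran without it. A short sketch of the convention
(example_reset_work(), example_reinit_requiring_rtnl() and the ASSERT_RTNL()
check are illustrative additions, not part of the patch; the rtnl helpers
are the normal kernel ones):

#include <linux/rtnetlink.h>

/* A caller running from its own work item must take RTNL itself. */
static void example_reset_work(struct ixgbe_adapter *adapter)
{
	rtnl_lock();
	ixgbe_reinit_locked(adapter);
	rtnl_unlock();
}

/* A callee that requires the lock can make the precondition explicit: */
static void example_reinit_requiring_rtnl(struct ixgbe_adapter *adapter)
{
	ASSERT_RTNL();		/* warns if a caller forgot rtnl_lock() */
	/* ... bring the interface down and back up ... */
}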
@@ -277,4 +277,21 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ERR_RESET_FAILED		-2
 #define IXGBE_ERR_INVALID_ARGUMENT	-3
 
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE		0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH		0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT	16	   /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5)  /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6)  /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7)  /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9)  /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5)  /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9)  /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */
+
 #endif /* _IXGBEVF_DEFINES_H_ */
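
For context, masks like IXGBE_TXDCTL_ENABLE and IXGBE_TXDCTL_WTHRESH_SHIFT
are composed into a per-queue transmit control register value. A
self-contained sketch of that composition (the register read/write is faked
with a plain variable and the threshold value is arbitrary; only the two
macro names come from the hunk above):

#include <stdint.h>
#include <stdio.h>

#define IXGBE_TXDCTL_ENABLE		0x02000000	/* Ena specific Tx Queue */
#define IXGBE_TXDCTL_WTHRESH_SHIFT	16		/* shift to WTHRESH bits */

int main(void)
{
	uint32_t txdctl = 0;	/* stands in for a TXDCTL register read */
	uint32_t wthresh = 8;	/* example writeback threshold */

	txdctl |= wthresh << IXGBE_TXDCTL_WTHRESH_SHIFT;
	txdctl |= IXGBE_TXDCTL_ENABLE;		/* enable this Tx queue */

	printf("TXDCTL = 0x%08x\n", txdctl);	/* prints 0x02080000 */
	return 0;
}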
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].count = new_tx_count;
+			adapter->tx_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].count = new_rx_count;
+			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
 		goto clear_reset;
@@ -303,7 +303,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		/* clone ring and setup updated count */
-		tx_ring[i] = adapter->tx_ring[i];
+		tx_ring[i] = *adapter->tx_ring[i];
 		tx_ring[i].count = new_tx_count;
 		err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
 		if (!err)
@@ -329,7 +329,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		/* clone ring and setup updated count */
-		rx_ring[i] = adapter->rx_ring[i];
+		rx_ring[i] = *adapter->rx_ring[i];
 		rx_ring[i].count = new_rx_count;
 		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
 		if (!err)
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	/* Tx */
 	if (tx_ring) {
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			ixgbevf_free_tx_resources(adapter,
-						  &adapter->tx_ring[i]);
-			adapter->tx_ring[i] = tx_ring[i];
+			ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+			*adapter->tx_ring[i] = tx_ring[i];
 		}
 		adapter->tx_ring_count = new_tx_count;
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	/* Rx */
 	if (rx_ring) {
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			ixgbevf_free_rx_resources(adapter,
-						  &adapter->rx_ring[i]);
-			adapter->rx_ring[i] = rx_ring[i];
+			ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+			*adapter->rx_ring[i] = rx_ring[i];
 		}
 		adapter->rx_ring_count = new_rx_count;
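
The set_ringparam hunks above follow a clone, allocate, swap pattern: each
live ring is copied by value into a temporary, the copy gets the new
descriptor count and freshly allocated resources, and only when every
allocation succeeds are the old resources freed and the copies written back
through the ring pointers. A compact, self-contained sketch of that pattern
with generic names (nothing here is the driver's actual API; only the
overall shape mirrors the code above):

/*
 * Sketch of the clone-allocate-swap idiom; "ring", resize() and
 * alloc_descriptors() are made up for illustration.
 */
#include <stdlib.h>

struct ring {
	int count;	/* number of descriptors */
	void *desc;	/* descriptor memory */
};

static int alloc_descriptors(struct ring *r)
{
	r->desc = calloc(r->count, 16);	/* pretend each descriptor is 16 bytes */
	return r->desc ? 0 : -1;
}

static int resize(struct ring **rings, int nrings, int new_count)
{
	struct ring *tmp = calloc(nrings, sizeof(*tmp));
	int i;

	if (!tmp)
		return -1;

	for (i = 0; i < nrings; i++) {
		tmp[i] = *rings[i];		/* clone the live ring */
		tmp[i].count = new_count;	/* apply the new size */
		if (alloc_descriptors(&tmp[i]))
			goto err;		/* old rings left untouched */
	}

	for (i = 0; i < nrings; i++) {
		free(rings[i]->desc);		/* free old resources */
		*rings[i] = tmp[i];		/* swap in the resized clone */
	}
	free(tmp);
	return 0;

err:
	while (i--)
		free(tmp[i].desc);
	free(tmp);
	return -1;
}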
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 		    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_yields += adapter->rx_ring[i].bp_yields;
-		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
-		rx_yields += adapter->rx_ring[i].bp_yields;
+		rx_yields += adapter->rx_ring[i]->bp_yields;
+		rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
+		rx_yields += adapter->rx_ring[i]->bp_yields;
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		tx_yields += adapter->tx_ring[i].bp_yields;
-		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
-		tx_yields += adapter->tx_ring[i].bp_yields;
+		tx_yields += adapter->tx_ring[i]->bp_yields;
+		tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
+		tx_yields += adapter->tx_ring[i]->bp_yields;
 	}
 
 	adapter->bp_rx_yields = rx_yields;
...
@@ -260,6 +260,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
 	spin_lock_bh(&q_vector->lock);
 	if (q_vector->state & IXGBEVF_QV_OWNED)
 		rc = false;
+	q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
 	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
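
The added line makes ixgbevf_qv_disable() latch the DISABLED bit even when
the vector is still owned, so the owner's unlock path and any later lock
attempt can see that the vector is being torn down. A simplified, lock-free
model of that interplay (the bit values and the try-lock helper are
placeholders; only the IXGBEVF_QV_OWNED and IXGBEVF_QV_STATE_DISABLED names
appear in the hunk above, and the real code holds a spinlock around these
updates):

#include <stdbool.h>

#define IXGBEVF_QV_STATE_NAPI		1	/* placeholder bit values */
#define IXGBEVF_QV_STATE_POLL		2
#define IXGBEVF_QV_OWNED		(IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
#define IXGBEVF_QV_STATE_DISABLED	4

static bool qv_disable(unsigned int *state)
{
	bool rc = true;

	if (*state & IXGBEVF_QV_OWNED)	/* someone still holds it */
		rc = false;
	/* Always latch DISABLED, even while owned, so the owner's unlock
	 * path and any new lock attempt see the shutdown. */
	*state |= IXGBEVF_QV_STATE_DISABLED;
	return rc;
}

static bool qv_try_lock_napi(unsigned int *state)
{
	if (*state & (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED))
		return false;		/* busy or being torn down */
	*state |= IXGBEVF_QV_STATE_NAPI;
	return true;
}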
@@ -326,7 +327,7 @@ struct ixgbevf_adapter {
 	u32 eims_other;
 
 	/* TX */
-	struct ixgbevf_ring *tx_ring;	/* One per active queue */
+	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
 	int num_tx_queues;
 	u64 restart_queue;
 	u64 hw_csum_tx_good;
@@ -336,7 +337,7 @@ struct ixgbevf_adapter {
 	u32 tx_timeout_count;
 
 	/* RX */
-	struct ixgbevf_ring *rx_ring;	/* One per active queue */
+	struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
 	int num_rx_queues;
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
...
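
With the rings held as an array of pointers rather than one contiguous
array, each ring can be allocated separately, for example on the memory
node closest to the queue's CPU, which is the multi-node support the cover
letter mentions. A hedged sketch of that allocation pattern (userspace
stand-ins are used, calloc() in place of the kernel's kzalloc_node(); the
ring struct and MAX_TX_QUEUES value are assumed for the sketch):

#include <stdlib.h>

#define MAX_TX_QUEUES 8			/* example value for the sketch */

struct ring { int count; int queue_index; };

struct adapter_stub {
	struct ring *tx_ring[MAX_TX_QUEUES];
	int num_tx_queues;
};

static int alloc_tx_rings(struct adapter_stub *a, int default_count)
{
	int i;

	for (i = 0; i < a->num_tx_queues; i++) {
		/* In the driver this could be kzalloc_node(sizeof(*ring),
		 * GFP_KERNEL, node) with a per-queue NUMA node. */
		struct ring *ring = calloc(1, sizeof(*ring));

		if (!ring)
			goto err;
		ring->count = default_count;
		ring->queue_index = i;
		a->tx_ring[i] = ring;
	}
	return 0;

err:
	while (i--) {
		free(a->tx_ring[i]);
		a->tx_ring[i] = NULL;
	}
	return -1;
}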