Commit 6c62f606 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-09-18

This series contains updates to ixgbe and ixgbevf.

Ethan Zhao cleans up ixgbe and ixgbevf by removing bd_number from the
adapter struct because it is no longer useful.

Mark fixes an ixgbe issue where, if a hardware transmit timestamp is
requested, an uninitialized workqueue entry may be scheduled.  A check
for a PTP clock was added to avoid that.
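
A rough sketch of that guard, using the field and bit names visible in
the ixgbe_xmit_frame_ring hunk further down; without a registered PTP
clock the driver never schedules the PTP Tx timestamp work:

    /* Only flag the skb for a HW timestamp when a PTP clock exists and
     * no other Tx timestamp is already in flight; otherwise the
     * (possibly uninitialized) timestamp work is never scheduled.
     */
    if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
        adapter->ptp_clock &&
        !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
                               &adapter->state)) {
            skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
            tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
    }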

Jacob provides a number of cleanups for ixgbe.  Since we may call
ixgbe_acquire_msix_vectors() prior to registering our netdevice, we
should not use the netdevice-specific printk and should use
e_dev_warn() instead.  Similar to how ixgbevf handles acquiring MSI-X
vectors, we can return an error code instead of relying on the flag
being set.  This makes it clearer that we have failed to set up MSI-X
mode and will make it easier to consolidate MSI-X related code into a
single function.  In the case of disabling DCB, it is not an error
since we can still function; we just have to let the user know, so
e_dev_warn() is used instead of e_err().  Warnings were also added for
other features that are disabled when we are without MSI-X support.
Flags that are no longer used or needed are cleaned up.
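
As a rough illustration of that calling-convention change (the full
hunks are in ixgbe_set_interrupt_capability below), the caller stops
peeking at IXGBE_FLAG_MSIX_ENABLED and checks the return value instead:

    /* Before: acquire, then inspect a flag to see whether it worked */
    ixgbe_acquire_msix_vectors(adapter, v_budget);
    if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
            return;

    /* After: a negative return code reports the failure directly; the
     * MSI-X-only features (DCB, SR-IOV, RSS) are then disabled with
     * e_dev_warn() before falling back to MSI or legacy interrupts.
     */
    if (!ixgbe_acquire_msix_vectors(adapter))
            return;
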
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -611,9 +611,7 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
@@ -728,8 +726,6 @@ struct ixgbe_adapter {
u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 wol;
u16 bd_number;
u16 eeprom_verh;
u16 eeprom_verl;
u16 eeprom_cap;
......
@@ -696,46 +696,83 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
ixgbe_set_rss_queues(adapter);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
/**
* ixgbe_acquire_msix_vectors - acquire MSI-X vectors
* @adapter: board private structure
*
* Attempts to acquire a suitable range of MSI-X vector interrupts. Will
* return a negative error code if unable to acquire MSI-X vectors for any
* reason.
*/
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
int vector_threshold;
struct ixgbe_hw *hw = &adapter->hw;
int i, vectors, vector_threshold;
/* We start by asking for one vector per queue pair */
vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
* 2) Other (Link Status Change, etc.)
/* It is easy to be greedy for MSI-X vectors. However, it really
* doesn't do much good if we have a lot more vectors than CPUs. We'll
* be somewhat conservative and only ask for (roughly) the same number
* of vectors as there are CPUs.
*/
vector_threshold = MIN_MSIX_COUNT;
vectors = min_t(int, vectors, num_online_cpus());
/*
* The more we get, the more we will assign to Tx/Rx Cleanup
* for the separate queues...where Rx Cleanup >= Tx Cleanup.
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
/* Some vectors are necessary for non-queue interrupts */
vectors += NON_Q_VECTORS;
/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
* With features such as RSS and VMDq, we can easily surpass the
* number of Rx and Tx descriptor queues supported by our device.
* Thus, we cap the maximum in the rare cases where the CPU count also
* exceeds our vector limit
*/
vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
* handler, and (2) an Other (Link Status Change, etc.) handler.
*/
vector_threshold = MIN_MSIX_COUNT;
adapter->msix_entries = kcalloc(vectors,
sizeof(struct msix_entry),
GFP_KERNEL);
if (!adapter->msix_entries)
return -ENOMEM;
for (i = 0; i < vectors; i++)
adapter->msix_entries[i].entry = i;
vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
vector_threshold, vectors);
if (vectors < 0) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
/* A negative count of allocated vectors indicates an error in
* acquiring within the specified range of MSI-X vectors
*/
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI-X interrupts\n");
e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
vectors);
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else {
adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
/*
* Adjust for only the vectors we'll use, which is minimum
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
* vectors we were allocated.
*/
vectors -= NON_Q_VECTORS;
adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
return vectors;
}
/* we successfully allocated some number of vectors within our
* requested range.
*/
adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
/* Adjust for only the vectors we'll use, which is minimum
* of max_q_vectors, or the number of vectors we were allocated.
*/
vectors -= NON_Q_VECTORS;
adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
return 0;
}
static void ixgbe_add_ring(struct ixgbe_ring *ring,
@@ -1054,51 +1091,20 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
**/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int vector, v_budget, err;
/*
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPU's.
* The default is to use pairs of vectors.
*/
v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
v_budget = min_t(int, v_budget, num_online_cpus());
v_budget += NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
* hw.mac->max_msix_vectors vectors. With features
* such as RSS and VMDq, we can easily surpass the number of Rx and Tx
* descriptor queues supported by our device. Thus, we cap it off in
* those rare cases where the cpu count also exceeds our vector limit.
*/
v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (adapter->msix_entries) {
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
ixgbe_acquire_msix_vectors(adapter, v_budget);
int err;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
return;
}
/* We will try to get MSI-X interrupts first */
if (!ixgbe_acquire_msix_vectors(adapter))
return;
/* At this point, we do not have MSI-X capabilities. We need to
* reconfigure or disable various features which require MSI-X
* capability.
*/
/* disable DCB if number of TCs exceeds 1 */
/* Disable DCB unless we only have a single traffic class */
if (netdev_get_num_tc(adapter->netdev) > 1) {
e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
netdev_reset_tc(adapter->netdev);
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -1108,13 +1114,16 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
}
adapter->dcb_cfg.num_tcs.pg_tcs = 1;
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
/* disable SR-IOV */
/* Disable SR-IOV support */
e_dev_warn("Disabling SR-IOV support\n");
ixgbe_disable_sriov(adapter);
/* disable RSS */
/* Disable RSS */
e_dev_warn("Disabling RSS support\n");
adapter->ring_feature[RING_F_RSS].limit = 1;
/* recalculate number of queues now that many features have been
@@ -1124,13 +1133,11 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->num_q_vectors = 1;
err = pci_enable_msi(adapter->pdev);
if (err) {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
err);
return;
}
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
if (err)
e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
err);
else
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
/**
......
@@ -7108,9 +7108,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state))) {
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock &&
!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -7981,7 +7982,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
@@ -8067,8 +8067,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
adapter->bd_number = cards_found;
/* Setup hw api */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
@@ -8352,7 +8350,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbe_add_sanmac_netdev(netdev);
e_dev_info("%s\n", ixgbe_default_device_descr);
cards_found++;
#ifdef CONFIG_IXGBE_HWMON
if (ixgbe_sysfs_init(adapter))
......
@@ -385,7 +385,6 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
u16 bd_number;
/* Interrupt Throttle Rate */
u32 eitr_param;
......
@@ -3464,7 +3464,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbevf_adapter *adapter = NULL;
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
static int cards_found;
int err, pci_using_dac;
err = pci_enable_device(pdev);
@@ -3525,8 +3524,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbevf_assign_netdev_ops(netdev);
adapter->bd_number = cards_found;
/* Setup hw api */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
@@ -3601,7 +3598,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
return 0;
err_register:
......