Commit 77d5dfca authored by Alexander Duyck, committed by Jeff Kirsher

ixgbevf: Drop all dead or unnecessary code

There is a large amount of code present in this driver to support features
that either do not exist or are not supported, such as packet split, DCA, or
RSC.  This patch strips out almost all of that code, and in the case of
conditionals based on unused flags I am flattening the code down to just the
path that would have been selected.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Greg Rose <gregory.v.rose@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent 282f23c6
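The hunks below repeatedly apply the flattening the message describes. As a minimal standalone sketch of that pattern (illustrative names only, assuming a flag that no code path ever sets; this is not code from the commit itself):

#include <stdio.h>

/* Hypothetical stand-in for a capability flag, in the spirit of
 * IXGBE_FLAG_RX_PS_ENABLED, that nothing in the driver ever sets. */
#define FLAG_RX_PS_ENABLED (1u << 3)

struct adapter {
	unsigned int flags;
};

/* Before: branches on a flag that can never be set, so one arm is dead. */
static const char *rx_mode_before(const struct adapter *a)
{
	if (a->flags & FLAG_RX_PS_ENABLED)
		return "packet split";	/* unreachable in practice */
	return "single buffer";
}

/* After: flattened to the only path that could ever be taken. */
static const char *rx_mode_after(const struct adapter *a)
{
	(void)a;
	return "single buffer";
}

int main(void)
{
	struct adapter a = { .flags = 0 };
	printf("before: %s, after: %s\n", rx_mode_before(&a), rx_mode_after(&a));
	return 0;
}

The IXGBE_FLAG_RX_PS_ENABLED, HAVE_TX_MQ, and CONFIG_DCA hunks below are real instances of this same kind of reduction.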
@@ -43,7 +43,6 @@
#define IXGBE_ALL_RAR_ENTRIES 16
#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -75,21 +74,17 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
zero_base)},
{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
zero_base)},
{"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
};
#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
#endif /* ETHTOOL_GSTATS */
#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -674,10 +669,8 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev)) {
if (!adapter->dev_closed)
ixgbevf_reinit_locked(adapter);
}
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
return 0;
}
@@ -52,9 +52,6 @@ struct ixgbevf_tx_buffer {
struct ixgbevf_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
unsigned int page_offset;
};
struct ixgbevf_ring {
@@ -83,11 +80,6 @@ struct ixgbevf_ring {
* offset associated with this ring, which is different
* for DCB and RSS modes */
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
/* cpu for tx queue */
int cpu;
#endif
u64 v_idx; /* maps directly to the index for this ring in the hardware
* vector array, can also be used for finding the bit in EICR
* and friends that represents the vector for this ring */
@@ -96,16 +88,6 @@ struct ixgbevf_ring {
u16 rx_buf_len;
};
enum ixgbevf_ring_f_enum {
RING_F_NONE = 0,
RING_F_ARRAY_SIZE /* must be last in enum set */
};
struct ixgbevf_ring_feature {
int indices;
int mask;
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -120,8 +102,6 @@ struct ixgbevf_ring_feature {
#define IXGBEVF_MIN_RXD 64
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
@@ -213,18 +193,13 @@ struct ixgbevf_adapter {
/* RX */
struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
int num_rx_pools; /* == num_rx_queues in 82598 */
int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
int max_msix_q_vectors; /* true count of q_vectors for device */
struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -233,14 +208,8 @@ struct ixgbevf_adapter {
*/
u32 flags;
#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 1)
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -254,18 +223,15 @@ struct ixgbevf_adapter {
u32 eitr_param;
unsigned long state;
u32 *config_space;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
bool link_up;
unsigned long link_check_timeout;
struct work_struct watchdog_task;
bool netdev_registered;
bool dev_closed;
};
enum ixbgevf_state_t {
@@ -302,10 +268,8 @@ extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
#endif
extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
@@ -177,12 +177,8 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
(((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif
static void ixgbevf_tx_timeout(struct net_device *netdev);
@@ -255,19 +251,11 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
* sees the new next_to_clean.
*/
smp_mb();
#ifdef HAVE_TX_MQ
if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
++adapter->restart_queue;
}
#else
if (netif_queue_stopped(netdev) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
netif_wake_queue(netdev);
++adapter->restart_queue;
}
#endif
}
/* re-arm the interrupt */
@@ -304,10 +292,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
if (is_vlan && test_bit(tag, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
else
netif_rx(skb);
napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -365,27 +350,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
while (cleaned_count--) {
rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
if (!bi->page_dma &&
(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
adapter->alloc_rx_page_failed++;
goto no_buffers;
}
bi->page_offset = 0;
} else {
/* use a half page if we're re-using */
bi->page_offset ^= (PAGE_SIZE / 2);
}
bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
DMA_FROM_DEVICE);
}
skb = bi->skb;
if (!skb) {
skb = netdev_alloc_skb(adapter->netdev,
@@ -410,14 +374,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
}
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
i++;
if (i == rx_ring->count)
@@ -445,16 +402,6 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}
static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int *work_done, int work_to_do)
@@ -466,7 +413,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb;
unsigned int i;
u32 len, staterr;
u16 hdr_info;
bool cleaned = false;
int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
@@ -477,24 +423,12 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
u32 upper_len = 0;
if (*work_done >= work_to_do)
break;
(*work_done)++;
rmb(); /* read descriptor and rx_buffer_info after status DD */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
if (hdr_info & IXGBE_RXDADV_SPH)
adapter->rx_hdr_split++;
if (len > IXGBEVF_RX_HDR_SIZE)
len = IXGBEVF_RX_HDR_SIZE;
upper_len = le16_to_cpu(rx_desc->wb.upper.length);
} else {
len = le16_to_cpu(rx_desc->wb.upper.length);
}
len = le16_to_cpu(rx_desc->wb.upper.length);
cleaned = true;
skb = rx_buffer_info->skb;
prefetch(skb->data - NET_IP_ALIGN);
@@ -508,26 +442,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
skb_put(skb, len);
}
if (upper_len) {
dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
rx_buffer_info->page_offset,
upper_len);
if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
(page_count(rx_buffer_info->page) != 1))
rx_buffer_info->page = NULL;
else
get_page(rx_buffer_info->page);
skb->len += upper_len;
skb->data_len += upper_len;
skb->truesize += upper_len;
}
i++;
if (i == rx_ring->count)
i = 0;
@@ -539,15 +453,8 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
next_buffer = &rx_ring->rx_buffer_info[i];
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
next_buffer->dma = 0;
} else {
skb->next = next_buffer->skb;
skb->next->prev = skb;
}
skb->next = next_buffer->skb;
skb->next->prev = skb;
adapter->non_eop_descs++;
goto next_desc;
}
@@ -673,11 +580,6 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
r_idx + 1);
}
#ifndef HAVE_NETDEV_NAPI_LIST
if (!netif_running(adapter->netdev))
work_done = 0;
#endif
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
rx_ring = &(adapter->rx_ring[r_idx]);
@@ -1320,29 +1222,14 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl = IXGBE_SRRCTL_DROP_EN;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
u16 bufsz = IXGBEVF_RXBUFFER_2048;
/* grow the amount we can receive on large page machines */
if (bufsz < (PAGE_SIZE / 2))
bufsz = (PAGE_SIZE / 2);
/* cap the bufsz at our largest descriptor size */
bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK);
} else {
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
srrctl |= IXGBEVF_RXBUFFER_2048 >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
else
srrctl |= rx_ring->rx_buf_len >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
}
if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
srrctl |= IXGBEVF_RXBUFFER_2048 >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
else
srrctl |= rx_ring->rx_buf_len >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
@@ -1362,36 +1249,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
u32 rdlen;
int rx_buf_len;
/* Decide whether to use packet split mode or not */
if (netdev->mtu > ETH_DATA_LEN) {
if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
else
adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
} else {
if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
else
adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
}
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
/* PSRTYPE must be initialized in 82599 */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR;
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
rx_buf_len = IXGBEVF_RX_HDR_SIZE;
} else {
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
if (netdev->mtu <= ETH_DATA_LEN)
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
rx_buf_len = ALIGN(max_frame, 1024);
}
/* PSRTYPE must be initialized in 82599 */
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
if (netdev->mtu <= ETH_DATA_LEN)
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
rx_buf_len = ALIGN(max_frame, 1024);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1667,10 +1530,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_save_reset_stats(adapter);
ixgbevf_init_last_counter_stats(adapter);
/* bring the link up in the watchdog, this could race with our first
* link up interrupt but shouldn't be a problem */
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
}
@@ -1723,14 +1582,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
dev_kfree_skb(this);
} while (skb);
}
if (!rx_buffer_info->page)
continue;
dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
rx_buffer_info->page_offset = 0;
}
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -1958,8 +1809,6 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
}
/**
@@ -3220,9 +3069,7 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
ixgbevf_free_all_rx_resources(adapter);
}
#ifdef CONFIG_PM
pci_save_state(pdev);
#endif
pci_disable_device(pdev);
}
@@ -3350,12 +3197,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
pci_set_master(pdev);
#ifdef HAVE_TX_MQ
netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
MAX_TX_QUEUES);
#else
netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
#endif
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -3396,10 +3239,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
/* setup the private structure */
err = ixgbevf_sw_init(adapter);
if (err)
@@ -3469,8 +3308,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
hw_dbg(hw, "LRO is disabled\n");
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
return 0;