Commit 7c6e0a43 authored by Jesse Brandeburg, committed by Jeff Garzik

ixgbe: Lock RSS seed, move rx_buf_len to the rx_ring

This locks the seed down so loading/unloading the driver will present
predictable hashing from RSS.  Also move the rx_buf_len out of the adapter
struct, and into the Rx ring struct.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Parent e01c31a5
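
In short, the diff below does two things: it programs a fixed 40-byte RSS key into the ten IXGBE_RSSRK registers instead of calling get_random_bytes(), so the flow-to-queue mapping stays the same across driver reloads, and it gives each Rx ring its own rx_buf_len instead of one adapter-wide value. A minimal user-space sketch of the key-programming sequence follows; write_rssrk() is a hypothetical stand-in for the driver's IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), ...) that only prints the values, and the seed words are the ones hard-coded by the patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), val);
 * it only prints the register programming sequence for illustration. */
static void write_rssrk(int i, uint32_t val)
{
	printf("RSSRK[%d] <- 0x%08X\n", i, val);
}

int main(void)
{
	/* The same fixed 40-byte key the patch hard-codes; because it never
	 * changes, the RSS hash -> Rx queue mapping is identical across
	 * driver load/unload cycles. */
	static const uint32_t seed[10] = {
		0xE291D73D, 0x1805EC6C, 0x2A94B30D, 0xA54F2BEC, 0xEA49AF7C,
		0xE214AD3D, 0xB855AABE, 0x6A3E67EA, 0x14364D17, 0x3BED200D
	};

	for (int i = 0; i < 10; i++)
		write_rssrk(i, seed[i]);
	return 0;
}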
@@ -166,6 +166,7 @@ struct ixgbe_ring {
 	char name[IFNAMSIZ + 5];
 	u16 work_limit;                /* max work per interrupt */
+	u16 rx_buf_len;
 };
 
 #define RING_F_VMDQ 1
@@ -228,7 +229,6 @@ struct ixgbe_adapter {
 	struct timer_list watchdog_timer;
 	struct vlan_group *vlgrp;
 	u16 bd_number;
-	u16 rx_buf_len;
 	struct work_struct reset_task;
 	struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
 	char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
...
@@ -474,15 +474,15 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
  * @adapter: address of board private structure
  **/
 static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *rx_ring,
                                    int cleaned_count)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
 	unsigned int i;
-	unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
+	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
 
 	i = rx_ring->next_to_use;
 	bi = &rx_ring->rx_buffer_info[i];
@@ -498,8 +498,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 				goto no_buffers;
 			}
 			bi->page_dma = pci_map_page(pdev, bi->page, 0,
 			                            PAGE_SIZE,
 			                            PCI_DMA_FROMDEVICE);
 		}
 
 		if (!bi->skb) {
@@ -535,6 +535,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			i = 0;
 			bi = &rx_ring->rx_buffer_info[i];
 		}
 
 no_buffers:
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
@@ -552,9 +553,19 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 	}
 }
 
+static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
 static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring,
                                int *work_done, int work_to_do)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -562,36 +573,35 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
 	unsigned int i;
-	u32 upper_len, len, staterr;
+	u32 len, staterr;
 	u16 hdr_info;
 	bool cleaned = false;
 	int cleaned_count = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
-	upper_len = 0;
 	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
 	while (staterr & IXGBE_RXD_STAT_DD) {
+		u32 upper_len = 0;
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 
 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-			hdr_info =
-			    le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
-			len =
-			    ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-			     IXGBE_RXDADV_HDRBUFLEN_SHIFT);
+			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
+			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 			if (hdr_info & IXGBE_RXDADV_SPH)
 				adapter->rx_hdr_split++;
 			if (len > IXGBE_RX_HDR_SIZE)
 				len = IXGBE_RX_HDR_SIZE;
 			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-		} else
+		} else {
 			len = le16_to_cpu(rx_desc->wb.upper.length);
+		}
 
 		cleaned = true;
 		skb = rx_buffer_info->skb;
@@ -600,8 +610,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 		if (len && !skb_shinfo(skb)->nr_frags) {
 			pci_unmap_single(pdev, rx_buffer_info->dma,
-			                 adapter->rx_buf_len + NET_IP_ALIGN,
+			                 rx_ring->rx_buf_len + NET_IP_ALIGN,
 			                 PCI_DMA_FROMDEVICE);
 			skb_put(skb, len);
 		}
@@ -1415,7 +1425,7 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
 	union ixgbe_adv_rx_desc *rx_desc = priv;
 
 	/* Verify that this is a valid IPv4 TCP packet */
-	if (!(rx_desc->wb.lower.lo_dword.pkt_info &
+	if (!(ixgbe_get_pkt_info(rx_desc) &
 	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)))
 		return -1;
@@ -1442,10 +1452,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	int i, j;
 	u32 rdlen, rxctrl, rxcsum;
-	u32 random[10];
+	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
 	u32 fctrl, hlreg0;
 	u32 pages;
 	u32 reta = 0, mrqc, srrctl;
+	int rx_buf_len;
 
 	/* Decide whether to use packet split mode or not */
 	if (netdev->mtu > ETH_DATA_LEN)
@@ -1455,12 +1468,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-		adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
+		rx_buf_len = IXGBE_RX_HDR_SIZE;
 	} else {
 		if (netdev->mtu <= ETH_DATA_LEN)
-			adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		else
-			adapter->rx_buf_len = ALIGN(max_frame, 1024);
+			rx_buf_len = ALIGN(max_frame, 1024);
 	}
 
 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
@@ -1490,12 +1503,11 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	} else {
 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
-		if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+		if (rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
 			srrctl |=
 			     IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 		else
-			srrctl |=
-			     adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+			srrctl |= rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 	}
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
@@ -1508,13 +1520,15 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	 * the Base and Length of the Rx Descriptor Ring */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		rdba = adapter->rx_ring[i].dma;
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
-		adapter->rx_ring[i].head = IXGBE_RDH(i);
-		adapter->rx_ring[i].tail = IXGBE_RDT(i);
+		j = adapter->rx_ring[i].reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
+		adapter->rx_ring[i].head = IXGBE_RDH(j);
+		adapter->rx_ring[i].tail = IXGBE_RDT(j);
+		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 	}
 
 	/* Intitial LRO Settings */
@@ -1541,22 +1555,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		}
 
 		/* Fill out hash function seeds */
-		/* XXX use a random constant here to glue certain flows */
-		get_random_bytes(&random[0], 40);
 		for (i = 0; i < 10; i++)
-			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
+			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
 
 		mrqc = IXGBE_MRQC_RSSEN
 		    /* Perform hash on these packet types */
 		    | IXGBE_MRQC_RSS_FIELD_IPV4
 		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
 		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
 		    | IXGBE_MRQC_RSS_FIELD_IPV6
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 	}
@@ -1926,7 +1938,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
 			pci_unmap_single(pdev, rx_buffer_info->dma,
-			                 adapter->rx_buf_len,
+			                 rx_ring->rx_buf_len,
 			                 PCI_DMA_FROMDEVICE);
 			rx_buffer_info->dma = 0;
 		}
...
@@ -1049,9 +1049,12 @@ union ixgbe_adv_rx_desc {
 	} read;
 	struct {
 		struct {
-			struct {
-				__le16 pkt_info; /* RSS type, Packet type */
-				__le16 hdr_info; /* Split Header, header len */
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /* RSS type, Packet type */
+					__le16 hdr_info; /* Split Header, header len */
+				} hs_rss;
 			} lo_dword;
 			union {
 				__le32 rss; /* RSS Hash */
...