提交 622c36f1 编写于 作者: Lendacky, Thomas 提交者: David S. Miller

amd-xgbe: Fix jumbo MTU processing on newer hardware

Newer hardware does not provide a cumulative payload length when multiple
descriptors are needed to handle the data. Once the MTU increases beyond
the size that can be handled by a single descriptor, the SKB does not get
built properly by the driver.

The driver will now calculate the size of the data buffers used by the
hardware.  The first buffer of the first descriptor is for packet headers
or packet headers and data when the headers can't be split. Subsequent
descriptors in a multi-descriptor chain will not use the first buffer. The
second buffer is used by all the descriptors in the chain for payload data.
Based on whether the driver is processing the first, intermediate, or last
descriptor it can calculate the buffer usage and build the SKB properly.

Tested and verified on both old and new hardware.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 5371bbf4
...@@ -1148,8 +1148,8 @@ ...@@ -1148,8 +1148,8 @@
#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 #define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 #define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
...@@ -1158,6 +1158,8 @@ ...@@ -1158,6 +1158,8 @@
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16 #define RX_NORMAL_DESC0_OVT_WIDTH 16
......
...@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel) ...@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
/* Get the header length */ /* Get the header length */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
FIRST, 1);
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL); RX_NORMAL_DESC2, HL);
if (rdata->rx.hdr_len) if (rdata->rx.hdr_len)
pdata->ext_stats.rx_split_header_packets++; pdata->ext_stats.rx_split_header_packets++;
} else {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
FIRST, 0);
} }
/* Get the RSS hash */ /* Get the RSS hash */
...@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel) ...@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
} }
} }
/* Get the packet length */ /* Not all the data has been transferred for this packet */
rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
/* Not all the data has been transferred for this packet */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
INCOMPLETE, 1);
return 0; return 0;
}
/* This is the last of the data for this packet */ /* This is the last of the data for this packet */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
INCOMPLETE, 0); LAST, 1);
/* Get the packet length */
rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
/* Set checksum done indicator as appropriate */ /* Set checksum done indicator as appropriate */
if (netdev->features & NETIF_F_RXCSUM) if (netdev->features & NETIF_F_RXCSUM)
......
...@@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, ...@@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
{ {
struct sk_buff *skb; struct sk_buff *skb;
u8 *packet; u8 *packet;
unsigned int copy_len;
skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
if (!skb) if (!skb)
return NULL; return NULL;
/* Start with the header buffer which may contain just the header /* Pull in the header buffer which may contain just the header
* or the header plus data * or the header plus data
*/ */
dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
...@@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, ...@@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
packet = page_address(rdata->rx.hdr.pa.pages) + packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset; rdata->rx.hdr.pa.pages_offset;
copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len; skb_copy_to_linear_data(skb, packet, len);
copy_len = min(rdata->rx.hdr.dma_len, copy_len); skb_put(skb, len);
skb_copy_to_linear_data(skb, packet, copy_len);
skb_put(skb, copy_len);
len -= copy_len;
if (len) {
/* Add the remaining data as a frag */
dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len,
DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset,
len, rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL;
}
return skb; return skb;
} }
/* Determine how many bytes of packet data the first (header) buffer of
 * this descriptor holds.  Only the first descriptor of a chain uses the
 * header buffer; later descriptors place all data in the second buffer.
 */
static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	unsigned int first, last;

	first = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST);
	last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST);

	/* Not the first descriptor: the header buffer is unused */
	if (!first)
		return 0;

	/* Split header: the buffer holds exactly the header bytes */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First but not last, no split header: the whole buffer was filled */
	if (!last)
		return rdata->rx.hdr.dma_len;

	/* Sole descriptor (first and last), no split header: the buffer
	 * holds the packet up to the buffer's capacity
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}
/* Determine how many bytes of packet data the second (payload) buffer of
 * this descriptor holds, given @len bytes already accounted for by earlier
 * descriptors and this descriptor's first buffer.
 */
static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Intermediate descriptors fill the payload buffer completely */
	unsigned int buf2_len = rdata->rx.buf.dma_len;

	/* The last descriptor carries only the remainder of the packet */
	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		buf2_len = rdata->rx.len - len;

	return buf2_len;
}
static int xgbe_tx_poll(struct xgbe_channel *channel) static int xgbe_tx_poll(struct xgbe_channel *channel)
{ {
struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_prv_data *pdata = channel->pdata;
...@@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) ...@@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct napi_struct *napi; struct napi_struct *napi;
struct sk_buff *skb; struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps; struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context; unsigned int last, error, context_next, context;
unsigned int len, rdesc_len, max_len; unsigned int len, buf1_len, buf2_len, max_len;
unsigned int received = 0; unsigned int received = 0;
int packet_count = 0; int packet_count = 0;
...@@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) ...@@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring) if (!ring)
return 0; return 0;
incomplete = 0; last = 0;
context_next = 0; context_next = 0;
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
...@@ -2137,9 +2155,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) ...@@ -2137,9 +2155,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
received++; received++;
ring->cur++; ring->cur++;
incomplete = XGMAC_GET_BITS(packet->attributes, last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
RX_PACKET_ATTRIBUTES, LAST);
INCOMPLETE);
context_next = XGMAC_GET_BITS(packet->attributes, context_next = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT); CONTEXT_NEXT);
...@@ -2148,7 +2165,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) ...@@ -2148,7 +2165,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
CONTEXT); CONTEXT);
/* Earlier error, just drain the remaining data */ /* Earlier error, just drain the remaining data */
if ((incomplete || context_next) && error) if ((!last || context_next) && error)
goto read_again; goto read_again;
if (error || packet->errors) { if (error || packet->errors) {
...@@ -2160,16 +2177,22 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) ...@@ -2160,16 +2177,22 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
} }
if (!context) { if (!context) {
/* Length is cumulative, get this descriptor's length */ /* Get the data length in the descriptor buffers */
rdesc_len = rdata->rx.len - len; buf1_len = xgbe_rx_buf1_len(rdata, packet);
len += rdesc_len; len += buf1_len;
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
if (rdesc_len && !skb) { if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata, skb = xgbe_create_skb(pdata, napi, rdata,
rdesc_len); buf1_len);
if (!skb) if (!skb) {
error = 1; error = 1;
} else if (rdesc_len) { goto skip_data;
}
}
if (buf2_len) {
dma_sync_single_range_for_cpu(pdata->dev, dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma_base, rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off, rdata->rx.buf.dma_off,
...@@ -2179,13 +2202,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) ...@@ -2179,13 +2202,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages, rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset, rdata->rx.buf.pa.pages_offset,
rdesc_len, buf2_len,
rdata->rx.buf.dma_len); rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL; rdata->rx.buf.pa.pages = NULL;
} }
} }
if (incomplete || context_next) skip_data:
if (!last || context_next)
goto read_again; goto read_again;
if (!skb) if (!skb)
...@@ -2243,7 +2267,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) ...@@ -2243,7 +2267,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
} }
/* Check if we need to save state before leaving */ /* Check if we need to save state before leaving */
if (received && (incomplete || context_next)) { if (received && (!last || context_next)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdata->state_saved = 1; rdata->state_saved = 1;
rdata->state.skb = skb; rdata->state.skb = skb;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册