Commit fe6af0e1 authored by Niklas Cassel, committed by David S. Miller

net: stmmac: set total length of the packet to be transmitted in TDES3

Field FL/TPL in register TDES3 is not correctly set on GMAC4.
TX appears to be functional on GMAC 4.10a even if this field is not set,
however, to avoid relying on undefined behavior, set the length in TDES3.

The field has a different meaning depending on if the TSE bit in TDES3
is set or not (TSO). However, regardless of the TSE bit, the field is
not optional. The field is already set correctly when the TSE bit is set.

Since there is no limit for the number of descriptors that can be
used for a single packet, the field should be set to the sum of
the buffers contained in:
[<desc with First Descriptor bit set> ... <desc n> ...
<desc with Last Descriptor bit set>], which should be equal to skb->len.
Signed-off-by: Niklas Cassel <niklas.cassel@axis.com>
Acked-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 6b254afd
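The idea behind the change is easiest to see in a small, self-contained sketch: every descriptor of a packet keeps its own buffer length in TDES2, while TDES3 additionally carries the total length of the whole packet (skb->len), masked to the width of the FL/TPL field. The struct, the sketch_* names, and the mask values below are illustrative stand-ins rather than the kernel's definitions; they only model what the new tot_pkt_len argument does in dwmac4_rd_prepare_tx_desc().

/*
 * Minimal user-space sketch (assumed field widths, not kernel code):
 * TDES2 bits 13:0 hold the per-buffer length, TDES3 bits 14:0 hold the
 * total packet length (FL/TPL).
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_TDES2_BUFFER1_SIZE_MASK	0x00003fffu
#define SKETCH_TDES3_PACKET_SIZE_MASK	0x00007fffu

struct sketch_tx_desc {
	uint32_t des0, des1, des2, des3;
};

/* Models the length handling added to dwmac4_rd_prepare_tx_desc(). */
static void sketch_prepare_tx_desc(struct sketch_tx_desc *p,
				   unsigned int buf_len,
				   unsigned int tot_pkt_len)
{
	p->des2 |= buf_len & SKETCH_TDES2_BUFFER1_SIZE_MASK;
	p->des3 |= tot_pkt_len & SKETCH_TDES3_PACKET_SIZE_MASK;
}

int main(void)
{
	/* A 9000-byte frame split over three buffers/descriptors. */
	unsigned int buf_len[3] = { 4096, 4096, 808 };
	unsigned int skb_len = buf_len[0] + buf_len[1] + buf_len[2];
	struct sketch_tx_desc desc[3] = { { 0 } };

	for (int i = 0; i < 3; i++) {
		sketch_prepare_tx_desc(&desc[i], buf_len[i], skb_len);
		printf("desc %d: TDES2 len=%u, TDES3 FL/TPL=%u\n", i,
		       (unsigned int)(desc[i].des2 & SKETCH_TDES2_BUFFER1_SIZE_MASK),
		       (unsigned int)(desc[i].des3 & SKETCH_TDES3_PACKET_SIZE_MASK));
	}
	return 0;
}

Compiled as C99, the sketch prints the same FL/TPL value (9000) for all three descriptors while TDES2 differs per buffer, which mirrors the relationship the patch establishes for non-TSO frames.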
@@ -52,7 +52,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 	tx_q->tx_skbuff_dma[entry].len = bmax;
 	/* do not close the descriptor and do not set own bit */
 	priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
-					0, false);
+					0, false, skb->len);
 
 	while (len != 0) {
 		tx_q->tx_skbuff[entry] = NULL;
@@ -70,7 +70,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			tx_q->tx_skbuff_dma[entry].len = bmax;
 			priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
 							STMMAC_CHAIN_MODE, 1,
-							false);
+							false, skb->len);
 			len -= bmax;
 			i++;
 		} else {
@@ -85,7 +85,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			/* last descriptor can be set now */
 			priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
 							STMMAC_CHAIN_MODE, 1,
-							true);
+							true, skb->len);
 			len = 0;
 		}
 	}
@@ -373,7 +373,7 @@ struct stmmac_desc_ops {
 	/* Invoked by the xmit function to prepare the tx descriptor */
 	void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
				 bool csum_flag, int mode, bool tx_own,
-				 bool ls);
+				 bool ls, unsigned int tot_pkt_len);
 	void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
				    int len2, bool tx_own, bool ls,
				    unsigned int tcphdrlen,
@@ -304,12 +304,13 @@ static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
 
 static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				       bool csum_flag, int mode, bool tx_own,
-				       bool ls)
+				       bool ls, unsigned int tot_pkt_len)
 {
 	unsigned int tdes3 = le32_to_cpu(p->des3);
 
 	p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
 
+	tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
 	if (is_fs)
 		tdes3 |= TDES3_FIRST_DESCRIPTOR;
 	else
@@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
 
 static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
-				     bool ls)
+				     bool ls, unsigned int tot_pkt_len)
 {
 	unsigned int tdes0 = le32_to_cpu(p->des0);
 
@@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
 
 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				  bool csum_flag, int mode, bool tx_own,
-				  bool ls)
+				  bool ls, unsigned int tot_pkt_len)
 {
 	unsigned int tdes1 = le32_to_cpu(p->des1);
 
@@ -59,7 +59,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
 		priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
-						STMMAC_RING_MODE, 0, false);
+						STMMAC_RING_MODE, 0,
+						false, skb->len);
 		tx_q->tx_skbuff[entry] = NULL;
 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
@@ -79,7 +80,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
-						STMMAC_RING_MODE, 1, true);
+						STMMAC_RING_MODE, 1,
+						true, skb->len);
 	} else {
 		des2 = dma_map_single(priv->device, skb->data,
				      nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +93,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
 		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
-						STMMAC_RING_MODE, 0, true);
+						STMMAC_RING_MODE, 0,
+						true, skb->len);
 	}
 
 	tx_q->cur_tx = entry;
@@ -3033,7 +3033,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		/* Prepare the descriptor and set the own bit too */
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
-						priv->mode, 1, last_segment);
+						priv->mode, 1, last_segment,
+						skb->len);
 	}
 
 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
@@ -3116,7 +3117,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Prepare the first descriptor setting the OWN bit too */
 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
						csum_insertion, priv->mode, 1,
-						last_segment);
+						last_segment, skb->len);
 
 		/* The own bit must be the latest setting done when prepare the
 		 * descriptor and then barrier is needed to make sure that