Commit 553e2ab3 authored by Giuseppe Cavallaro, committed by David S. Miller

stmmac: add length field to dma data

Currently, the code pulls the length field directly out of the descriptor when
unmapping a buffer. This results in an uncached read from a dma_alloc_coherent()
region. There is no need to do this, so this patch simply puts the value directly
into a data structure that will hit the cache.
Signed-off-by: Fabrice Gasnier <fabrice.gasnier@st.com>
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: Alexandre TORGUE <alexandre.torgue@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent e3ad57c9
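
The idea behind the change can be illustrated with a minimal, self-contained sketch of the pattern, independent of the stmmac driver itself: record the DMA address and the mapped length in an ordinary (cacheable) bookkeeping structure at map time, then reuse that cached length at unmap time instead of reading it back from a descriptor that lives in uncached dma_alloc_coherent() memory. The names struct my_tx_info, my_map_tx_buf() and my_unmap_tx_buf() below are illustrative only; the driver itself uses struct stmmac_tx_info and the tx_skbuff_dma[] array shown in the diff that follows.

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_tx_info {                     /* mirrors struct stmmac_tx_info */
        dma_addr_t buf;                 /* DMA address returned by the map call */
        bool map_as_page;               /* mapped via dma_map_page() or dma_map_single() */
        unsigned int len;               /* cached mapping length */
};

static int my_map_tx_buf(struct device *dev, struct my_tx_info *info,
                         void *data, unsigned int len)
{
        info->buf = dma_map_single(dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, info->buf))
                return -ENOMEM;

        info->map_as_page = false;
        info->len = len;                /* remember the length in cacheable memory */
        return 0;
}

static void my_unmap_tx_buf(struct device *dev, struct my_tx_info *info)
{
        /*
         * No read of the (uncached) descriptor is needed here: the
         * length comes from the cached bookkeeping structure.
         */
        if (info->map_as_page)
                dma_unmap_page(dev, info->buf, info->len, DMA_TO_DEVICE);
        else
                dma_unmap_single(dev, info->buf, info->len, DMA_TO_DEVICE);

        info->buf = 0;
        info->len = 0;
        info->map_as_page = false;
}

The diff below applies exactly this bookkeeping to the driver: a len field is added to struct stmmac_tx_info, every map site fills it in, and the unmap paths (dma_free_tx_skbufs() and stmmac_tx_clean()) use it in place of priv->hw->desc->get_tx_len(p).
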
@@ -49,6 +49,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 if (dma_mapping_error(priv->device, desc->des2))
 return -1;
 priv->tx_skbuff_dma[entry].buf = desc->des2;
+priv->tx_skbuff_dma[entry].len = bmax;
 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
 while (len != 0) {
@@ -63,6 +64,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 if (dma_mapping_error(priv->device, desc->des2))
 return -1;
 priv->tx_skbuff_dma[entry].buf = desc->des2;
+priv->tx_skbuff_dma[entry].len = bmax;
 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
 STMMAC_CHAIN_MODE);
 priv->hw->desc->set_tx_owner(desc);
@@ -75,6 +77,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 if (dma_mapping_error(priv->device, desc->des2))
 return -1;
 priv->tx_skbuff_dma[entry].buf = desc->des2;
+priv->tx_skbuff_dma[entry].len = len;
 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
 STMMAC_CHAIN_MODE);
 priv->hw->desc->set_tx_owner(desc);
......
@@ -56,6 +56,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 return -1;
 priv->tx_skbuff_dma[entry].buf = desc->des2;
+priv->tx_skbuff_dma[entry].len = bmax;
 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
 STMMAC_RING_MODE);
@@ -73,6 +75,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 if (dma_mapping_error(priv->device, desc->des2))
 return -1;
 priv->tx_skbuff_dma[entry].buf = desc->des2;
+priv->tx_skbuff_dma[entry].len = len;
 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
 STMMAC_RING_MODE);
@@ -84,6 +88,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 if (dma_mapping_error(priv->device, desc->des2))
 return -1;
 priv->tx_skbuff_dma[entry].buf = desc->des2;
+priv->tx_skbuff_dma[entry].len = nopaged_len;
 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
 STMMAC_RING_MODE);
......
@@ -45,6 +45,7 @@ struct stmmac_resources {
 struct stmmac_tx_info {
 dma_addr_t buf;
 bool map_as_page;
+unsigned len;
 };
 struct stmmac_priv {
......
@@ -1093,6 +1093,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 p->des2 = 0;
 priv->tx_skbuff_dma[i].buf = 0;
 priv->tx_skbuff_dma[i].map_as_page = false;
+priv->tx_skbuff_dma[i].len = 0;
 priv->tx_skbuff[i] = NULL;
 }
@@ -1136,12 +1137,12 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 if (priv->tx_skbuff_dma[i].map_as_page)
 dma_unmap_page(priv->device,
 priv->tx_skbuff_dma[i].buf,
-priv->hw->desc->get_tx_len(p),
+priv->tx_skbuff_dma[i].len,
 DMA_TO_DEVICE);
 else
 dma_unmap_single(priv->device,
 priv->tx_skbuff_dma[i].buf,
-priv->hw->desc->get_tx_len(p),
+priv->tx_skbuff_dma[i].len,
 DMA_TO_DEVICE);
 }
@@ -1347,12 +1348,12 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 if (priv->tx_skbuff_dma[entry].map_as_page)
 dma_unmap_page(priv->device,
 priv->tx_skbuff_dma[entry].buf,
-priv->hw->desc->get_tx_len(p),
+priv->tx_skbuff_dma[entry].len,
 DMA_TO_DEVICE);
 else
 dma_unmap_single(priv->device,
 priv->tx_skbuff_dma[entry].buf,
-priv->hw->desc->get_tx_len(p),
+priv->tx_skbuff_dma[entry].len,
 DMA_TO_DEVICE);
 priv->tx_skbuff_dma[entry].buf = 0;
 priv->tx_skbuff_dma[entry].map_as_page = false;
@@ -1986,6 +1987,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 if (dma_mapping_error(priv->device, desc->des2))
 goto dma_map_err;
 priv->tx_skbuff_dma[entry].buf = desc->des2;
+priv->tx_skbuff_dma[entry].len = nopaged_len;
 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
 csum_insertion, priv->mode);
 } else {
@@ -2014,6 +2016,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 priv->tx_skbuff_dma[entry].buf = desc->des2;
 priv->tx_skbuff_dma[entry].map_as_page = true;
+priv->tx_skbuff_dma[entry].len = len;
 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
 priv->mode);
 wmb();