Commit 29ce85f3 authored by David S. Miller

Merge branch 'hns3-Some-cleanup-and-bugfix-for-desc-filling'

Yunsheng Lin says:

====================
Some cleanup and bugfix for desc filling

When retransmitting packets, skb_cow_head, which is called in
hns3_set_tso, may clone a new header. The driver clears the
checksum of the header after doing the DMA map, so the hardware
reads the old header, whose L3 checksum has not been cleared, and
calculates a wrong L3 checksum.

Also, when sending a big fragment using multiple buffer descriptors,
hns3 does one mapping but multiple unmappings when tx is done,
which may cause an unmapping problem.

This patchset does some cleanup before fixing the above problems;
both fixes are sketched below.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
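A minimal sketch of the ordering rule behind the first fix, assuming a hypothetical helper map_tx_head() and a bare IPv4 checksum clear standing in for the driver's full TSO setup: all header rewrites must complete before the buffer is DMA-mapped, since the device may read the mapped bytes at any time afterwards. The series enforces this by moving the DMA map into hns3_fill_desc().

/* Illustrative only: map_tx_head() is a hypothetical helper, not a
 * symbol from hns3_enet.c. The point is the ordering.
 */
#include <linux/dma-mapping.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static dma_addr_t map_tx_head(struct device *dev, struct sk_buff *skb,
			      unsigned int len)
{
	/* Finish every header edit first: skb_cow_head() during TSO setup
	 * may have given a retransmitted skb a fresh private header, and
	 * the device must snapshot that copy, not the stale one.
	 */
	ip_hdr(skb)->check = 0;

	/* Only now hand the bytes to the device. */
	return dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
}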
@@ -977,35 +977,28 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
 }
 
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
-			  int size, dma_addr_t dma, int frag_end,
-			  enum hns_desc_type type)
+			  int size, int frag_end, enum hns_desc_type type)
 {
 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+	struct device *dev = ring_to_dev(ring);
 	u32 ol_type_vlan_len_msec = 0;
 	u16 bdtp_fe_sc_vld_ra_ri = 0;
+	struct skb_frag_struct *frag;
+	unsigned int frag_buf_num;
 	u32 type_cs_vlan_tso = 0;
 	struct sk_buff *skb;
 	u16 inner_vtag = 0;
 	u16 out_vtag = 0;
+	unsigned int k;
+	int sizeoflast;
 	u32 paylen = 0;
+	dma_addr_t dma;
 	u16 mss = 0;
 	u8 ol4_proto;
 	u8 il4_proto;
 	int ret;
 
-	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
-	desc_cb->priv = priv;
-	desc_cb->length = size;
-	desc_cb->dma = dma;
-	desc_cb->type = type;
-
-	/* now, fill the descriptor */
-	desc->addr = cpu_to_le64(dma);
-	desc->tx.send_size = cpu_to_le16((u16)size);
-	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
-	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
-
 	if (type == DESC_TYPE_SKB) {
 		skb = (struct sk_buff *)priv;
 		paylen = skb->len;
@@ -1046,38 +1039,47 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc->tx.mss = cpu_to_le16(mss);
 		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
 		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
-	}
 
-	/* move ring pointer to next.*/
-	ring_ptr_move_fw(ring, next_to_use);
+		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	} else {
+		frag = (struct skb_frag_struct *)priv;
+		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+	}
 
-	return 0;
-}
+	if (dma_mapping_error(ring->dev, dma)) {
+		ring->stats.sw_err_cnt++;
+		return -ENOMEM;
+	}
 
-static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
-			      int size, dma_addr_t dma, int frag_end,
-			      enum hns_desc_type type)
-{
-	unsigned int frag_buf_num;
-	unsigned int k;
-	int sizeoflast;
-	int ret;
+	desc_cb->length = size;
 
 	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
 	sizeoflast = size % HNS3_MAX_BD_SIZE;
 	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
 
-	/* When the frag size is bigger than hardware, split this frag */
+	/* When frag size is bigger than hardware limit, split this frag */
 	for (k = 0; k < frag_buf_num; k++) {
-		ret = hns3_fill_desc(ring, priv,
-				     (k == frag_buf_num - 1) ?
-				sizeoflast : HNS3_MAX_BD_SIZE,
-				     dma + HNS3_MAX_BD_SIZE * k,
-				     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-				     (type == DESC_TYPE_SKB && !k) ?
-				DESC_TYPE_SKB : DESC_TYPE_PAGE);
-		if (ret)
-			return ret;
+		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
+		desc_cb->priv = priv;
+		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
+		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
+					DESC_TYPE_SKB : DESC_TYPE_PAGE;
+
+		/* now, fill the descriptor */
+		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
+		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
+				(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
+				       frag_end && (k == frag_buf_num - 1) ?
+						1 : 0);
+		desc->tx.bdtp_fe_sc_vld_ra_ri =
+				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+		/* move ring pointer to next.*/
+		ring_ptr_move_fw(ring, next_to_use);
+
+		desc_cb = &ring->desc_cb[ring->next_to_use];
+		desc = &ring->desc[ring->next_to_use];
 	}
 
 	return 0;
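The splitting arithmetic that now lives inline in hns3_fill_desc can be checked in isolation. A small standalone C program, assuming the driver's HNS3_MAX_BD_SIZE value of 65535, shows how a 150000-byte fragment becomes three BDs that share one DMA mapping at offsets k * HNS3_MAX_BD_SIZE:

#include <stdio.h>

#define HNS3_MAX_BD_SIZE 65535	/* per-BD hardware limit assumed here */

int main(void)
{
	int size = 150000;	/* example fragment length */
	int frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
	int sizeoflast = size % HNS3_MAX_BD_SIZE;
	int k;

	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* Prints:
	 *   BD 0: offset 0, len 65535
	 *   BD 1: offset 65535, len 65535
	 *   BD 2: offset 131070, len 18930
	 */
	for (k = 0; k < frag_buf_num; k++)
		printf("BD %d: offset %d, len %d\n", k, k * HNS3_MAX_BD_SIZE,
		       k == frag_buf_num - 1 ? sizeoflast : HNS3_MAX_BD_SIZE);

	return 0;
}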
@@ -1133,7 +1135,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
 	return 0;
 }
 
-static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
+static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
 {
 	struct device *dev = ring_to_dev(ring);
 	unsigned int i;
@@ -1149,12 +1151,14 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
 				 ring->desc_cb[ring->next_to_use].dma,
 				 ring->desc_cb[ring->next_to_use].length,
 				 DMA_TO_DEVICE);
-		else
+		else if (ring->desc_cb[ring->next_to_use].length)
 			dma_unmap_page(dev,
 				       ring->desc_cb[ring->next_to_use].dma,
 				       ring->desc_cb[ring->next_to_use].length,
 				       DMA_TO_DEVICE);
 
+		ring->desc_cb[ring->next_to_use].length = 0;
+
 		/* rollback one */
 		ring_ptr_move_bw(ring, next_to_use);
 	}
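The length bookkeeping added above is what makes unmapping single-shot. A condensed, illustrative restatement of the rule (cb stands for one hns3_desc_cb, dev for the ring's device; the ring walk is omitted): only the first BD of a mapped chunk carries the nonzero mapped length, so the else-if skips the follow-on BDs, and zeroing the length afterwards keeps a cleared BD from ever being unmapped again.

/* Condensed from the hunks above; not a drop-in function. */
if (cb->type == DESC_TYPE_SKB)
	dma_unmap_single(dev, cb->dma, cb->length, DMA_TO_DEVICE);
else if (cb->length)	/* length == 0: mapping owned by an earlier BD */
	dma_unmap_page(dev, cb->dma, cb->length, DMA_TO_DEVICE);

cb->length = 0;		/* never unmap the same BD twice */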
@@ -1166,12 +1170,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct hns3_nic_ring_data *ring_data =
 		&tx_ring_data(priv, skb->queue_mapping);
 	struct hns3_enet_ring *ring = ring_data->ring;
-	struct device *dev = priv->dev;
 	struct netdev_queue *dev_queue;
 	struct skb_frag_struct *frag;
 	int next_to_use_head;
 	int next_to_use_frag;
-	dma_addr_t dma;
 	int buf_num;
 	int seg_num;
 	int size;
@@ -1206,35 +1208,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	next_to_use_head = ring->next_to_use;
 
-	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma)) {
-		netdev_err(netdev, "TX head DMA map failed\n");
-		ring->stats.sw_err_cnt++;
-		goto out_err_tx_ok;
-	}
-
-	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
-				  DESC_TYPE_SKB);
+	ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
+				  DESC_TYPE_SKB);
 	if (ret)
-		goto head_dma_map_err;
+		goto head_fill_err;
 
 	next_to_use_frag = ring->next_to_use;
 	/* Fill the fragments */
 	for (i = 1; i < seg_num; i++) {
 		frag = &skb_shinfo(skb)->frags[i - 1];
 		size = skb_frag_size(frag);
-		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma)) {
-			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
-			ring->stats.sw_err_cnt++;
-			goto frag_dma_map_err;
-		}
-
-		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
-					  seg_num - 1 == i ? 1 : 0,
-					  DESC_TYPE_PAGE);
+
+		ret = priv->ops.fill_desc(ring, frag, size,
+					  seg_num - 1 == i ? 1 : 0,
+					  DESC_TYPE_PAGE);
 
 		if (ret)
-			goto frag_dma_map_err;
+			goto frag_fill_err;
 	}
 
 	/* Complete translate all packets */
@@ -1247,11 +1237,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	return NETDEV_TX_OK;
 
-frag_dma_map_err:
-	hns_nic_dma_unmap(ring, next_to_use_frag);
+frag_fill_err:
+	hns3_clear_desc(ring, next_to_use_frag);
 
-head_dma_map_err:
-	hns_nic_dma_unmap(ring, next_to_use_head);
+head_fill_err:
+	hns3_clear_desc(ring, next_to_use_head);
 
 out_err_tx_ok:
 	dev_kfree_skb_any(skb);
@@ -1313,13 +1303,10 @@ static int hns3_nic_set_features(struct net_device *netdev,
 	int ret;
 
 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
-		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
-			priv->ops.fill_desc = hns3_fill_desc_tso;
+		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
-		} else {
-			priv->ops.fill_desc = hns3_fill_desc;
+		else
 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-		}
 	}
 
 	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
@@ -1890,7 +1877,7 @@ static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
 	if (cb->type == DESC_TYPE_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
-	else
+	else if (cb->length)
 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
 			       ring_to_dma_dir(ring));
 }
@@ -3247,14 +3234,12 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 
+	priv->ops.fill_desc = hns3_fill_desc;
 	if ((netdev->features & NETIF_F_TSO) ||
-	    (netdev->features & NETIF_F_TSO6)) {
-		priv->ops.fill_desc = hns3_fill_desc_tso;
+	    (netdev->features & NETIF_F_TSO6))
 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
-	} else {
-		priv->ops.fill_desc = hns3_fill_desc;
+	else
 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-	}
 }
 
 static int hns3_client_init(struct hnae3_handle *handle)
@@ -419,8 +419,7 @@ struct hns3_nic_ring_data {
 struct hns3_nic_ops {
 	int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
-			 int size, dma_addr_t dma, int frag_end,
-			 enum hns_desc_type type);
+			 int size, int frag_end, enum hns_desc_type type);
 	int (*maybe_stop_tx)(struct sk_buff **out_skb,
 			     int *bnum, struct hns3_enet_ring *ring);
 	void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);