Commit 44df32c5, authored by Alexander Duyck, committed by David S. Miller

ixgbe: refactor tx buffer processing to use skb_dma_map/unmap

This patch resolves an issue where pci_map_single was used to map a buffer and
pci_unmap_page was then used to unmap it.  In addition, it handles any error
conditions that skb_dma_map may report.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 8be0e467
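For context before the diff: skb_dma_map() and skb_dma_unmap() were core networking helpers of this kernel generation (since removed upstream). skb_dma_map() maps the skb's linear data and every page fragment in one call, records the DMA addresses in skb_shinfo(skb)->dma_maps[] (index 0 for the head, index f + 1 for page fragment f), and returns non-zero on failure with nothing left mapped; skb_dma_unmap() tears the whole mapping down again. The following is a minimal sketch of that calling pattern, not driver code; the example_* names and the frag_dma output array are placeholders introduced here for illustration.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/*
 * Sketch of the skb_dma_map()/skb_dma_unmap() pattern this patch adopts.
 * frag_dma must hold at least skb_shinfo(skb)->nr_frags entries.
 */
static int example_tx_map(struct device *dev, struct sk_buff *skb,
                          dma_addr_t *head_dma, dma_addr_t *frag_dma)
{
        dma_addr_t *map;
        unsigned int f;

        /* One call maps skb->data and all page frags; on failure nothing
         * is left mapped, so the caller can simply drop the packet. */
        if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
                return -ENOMEM;

        map = skb_shinfo(skb)->dma_maps;
        *head_dma = map[0];                     /* linear data */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                frag_dma[f] = map[f + 1];       /* page fragment f */

        return 0;
}

/* Teardown is symmetric: one call unmaps the head and every fragment,
 * which is what the reworked ixgbe_unmap_and_free_tx_resource() relies on. */
static void example_tx_unmap(struct device *dev, struct sk_buff *skb)
{
        skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
}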
@@ -187,15 +187,14 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
 {
-        if (tx_buffer_info->dma) {
-                pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
-                               tx_buffer_info->length, PCI_DMA_TODEVICE);
-                tx_buffer_info->dma = 0;
-        }
+        tx_buffer_info->dma = 0;
         if (tx_buffer_info->skb) {
+                skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
+                              DMA_TO_DEVICE);
                 dev_kfree_skb_any(tx_buffer_info->skb);
                 tx_buffer_info->skb = NULL;
         }
+        tx_buffer_info->time_stamp = 0;
         /* tx_buffer_info must be completely set up in the transmit path */
 }
@@ -204,15 +203,11 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                        unsigned int eop)
 {
         struct ixgbe_hw *hw = &adapter->hw;
-        u32 head, tail;
 
         /* Detect a transmit hang in hardware, this serializes the
          * check with the clearing of time_stamp and movement of eop */
-        head = IXGBE_READ_REG(hw, tx_ring->head);
-        tail = IXGBE_READ_REG(hw, tx_ring->tail);
         adapter->detect_tx_hung = false;
-        if ((head != tail) &&
-            tx_ring->tx_buffer_info[eop].time_stamp &&
+        if (tx_ring->tx_buffer_info[eop].time_stamp &&
             time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
             !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                 /* detected Tx unit hang */
@@ -227,7 +222,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                         "  time_stamp <%lx>\n"
                         "  jiffies <%lx>\n",
                         tx_ring->queue_index,
-                        head, tail,
+                        IXGBE_READ_REG(hw, tx_ring->head),
+                        IXGBE_READ_REG(hw, tx_ring->tail),
                         tx_ring->next_to_use, eop,
                         tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                 return true;
@@ -4164,32 +4160,39 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct sk_buff *skb, unsigned int first)
 {
         struct ixgbe_tx_buffer *tx_buffer_info;
-        unsigned int len = skb->len;
+        unsigned int len = skb_headlen(skb);
         unsigned int offset = 0, size, count = 0, i;
         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
         unsigned int f;
-
-        len -= skb->data_len;
+        dma_addr_t *map;
 
         i = tx_ring->next_to_use;
 
+        if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+                dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+                return 0;
+        }
+
+        map = skb_shinfo(skb)->dma_maps;
+
         while (len) {
                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
                 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                 tx_buffer_info->length = size;
-                tx_buffer_info->dma = pci_map_single(adapter->pdev,
-                                                     skb->data + offset,
-                                                     size, PCI_DMA_TODEVICE);
+                tx_buffer_info->dma = map[0] + offset;
                 tx_buffer_info->time_stamp = jiffies;
                 tx_buffer_info->next_to_watch = i;
 
                 len -= size;
                 offset += size;
                 count++;
-                i++;
-                if (i == tx_ring->count)
-                        i = 0;
+
+                if (len) {
+                        i++;
+                        if (i == tx_ring->count)
+                                i = 0;
+                }
         }
 
         for (f = 0; f < nr_frags; f++) {
@@ -4197,33 +4200,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                 frag = &skb_shinfo(skb)->frags[f];
                 len = frag->size;
-                offset = frag->page_offset;
+                offset = 0;
 
                 while (len) {
+                        i++;
+                        if (i == tx_ring->count)
+                                i = 0;
+
                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
                         size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
                         tx_buffer_info->length = size;
-                        tx_buffer_info->dma = pci_map_page(adapter->pdev,
-                                                           frag->page,
-                                                           offset,
-                                                           size,
-                                                           PCI_DMA_TODEVICE);
+                        tx_buffer_info->dma = map[f + 1] + offset;
                         tx_buffer_info->time_stamp = jiffies;
                         tx_buffer_info->next_to_watch = i;
 
                         len -= size;
                         offset += size;
                         count++;
-                        i++;
-                        if (i == tx_ring->count)
-                                i = 0;
                 }
         }
 
-        if (i == 0)
-                i = tx_ring->count - 1;
-        else
-                i = i - 1;
-
         tx_ring->tx_buffer_info[i].skb = skb;
         tx_ring->tx_buffer_info[first].next_to_watch = i;
@@ -4389,13 +4386,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
             (skb->ip_summed == CHECKSUM_PARTIAL))
                 tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
-        ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-                       ixgbe_tx_map(adapter, tx_ring, skb, first),
-                       skb->len, hdr_len);
-
-        netdev->trans_start = jiffies;
+        count = ixgbe_tx_map(adapter, tx_ring, skb, first);
 
-        ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+        if (count) {
+                ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
+                               hdr_len);
+                netdev->trans_start = jiffies;
+                ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+        } else {
+                dev_kfree_skb_any(skb);
+                tx_ring->tx_buffer_info[first].time_stamp = 0;
+                tx_ring->next_to_use = first;
+        }
 
         return NETDEV_TX_OK;
 }
...