Commit 6366ad33 authored by Alexander Duyck, committed by David S. Miller

igb: remove use of skb_dma_map from driver

This change removes the skb_dma_map/unmap calls from the igb driver because
the call is incompatible with IOMMU-enabled kernels.  To prevent warnings
about using the wrong unmap call, I have added a mapped_as_page value to the
buffer_info structure to track whether the mapped region is a page or a
buffer.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent e5a43549
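
The rule the new code leans on is the usual DMA-API pairing requirement: a handle obtained from pci_map_page() must be released with pci_unmap_page(), and one obtained from pci_map_single() with pci_unmap_single(); mismatched calls trigger DMA-API debug warnings. Below is a minimal sketch of how a per-buffer flag keeps that pairing straight. The struct and helper names here are hypothetical illustrations, not code from the driver; the driver stores the same kind of flag in its existing buffer_info structure, as the first hunk shows.

```c
/* Hypothetical example of the map/unmap pairing rule this commit relies on. */
struct example_tx_mapping {
	dma_addr_t dma;		/* handle returned by the map call */
	u16 len;		/* length passed to the map call */
	u16 mapped_as_page;	/* records which map call produced 'dma' */
};

static void example_release(struct pci_dev *pdev, struct example_tx_mapping *m)
{
	if (!m->dma)
		return;

	if (m->mapped_as_page)
		/* mapped with pci_map_page(), so unmap with pci_unmap_page() */
		pci_unmap_page(pdev, m->dma, m->len, PCI_DMA_TODEVICE);
	else
		/* mapped with pci_map_single(), so unmap with pci_unmap_single() */
		pci_unmap_single(pdev, m->dma, m->len, PCI_DMA_TODEVICE);

	m->dma = 0;
}
```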
```diff
@@ -137,12 +137,13 @@ struct igb_buffer {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
+			u16 mapped_as_page;
 		};
 		/* RX */
 		struct {
 			struct page *page;
-			u64 page_dma;
-			unsigned int page_offset;
+			dma_addr_t page_dma;
+			u16 page_offset;
 		};
 	};
 };
```
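
For context, the hunk above only shows the tail of the per-descriptor structure. Assuming the usual surrounding fields (the skb pointer and head DMA handle are an assumption here, not part of the diff), the whole structure after this change looks roughly like the sketch below. Note that page_dma now uses dma_addr_t, the type actually returned by the mapping calls, instead of a fixed u64.

```c
/* Approximate layout after this change; only the union members come from the
 * hunk above, the fields outside the union are an assumption for illustration. */
struct igb_buffer {
	struct sk_buff *skb;	/* assumed */
	dma_addr_t dma;		/* assumed: DMA handle for the TX head / RX buffer */
	union {
		/* TX */
		struct {
			unsigned long time_stamp;
			u16 length;
			u16 next_to_watch;
			u16 mapped_as_page;
		};
		/* RX */
		struct {
			struct page *page;
			dma_addr_t page_dma;
			u16 page_offset;
		};
	};
};
```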
```diff
@@ -2654,16 +2654,27 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
 				    struct igb_buffer *buffer_info)
 {
-	buffer_info->dma = 0;
+	if (buffer_info->dma) {
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(tx_ring->pdev,
+				       buffer_info->dma,
+				       buffer_info->length,
+				       PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(tx_ring->pdev,
+					 buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
 	if (buffer_info->skb) {
-		skb_dma_unmap(&tx_ring->pdev->dev,
-			      buffer_info->skb,
-			      DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
 	}
 	buffer_info->time_stamp = 0;
-	/* buffer_info must be completely set up in the transmit path */
+	buffer_info->length = 0;
+	buffer_info->next_to_watch = 0;
+	buffer_info->mapped_as_page = false;
 }
 
 /**
```
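
With the unmap logic folded into igb_unmap_and_free_tx_resource(), a caller can release a descriptor without knowing how it was mapped. A hypothetical cleanup loop in the style of the driver's ring-teardown path would just be the following; the ring fields used here (count, buffer_info) are the ones visible elsewhere in this diff.

```c
/* Hypothetical teardown loop, shown only to illustrate how the helper is
 * meant to be used; not taken verbatim from the driver. */
static void example_clean_tx_ring(struct igb_ring *tx_ring)
{
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		struct igb_buffer *buffer_info = &tx_ring->buffer_info[i];

		/* picks pci_unmap_page() or pci_unmap_single() by itself */
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}
}
```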
```diff
@@ -3561,24 +3572,19 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	unsigned int len = skb_headlen(skb);
 	unsigned int count = 0, i;
 	unsigned int f;
-	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&pdev->dev, "TX DMA map failed\n");
-		return 0;
-	}
-
-	map = skb_shinfo(skb)->dma_maps;
-
 	buffer_info = &tx_ring->buffer_info[i];
 	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
 	buffer_info->length = len;
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = skb_shinfo(skb)->dma_head;
+	buffer_info->dma = pci_map_single(pdev, skb->data, len,
+					  PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		goto dma_error;
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
```
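
Unlike the old skb_dma_map() path, every mapping is now created and checked individually: on IOMMU or swiotlb kernels pci_map_single() can fail, so the returned handle has to be validated with pci_dma_mapping_error() before it is written into a descriptor. A sketch of that discipline follows; the function and parameter names are hypothetical.

```c
/* Sketch of the map-then-check pattern used above; 'data' and 'len' are
 * hypothetical parameters, not the driver's variables. */
static int example_map_head(struct pci_dev *pdev, void *data, size_t len,
			    dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, data, len, PCI_DMA_TODEVICE);

	/* the mapping itself can fail, e.g. when an IOMMU runs out of space */
	if (pci_dma_mapping_error(pdev, *dma))
		return -ENOMEM;

	return 0;
}
```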
```diff
@@ -3595,7 +3601,15 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 		buffer_info->length = len;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = map[count];
+		buffer_info->mapped_as_page = true;
+		buffer_info->dma = pci_map_page(pdev,
+						frag->page,
+						frag->page_offset,
+						len,
+						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			goto dma_error;
+
 		count++;
 	}
```
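
Each page fragment now gets its own mapping and its own buffer_info entry, with mapped_as_page set so the unmap helper later picks pci_unmap_page(). Together with the head, a non-TSO packet therefore consumes 1 + nr_frags entries, which is what the `return ++count;` in the next hunk reports, since count only counted the fragments. A tiny illustrative helper, not present in the driver:

```c
/* Hypothetical helper: number of buffer_info entries a non-TSO skb consumes
 * under this scheme -- one for the linear head mapped with pci_map_single()
 * plus one per page fragment mapped with pci_map_page(). */
static unsigned int example_tx_buffer_count(struct sk_buff *skb)
{
	return 1 + skb_shinfo(skb)->nr_frags;
}
```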
```diff
@@ -3603,6 +3617,29 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return ++count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+
+	/* clear timestamp and dma mappings for failed buffer_info mapping */
+	buffer_info->dma = 0;
+	buffer_info->time_stamp = 0;
+	buffer_info->length = 0;
+	buffer_info->next_to_watch = 0;
+	buffer_info->mapped_as_page = false;
+	count--;
+
+	/* clear timestamp and dma mappings for remaining portion of packet */
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		buffer_info = &tx_ring->buffer_info[i];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+	}
+
+	return 0;
 }
 
 static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
```
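
On a mapping failure the new dma_error path walks backwards from the entry that failed and releases everything mapped so far; because the ring is circular, stepping below index 0 wraps around to the end (`i += tx_ring->count`). The standalone program below (plain C, no kernel dependencies, with a simplified loop condition and made-up ring size) illustrates only that wraparound arithmetic.

```c
#include <stdio.h>

/* Standalone illustration of a backward walk with wraparound over a ring. */
int main(void)
{
	const int ring_count = 8;	/* hypothetical ring size */
	int i = 1;			/* index of the entry that failed */
	int count = 3;			/* entries mapped before the failure */

	while (count > 0) {
		count--;
		i--;
		if (i < 0)
			i += ring_count;	/* wrap to the end of the ring */
		printf("undo ring entry %d\n", i);	/* prints 0, 7, 6 */
	}
	return 0;
}
```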
```diff
@@ -3755,7 +3792,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	 * has occured and we need to rewind the descriptor queue
 	 */
 	count = igb_tx_map_adv(tx_ring, skb, first);
-	if (count <= 0) {
+	if (!count) {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;
```
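
After this change igb_tx_map_adv() signals failure purely by returning 0 (it never returns a negative value, and by the time it returns everything it had mapped has already been unmapped again), so the `!count` test is all the caller needs before dropping the skb and rewinding next_to_use. A hypothetical caller, condensed from the pattern in the hunk above:

```c
/* Condensed, hypothetical transmit tail illustrating the calling convention;
 * the real igb_xmit_frame_ring_adv() also resets bookkeeping and queues the
 * descriptors to hardware. */
static netdev_tx_t example_xmit(struct igb_ring *tx_ring, struct sk_buff *skb,
				int first)
{
	int count = igb_tx_map_adv(tx_ring, skb, first);

	if (!count) {
		/* mapping failed; all partial mappings were already undone */
		dev_kfree_skb_any(skb);
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	/* hand 'count' descriptors to the hardware here (omitted) */
	return NETDEV_TX_OK;
}
```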