Commit e01ee14d authored by Matt Carlson, committed by David S. Miller

tg3: Add partial fragment unmapping code

The following patches are going to break skb fragments into smaller
sizes.  This patch attempts to make the change easier to digest by only
addressing the skb teardown portion.

The patch modifies the driver to skip over any BDs that have a flag set
that indicates the BD isn't the beginning of an skb fragment.  Such BDs
were a result of segmentation and do not need a pci_unmap_page() call.
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Reviewed-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 0d681b27
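
Before the diff, a minimal standalone sketch of the skip pattern being added, so the repeated while loops below are easier to read. Everything here is illustrative: TX_RING_SIZE, the tx_ring_info stand-in, and especially the assumption that the (future) mapping code marks every BD of a split fragment except the last one as fragmented; this patch only contains the teardown side. NEXT_TX mirrors the driver's ring-advance macro.

/*
 * Minimal standalone model of the skip loop this patch adds -- not the
 * driver code itself.  Assumed convention (the mapping side only lands in
 * the follow-up patches): every BD of a split fragment except the last
 * one is marked fragmented = true.
 */
#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE	8			/* illustrative size only */
#define NEXT_TX(i)	(((i) + 1) & (TX_RING_SIZE - 1))

struct tx_ring_info {				/* stand-in for tg3_tx_ring_info */
	bool fragmented;
};

int main(void)
{
	/* One fragment split by segmentation across ring entries 0..2. */
	struct tx_ring_info ring[TX_RING_SIZE] = {
		[0] = { .fragmented = true },
		[1] = { .fragmented = true },
		/* entry 2 is the fragment's last BD: fragmented stays false */
	};
	unsigned int idx = 0;

	/* The driver unmaps entry 0 here; entries 1..2 need no unmap call. */

	/* The walk the patch adds: clear and skip the extra BDs... */
	while (ring[idx].fragmented) {
		ring[idx].fragmented = false;
		idx = NEXT_TX(idx);
	}
	/* ...then step past the fragment's final BD. */
	idx = NEXT_TX(idx);

	printf("teardown resumes at ring entry %u\n", idx);	/* prints 3 */
	return 0;
}

The same walk appears at each unmap site in tg3_tx() and tg3_tx_skb_unmap() in the diff below.
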
@@ -4840,6 +4840,12 @@ static void tg3_tx(struct tg3_napi *tnapi)
 
 		ri->skb = NULL;
 
+		while (ri->fragmented) {
+			ri->fragmented = false;
+			sw_idx = NEXT_TX(sw_idx);
+			ri = &tnapi->tx_buffers[sw_idx];
+		}
+
 		sw_idx = NEXT_TX(sw_idx);
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -4851,6 +4857,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			       dma_unmap_addr(ri, mapping),
 			       skb_shinfo(skb)->frags[i].size,
 			       PCI_DMA_TODEVICE);
+
+			while (ri->fragmented) {
+				ri->fragmented = false;
+				sw_idx = NEXT_TX(sw_idx);
+				ri = &tnapi->tx_buffers[sw_idx];
+			}
+
 			sw_idx = NEXT_TX(sw_idx);
 		}
@@ -5926,6 +5939,13 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
 			 dma_unmap_addr(txb, mapping),
 			 skb_headlen(skb),
 			 PCI_DMA_TODEVICE);
+
+	while (txb->fragmented) {
+		txb->fragmented = false;
+		entry = NEXT_TX(entry);
+		txb = &tnapi->tx_buffers[entry];
+	}
+
 	for (i = 0; i < last; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -5935,6 +5955,12 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
 		pci_unmap_page(tnapi->tp->pdev,
 			       dma_unmap_addr(txb, mapping),
 			       frag->size, PCI_DMA_TODEVICE);
+
+		while (txb->fragmented) {
+			txb->fragmented = false;
+			entry = NEXT_TX(entry);
+			txb = &tnapi->tx_buffers[entry];
+		}
 	}
 }

@@ -2655,6 +2655,7 @@ struct ring_info {
 struct tg3_tx_ring_info {
 	struct sk_buff		*skb;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
+	bool			fragmented;
 };
 
 struct tg3_link_config {