Commit c46e9907, authored by Eric Dumazet, committed by David S. Miller

amd8111e: add GRO support

Use napi_complete_done() instead of __napi_complete() to:

1) Get support of gro_flush_timeout if opt-in
2) Not rearm interrupts for busy-polling users.
3) use standard NAPI API.
4) get rid of baroque code and ease maintenance.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 1fa8c5f3
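The core of the change is visible in the diff below: the driver's RX poll loop now hands packets to GRO via napi_gro_receive() and finishes with napi_complete_done(), letting the NAPI core decide whether the RX interrupt may be re-armed. As a rough illustration of that pattern, here is a minimal sketch in the same spirit (not code from amd8111e; struct my_adapter, my_fetch_rx_skb() and my_enable_rx_irq() are hypothetical placeholders for driver-specific pieces):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_adapter {			/* hypothetical driver private data */
	struct napi_struct napi;
	/* ... RX ring state ... */
};

struct sk_buff *my_fetch_rx_skb(struct my_adapter *ap);	/* hypothetical: next completed RX buffer */
void my_enable_rx_irq(struct my_adapter *ap);		/* hypothetical: unmask RX interrupt */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *ap = container_of(napi, struct my_adapter, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = my_fetch_rx_skb(ap);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);	/* feed GRO instead of netif_receive_skb() */
		work_done++;
	}

	/* Report the work done; only re-arm the RX interrupt if the core
	 * says polling is really over. napi_complete_done() returns false
	 * while busy polling or a pending gro_flush_timeout keeps this
	 * NAPI instance scheduled.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_enable_rx_irq(ap);

	return work_done;
}

With this pattern, the opt-in /sys/class/net/<iface>/gro_flush_timeout timer and busy polling both behave as intended: the driver leaves its interrupt masked whenever the core keeps the NAPI instance scheduled, instead of re-arming it unconditionally.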
@@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 	void __iomem *mmio = lp->mmio;
 	struct sk_buff *skb,*new_skb;
 	int min_pkt_len, status;
-	unsigned int intr0;
 	int num_rx_pkt = 0;
 	short pkt_len;
 #if AMD8111E_VLAN_TAG_USED
 	short vtag;
 #endif
-	int rx_pkt_limit = budget;
-	unsigned long flags;
 
-	if (rx_pkt_limit <= 0)
-		goto rx_not_empty;
-	do{
-		/* process receive packets until we use the quota.
-		 * If we own the next entry, it's a new packet. Send it up.
+	while (num_rx_pkt < budget) {
+		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+		if (status & OWN_BIT)
+			break;
+
+		/* There is a tricky error noted by John Murphy,
+		 * <murf@perftech.com> to Russ Nelson: Even with
+		 * full-sized * buffers it's possible for a
+		 * jabber packet to use two buffers, with only
+		 * the last correctly noting the error.
 		 */
-		while(1) {
-			status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
-			if (status & OWN_BIT)
-				break;
-
-			/* There is a tricky error noted by John Murphy,
-			 * <murf@perftech.com> to Russ Nelson: Even with
-			 * full-sized * buffers it's possible for a
-			 * jabber packet to use two buffers, with only
-			 * the last correctly noting the error.
-			 */
-			if(status & ERR_BIT) {
-				/* resetting flags */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				goto err_next_pkt;
-			}
-			/* check for STP and ENP */
-			if(!((status & STP_BIT) && (status & ENP_BIT))){
-				/* resetting flags */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				goto err_next_pkt;
-			}
-			pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+		if (status & ERR_BIT) {
+			/* resetting flags */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			goto err_next_pkt;
+		}
+		/* check for STP and ENP */
+		if (!((status & STP_BIT) && (status & ENP_BIT))){
+			/* resetting flags */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			goto err_next_pkt;
+		}
+		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
 
 #if AMD8111E_VLAN_TAG_USED
-			vtag = status & TT_MASK;
-			/*MAC will strip vlan tag*/
-			if (vtag != 0)
-				min_pkt_len =MIN_PKT_LEN - 4;
-			else
+		vtag = status & TT_MASK;
+		/* MAC will strip vlan tag */
+		if (vtag != 0)
+			min_pkt_len = MIN_PKT_LEN - 4;
+		else
 #endif
-				min_pkt_len =MIN_PKT_LEN;
+			min_pkt_len = MIN_PKT_LEN;
 
-			if (pkt_len < min_pkt_len) {
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				lp->drv_rx_errors++;
-				goto err_next_pkt;
-			}
-			if(--rx_pkt_limit < 0)
-				goto rx_not_empty;
-			new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
-			if (!new_skb) {
-				/* if allocation fail,
-				 * ignore that pkt and go to next one
-				 */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				lp->drv_rx_errors++;
-				goto err_next_pkt;
-			}
+		if (pkt_len < min_pkt_len) {
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			lp->drv_rx_errors++;
+			goto err_next_pkt;
+		}
+		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+		if (!new_skb) {
+			/* if allocation fail,
+			 * ignore that pkt and go to next one
+			 */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			lp->drv_rx_errors++;
+			goto err_next_pkt;
+		}
 
-			skb_reserve(new_skb, 2);
-			skb = lp->rx_skbuff[rx_index];
-			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
-					 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
-			skb_put(skb, pkt_len);
-			lp->rx_skbuff[rx_index] = new_skb;
-			lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
-								   new_skb->data,
-								   lp->rx_buff_len-2,
-								   PCI_DMA_FROMDEVICE);
+		skb_reserve(new_skb, 2);
+		skb = lp->rx_skbuff[rx_index];
+		pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+				 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+		skb_put(skb, pkt_len);
+		lp->rx_skbuff[rx_index] = new_skb;
+		lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+							   new_skb->data,
+							   lp->rx_buff_len-2,
+							   PCI_DMA_FROMDEVICE);
 
-			skb->protocol = eth_type_trans(skb, dev);
+		skb->protocol = eth_type_trans(skb, dev);
 
 #if AMD8111E_VLAN_TAG_USED
-			if (vtag == TT_VLAN_TAGGED){
-				u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
-				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-			}
+		if (vtag == TT_VLAN_TAGGED){
+			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+		}
 #endif
-			netif_receive_skb(skb);
-			/*COAL update rx coalescing parameters*/
-			lp->coal_conf.rx_packets++;
-			lp->coal_conf.rx_bytes += pkt_len;
-			num_rx_pkt++;
+		napi_gro_receive(napi, skb);
+		/* COAL update rx coalescing parameters */
+		lp->coal_conf.rx_packets++;
+		lp->coal_conf.rx_bytes += pkt_len;
+		num_rx_pkt++;
 
 err_next_pkt:
-			lp->rx_ring[rx_index].buff_phy_addr
-				= cpu_to_le32(lp->rx_dma_addr[rx_index]);
-			lp->rx_ring[rx_index].buff_count =
-				cpu_to_le16(lp->rx_buff_len-2);
-			wmb();
-			lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
-			rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
-		}
-		/* Check the interrupt status register for more packets in the
-		 * mean time. Process them since we have not used up our quota.
-		 */
-		intr0 = readl(mmio + INT0);
-		/*Ack receive packets */
-		writel(intr0 & RINT0,mmio + INT0);
-
-	} while(intr0 & RINT0);
+		lp->rx_ring[rx_index].buff_phy_addr
+			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
+		lp->rx_ring[rx_index].buff_count =
+			cpu_to_le16(lp->rx_buff_len-2);
+		wmb();
+		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+	}
 
-	if (rx_pkt_limit > 0) {
+	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
+		unsigned long flags;
+
 		/* Receive descriptor is empty now */
 		spin_lock_irqsave(&lp->lock, flags);
-		__napi_complete(napi);
 		writel(VAL0|RINTEN0, mmio + INTEN0);
 		writel(VAL2 | RDMD0, mmio + CMD0);
 		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 
-rx_not_empty:
 	return num_rx_pkt;
 }