Commit 53a01e00 authored by dhananjay@netxen.com, committed by Jeff Garzik

netxen: optimize tx handling

The netxen driver allows a limited number of threads to post skbs in the tx ring
simultaneously. If a transmit slot is unavailable, the driver calls schedule()
or loops in xmit_frame().

This patch returns TX_BUSY and lets the stack reschedule the packet when a
transmit slot is unavailable. It also removes the unnecessary tx timeout check
in the driver itself; the network stack does that anyway.
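
As a rough illustration of this pattern (not the netxen code itself; the my_*
names and struct are hypothetical), a driver's hard_start_xmit handler can simply
stop its queue and return NETDEV_TX_BUSY when the ring is full:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical per-device state; stands in for the driver's adapter struct. */
struct my_adapter {
	spinlock_t tx_lock;
	/* ... tx ring bookkeeping (producer/consumer indices) ... */
};

/* Illustrative stubs, not real netxen functions. */
static unsigned int my_ring_free_slots(struct my_adapter *adapter)
{
	return 0;	/* a real driver compares producer and consumer indices */
}

static void my_ring_post(struct my_adapter *adapter, struct sk_buff *skb)
{
	/* a real driver maps the skb fragments and fills tx descriptors here */
}

static int my_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct my_adapter *adapter = netdev_priv(netdev);
	unsigned int needed = skb_shinfo(skb)->nr_frags + 1;

	spin_lock_bh(&adapter->tx_lock);
	if (my_ring_free_slots(adapter) < needed) {
		/* Ring full: stop the queue and let the stack requeue the skb. */
		netif_stop_queue(netdev);
		spin_unlock_bh(&adapter->tx_lock);
		return NETDEV_TX_BUSY;
	}

	my_ring_post(adapter, skb);	/* map buffers, write descriptors */
	spin_unlock_bh(&adapter->tx_lock);
	return NETDEV_TX_OK;
}

In the patch below, the requeue path also sets NETXEN_NETDEV_STATUS, which the
tx-completion path can use to decide when to call netif_wake_queue() once
enough descriptors have been reclaimed.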
Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Parent 72b0a7a8
@@ -1248,7 +1248,6 @@ int netxen_process_cmd_ring(unsigned long data)
 	struct pci_dev *pdev;
 	struct netxen_skb_frag *frag;
 	u32 i;
-	struct sk_buff *skb = NULL;
 	int done;
 
 	spin_lock(&adapter->tx_lock);
@@ -1278,9 +1277,8 @@ int netxen_process_cmd_ring(unsigned long data)
 	while ((last_consumer != consumer) && (count1 < MAX_STATUS_HANDLE)) {
 		buffer = &adapter->cmd_buf_arr[last_consumer];
 		pdev = adapter->pdev;
-		frag = &buffer->frag_array[0];
-		skb = buffer->skb;
-		if (skb && (cmpxchg(&buffer->skb, skb, 0) == skb)) {
+		if (buffer->skb) {
+			frag = &buffer->frag_array[0];
 			pci_unmap_single(pdev, frag->dma, frag->length,
 					 PCI_DMA_TODEVICE);
 			frag->dma = 0ULL;
@@ -1293,8 +1291,8 @@ int netxen_process_cmd_ring(unsigned long data)
 			}
 
 			adapter->stats.skbfreed++;
-			dev_kfree_skb_any(skb);
-			skb = NULL;
+			dev_kfree_skb_any(buffer->skb);
+			buffer->skb = NULL;
 		} else if (adapter->proc_cmd_buf_counter == 1) {
 			adapter->stats.txnullskb++;
 		}
@@ -994,28 +994,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
-	/*
-	 * Everything is set up. Now, we just need to transmit it out.
-	 * Note that we have to copy the contents of buffer over to
-	 * right place. Later on, this can be optimized out by de-coupling the
-	 * producer index from the buffer index.
-	 */
-retry_getting_window:
-	spin_lock_bh(&adapter->tx_lock);
-	if (adapter->total_threads >= MAX_XMIT_PRODUCERS) {
-		spin_unlock_bh(&adapter->tx_lock);
-		/*
-		 * Yield CPU
-		 */
-		if (!in_atomic())
-			schedule();
-		else {
-			for (i = 0; i < 20; i++)
-				cpu_relax();	/*This a nop instr on i386 */
-		}
-		goto retry_getting_window;
-	}
-	local_producer = adapter->cmd_producer;
 	/* There 4 fragments per descriptor */
 	no_of_desc = (frag_count + 3) >> 2;
 	if (netdev->features & NETIF_F_TSO) {
@@ -1029,16 +1007,19 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 			}
 		}
 	}
+	spin_lock_bh(&adapter->tx_lock);
+	if (adapter->total_threads >= MAX_XMIT_PRODUCERS) {
+		goto out_requeue;
+	}
+
+	local_producer = adapter->cmd_producer;
 	k = adapter->cmd_producer;
 	max_tx_desc_count = adapter->max_tx_desc_count;
 	last_cmd_consumer = adapter->last_cmd_consumer;
 	if ((k + no_of_desc) >=
 	    ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
 	     last_cmd_consumer)) {
-		netif_stop_queue(netdev);
-		adapter->flags |= NETXEN_NETDEV_STATUS;
-		spin_unlock_bh(&adapter->tx_lock);
-		return NETDEV_TX_BUSY;
+		goto out_requeue;
 	}
 	k = get_index_range(k, max_tx_desc_count, no_of_desc);
 	adapter->cmd_producer = k;
@@ -1091,6 +1072,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 						  adapter->max_tx_desc_count);
 			hwdesc = &hw->cmd_desc_head[producer];
 			memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
+			pbuf = &adapter->cmd_buf_arr[producer];
+			pbuf->skb = NULL;
 		}
 		frag = &skb_shinfo(skb)->frags[i - 1];
 		len = frag->size;
@@ -1146,6 +1129,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		}
 		/* copy the MAC/IP/TCP headers to the cmd descriptor list */
 		hwdesc = &hw->cmd_desc_head[producer];
+		pbuf = &adapter->cmd_buf_arr[producer];
+		pbuf->skb = NULL;
 
 		/* copy the first 64 bytes */
 		memcpy(((void *)hwdesc) + 2,
@@ -1154,6 +1139,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		if (more_hdr) {
 			hwdesc = &hw->cmd_desc_head[producer];
+			pbuf = &adapter->cmd_buf_arr[producer];
+			pbuf->skb = NULL;
 			/* copy the next 64 bytes - should be enough except
 			 * for pathological case
 			 */
@@ -1187,14 +1174,17 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 	adapter->stats.xmitfinished++;
-	spin_unlock_bh(&adapter->tx_lock);
 	netdev->trans_start = jiffies;
 	DPRINTK(INFO, "wrote CMD producer %x to phantom\n", producer);
 	DPRINTK(INFO, "Done. Send\n");
+	spin_unlock_bh(&adapter->tx_lock);
 	return NETDEV_TX_OK;
+
+out_requeue:
+	netif_stop_queue(netdev);
+	adapter->flags |= NETXEN_NETDEV_STATUS;
+	spin_unlock_bh(&adapter->tx_lock);
+	return NETDEV_TX_BUSY;
 }
 
 static void netxen_watchdog(unsigned long v)