Commit 2bdfe0ba authored by Stephen Hemminger, committed by David S. Miller

netpoll retry cleanup

The netpoll beast was still not happy. If the beast got
clogged pipes, it tended to stare blankly off in space
for a long time.

The problem couldn't be completely fixed because the
beast talked with irq's disabled. But it could be made
less painful and shorter.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Parent 6c43ff18
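In concrete terms: the old path decremented a per-device npinfo->tries counter (reset to MAX_RETRIES = 20000 on success) and could keep spinning for a very long time with interrupts off; the new path retries only until the next clock tick, polling the device and delaying USEC_PER_POLL (50) microseconds between attempts, so with HZ=1000 that is at most 1000/50 = 20 attempts. A minimal userspace sketch of that bounded-retry shape follows; try_xmit, poll_device and the constants are illustrative stand-ins, not the kernel APIs used in the diff.

    #include <stdio.h>
    #include <unistd.h>

    #define HZ            1000  /* assumed tick rate: one jiffy = 1000 usec */
    #define USEC_PER_POLL 50    /* same spacing the patch uses */

    /* Stand-in for dev->hard_start_xmit(): report "busy" a few times, then accept. */
    static int try_xmit(int *busy_left)
    {
        if (*busy_left > 0) {
            (*busy_left)--;
            return 1;           /* analogous to NETDEV_TX_BUSY */
        }
        return 0;               /* analogous to NETDEV_TX_OK */
    }

    /* Stand-in for netpoll_poll(): give the "device" a chance to clean up. */
    static void poll_device(void)
    {
    }

    int main(void)
    {
        int busy_left = 5;      /* pretend the tx queue is busy for 5 attempts */
        int status = 1;         /* start out "busy", as the patch does */
        unsigned long tries;

        /* Try until the next clock tick: (1000000 / HZ) / USEC_PER_POLL attempts. */
        for (tries = (1000000UL / HZ) / USEC_PER_POLL; tries > 0; --tries) {
            status = try_xmit(&busy_left);
            if (status == 0)
                break;
            poll_device();
            usleep(USEC_PER_POLL);
        }

        puts(status == 0 ? "sent" : "dropped or requeued");
        return 0;
    }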
@@ -28,7 +28,6 @@ struct netpoll_info {
 	atomic_t refcnt;
 	spinlock_t poll_lock;
 	int poll_owner;
-	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -34,12 +34,12 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
-#define MAX_RETRIES 20000
 
 static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
+#define USEC_PER_POLL	50
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
@@ -72,6 +72,7 @@ static void queue_process(void *p)
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
+
 		netif_tx_unlock_bh(dev);
 	}
 }
@@ -244,50 +245,44 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
-	int status;
-	struct netpoll_info *npinfo;
+	int status = NETDEV_TX_BUSY;
+	unsigned long tries;
+	struct net_device *dev = np->dev;
+	struct netpoll_info *npinfo = np->dev->npinfo;
 
-	if (!np || !np->dev || !netif_running(np->dev)) {
-		__kfree_skb(skb);
-		return;
-	}
-
-	npinfo = np->dev->npinfo;
+	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+		__kfree_skb(skb);
+		return;
+	}
+
+	/* don't get messages out of order, and no recursion */
+	if ( !(np->drop == netpoll_queue && skb_queue_len(&npinfo->txq))
+	     && npinfo->poll_owner != smp_processor_id()
+	     && netif_tx_trylock(dev)) {
 
-	/* avoid recursion */
-	if (npinfo->poll_owner == smp_processor_id() ||
-	    np->dev->xmit_lock_owner == smp_processor_id()) {
+		/* try until next clock tick */
+		for(tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
+			if (!netif_queue_stopped(dev))
+				status = dev->hard_start_xmit(skb, dev);
+
+			if (status == NETDEV_TX_OK)
+				break;
+
+			/* tickle device maybe there is some cleanup */
+			netpoll_poll(np);
+
+			udelay(USEC_PER_POLL);
+		}
+		netif_tx_unlock(dev);
+	}
+
+	if (status != NETDEV_TX_OK) {
+		/* requeue for later */
 		if (np->drop)
 			np->drop(skb);
 		else
 			__kfree_skb(skb);
-		return;
 	}
-
-	do {
-		npinfo->tries--;
-		netif_tx_lock(np->dev);
-
-		/*
-		 * network drivers do not expect to be called if the queue is
-		 * stopped.
-		 */
-		status = NETDEV_TX_BUSY;
-		if (!netif_queue_stopped(np->dev))
-			status = np->dev->hard_start_xmit(skb, np->dev);
-
-		netif_tx_unlock(np->dev);
-
-		/* success */
-		if(!status) {
-			npinfo->tries = MAX_RETRIES; /* reset */
-			return;
-		}
-
-		/* transmit busy */
-		netpoll_poll(np);
-		udelay(50);
-	} while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -649,7 +644,6 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->rx_np = NULL;
 		spin_lock_init(&npinfo->poll_lock);
 		npinfo->poll_owner = -1;
-		npinfo->tries = MAX_RETRIES;
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
 		skb_queue_head_init(&npinfo->txq);
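Two details of the new netpoll_send_skb() are worth spelling out. status starts at NETDEV_TX_BUSY, so a packet that never reaches the try-lock branch still falls through to the drop/requeue path at the bottom. And a direct transmit is attempted only when (a) the netpoll tx queue is not already backed up while np->drop is netpoll_queue (otherwise the fresh packet would jump ahead of queued ones), (b) the current CPU is not the poll owner (no recursion), and (c) netif_tx_trylock() succeeds without spinning. A hedged standalone sketch of that guard, with the struct and field names invented purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the state netpoll_send_skb() actually checks. */
    struct send_ctx {
        bool drop_is_queue;  /* np->drop == netpoll_queue */
        int  txq_len;        /* skb_queue_len(&npinfo->txq) */
        int  poll_owner;     /* npinfo->poll_owner */
        int  this_cpu;       /* smp_processor_id() */
        bool tx_lock_free;   /* would netif_tx_trylock(dev) succeed? */
    };

    /* Mirror of the guard: send inline only if it cannot reorder queued packets,
     * cannot recurse into the CPU running netpoll_poll(), and the tx lock is free. */
    static bool can_send_inline(const struct send_ctx *c)
    {
        if (c->drop_is_queue && c->txq_len > 0)
            return false;            /* keep messages in order */
        if (c->poll_owner == c->this_cpu)
            return false;            /* no recursion */
        return c->tx_lock_free;      /* netif_tx_trylock(dev) */
    }

    int main(void)
    {
        struct send_ctx backlog = { true, 3, 1, 0, true };
        struct send_ctx clear   = { true, 0, 1, 0, true };

        printf("with queued packets: %s\n",
               can_send_inline(&backlog) ? "send inline" : "queue/drop");
        printf("queue empty:         %s\n",
               can_send_inline(&clear) ? "send inline" : "queue/drop");
        return 0;
    }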