Commit 2a1292b3 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  [LRO]: fix lro_gen_skb() alignment
  [TCP]: NAGLE_PUSH seems to be a wrong way around
  [TCP]: Move prior_in_flight collect to more robust place
  [TCP] FRTO: Use of existing funcs make code more obvious & robust
  [IRDA]: Move ircomm_tty_line_info() under #ifdef CONFIG_PROC_FS
  [ROSE]: Trivial compilation CONFIG_INET=n case
  [IPVS]: Fix sched registration race when checking for name collision.
  [IPVS]: Don't leak sysctl tables if the scheduler registration fails.
@@ -1979,6 +1979,7 @@ static int myri10ge_open(struct net_device *dev)
 	lro_mgr->lro_arr = mgp->rx_done.lro_desc;
 	lro_mgr->get_frag_header = myri10ge_get_frag_header;
 	lro_mgr->max_aggr = myri10ge_lro_max_pkts;
+	lro_mgr->frag_align_pad = 2;
 	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
 		lro_mgr->max_aggr = MAX_SKB_FRAGS;

@@ -91,6 +91,9 @@ struct net_lro_mgr {
 	int max_desc; /* Max number of LRO descriptors  */
 	int max_aggr; /* Max number of LRO packets to be aggregated */
 
+	int frag_align_pad; /* Padding required to properly align layer 3
+			     * headers in generated skb when using frags */
+
 	struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
 
 	/*

@@ -401,10 +401,11 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
 	int data_len = len;
 	int hdr_len = min(len, hlen);
 
-	skb = netdev_alloc_skb(lro_mgr->dev, hlen);
+	skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
 	if (!skb)
 		return NULL;
 
+	skb_reserve(skb, lro_mgr->frag_align_pad);
 	skb->len = len;
 	skb->data_len = len - hdr_len;
 	skb->truesize += true_size;

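Taken together, the three hunks above add a driver-configurable head pad: myri10ge asks for 2 bytes, the field is declared in struct net_lro_mgr, and lro_gen_skb() reserves the pad before filling in headers. Why 2 bytes: an aligned buffer plus the 14-byte Ethernet header would leave the IP header on a 2-byte boundary; reserving 2 bytes first pushes it to a 4-byte boundary (the same convention as the kernel's NET_IP_ALIGN). A standalone sketch of the arithmetic, with illustrative names only:

#include <stdio.h>

#define ETH_HLEN 14          /* Ethernet header length */
#define FRAG_ALIGN_PAD 2     /* the pad the patch reserves */

int main(void)
{
	unsigned long base = 0x1000;  /* assume a 4-byte-aligned buffer start */

	/* Without the pad, the IP header lands at offset 14: mod 4 = 2. */
	printf("no pad: IP header address mod 4 = %lu\n",
	       (base + ETH_HLEN) % 4);

	/* skb_reserve(skb, 2) shifts the data so it lands at 16: mod 4 = 0. */
	printf("2B pad: IP header address mod 4 = %lu\n",
	       (base + FRAG_ALIGN_PAD + ETH_HLEN) % 4);
	return 0;
}
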
@@ -580,9 +580,14 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
 
 static int __init ip_vs_lblc_init(void)
 {
+	int ret;
+
 	INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
 	sysctl_header = register_sysctl_table(lblc_root_table);
-	return register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+	if (ret)
+		unregister_sysctl_table(sysctl_header);
+	return ret;
 }

@@ -769,9 +769,14 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
 
 static int __init ip_vs_lblcr_init(void)
 {
+	int ret;
+
 	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
 	sysctl_header = register_sysctl_table(lblcr_root_table);
-	return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+	if (ret)
+		unregister_sysctl_table(sysctl_header);
+	return ret;
 }

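The lblc and lblcr fixes are the same unwind-on-error pattern: ip_vs_lblc_init() registers a sysctl table and then a scheduler, so when the second registration fails, the first must be rolled back or the table leaks. A minimal userspace sketch of the pattern; the acquire/release names are made up for illustration, not taken from the patch:

#include <stdio.h>

/* Stand-ins for the two resources ip_vs_lblc_init() acquires. */
static int register_table(void)    { puts("table registered");   return 0; }
static void unregister_table(void) { puts("table unregistered"); }
static int register_sched(void)    { puts("sched failed");       return -1; }

/* Unwind on error: a later failure must release every earlier
 * resource, in reverse order, before the error is propagated. */
static int init(void)
{
	int ret;

	ret = register_table();        /* like register_sysctl_table() */
	if (ret)
		return ret;

	ret = register_sched();        /* like register_ip_vs_scheduler() */
	if (ret)
		unregister_table();    /* like unregister_sysctl_table() */
	return ret;
}

int main(void)
{
	return init() ? 1 : 0;
}
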
@@ -183,19 +183,6 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
-	/*
-	 * Make sure that the scheduler with this name doesn't exist
-	 * in the scheduler list.
-	 */
-	sched = ip_vs_sched_getbyname(scheduler->name);
-	if (sched) {
-		ip_vs_scheduler_put(sched);
-		ip_vs_use_count_dec();
-		IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
-			  "already existed in the system\n", scheduler->name);
-		return -EINVAL;
-	}
-
 	write_lock_bh(&__ip_vs_sched_lock);
 
 	if (scheduler->n_list.next != &scheduler->n_list) {

@@ -206,6 +193,20 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 		return -EINVAL;
 	}
 
+	/*
+	 * Make sure that the scheduler with this name doesn't exist
+	 * in the scheduler list.
+	 */
+	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
+		if (strcmp(scheduler->name, sched->name) == 0) {
+			write_unlock_bh(&__ip_vs_sched_lock);
+			ip_vs_use_count_dec();
+			IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
+				  "already existed in the system\n",
+				  scheduler->name);
+			return -EINVAL;
+		}
+	}
+
 	/*
 	 * Add it into the d-linked scheduler list
 	 */

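Before this change, register_ip_vs_scheduler() looked the name up with ip_vs_sched_getbyname() before taking __ip_vs_sched_lock, so two concurrent registrations of the same name could both pass the check and both be inserted (a check-then-act race). Moving the duplicate scan under the write lock makes check-and-insert atomic. A compact userspace sketch of the check-under-lock pattern, using a pthread mutex in place of the kernel rwlock; all names here are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_SCHEDS 16

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *scheds[MAX_SCHEDS];
static int nscheds;

/* Check-then-insert in one critical section: if the duplicate scan
 * ran before the lock was taken, two racing callers could both miss
 * the name and both insert it. */
static int register_name(const char *name)
{
	int i, ret = 0;

	pthread_mutex_lock(&sched_lock);
	for (i = 0; i < nscheds; i++) {
		if (strcmp(scheds[i], name) == 0) {
			ret = -1;          /* like returning -EINVAL */
			goto out;
		}
	}
	if (nscheds < MAX_SCHEDS)
		scheds[nscheds++] = name;
out:
	pthread_mutex_unlock(&sched_lock);
	return ret;
}

int main(void)
{
	register_name("lblc");
	printf("duplicate rejected: %d\n", register_name("lblc") != 0);
	return 0;
}
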
@@ -3003,17 +3003,13 @@ static int tcp_process_frto(struct sock *sk, int flag)
 	}
 
 	if (tp->frto_counter == 1) {
-		/* Sending of the next skb must be allowed or no F-RTO */
-		if (!tcp_send_head(sk) ||
-		    after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
-			  tp->snd_una + tp->snd_wnd)) {
-			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3),
-					    flag);
-			return 1;
-		}
-
+		/* tcp_may_send_now needs to see updated state */
 		tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
 		tp->frto_counter = 2;
+
+		if (!tcp_may_send_now(sk))
+			tcp_enter_frto_loss(sk, 2, flag);
+
 		return 1;
 	} else {
 		switch (sysctl_tcp_frto_response) {
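The removed block open-coded a window-space test that the existing tcp_may_send_now() already performs (together with Nagle and TSO considerations). Note the ordering constraint the new comment calls out: snd_cwnd is refreshed first because tcp_may_send_now() reads it. A toy sketch of that update-before-predicate dependency, with illustrative names rather than kernel code:

#include <stdio.h>

struct conn { int cwnd; int in_flight; };

/* Stand-in for a predicate like tcp_may_send_now(): it reads cwnd,
 * so the caller must refresh cwnd before asking the question. */
static int may_send_now(const struct conn *c)
{
	return c->in_flight < c->cwnd;
}

int main(void)
{
	struct conn c = { .cwnd = 1, .in_flight = 2 };

	printf("before update: %d\n", may_send_now(&c));  /* 0: stale cwnd */

	/* like: tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; */
	c.cwnd = c.in_flight + 2;

	printf("after update:  %d\n", may_send_now(&c));  /* 1: may send */
	return 0;
}
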
@@ -3069,6 +3065,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	}
 
 	prior_fackets = tp->fackets_out;
+	prior_in_flight = tcp_packets_in_flight(tp);
 
 	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
 		/* Window is constant, pure forward advance.

@@ -3108,8 +3105,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	if (!prior_packets)
 		goto no_queue;
 
-	prior_in_flight = tcp_packets_in_flight(tp);
-
 	/* See if we can take anything off of the retransmit queue. */
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);

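The two tcp_ack() hunks above move the prior_in_flight sample from just before tcp_clean_rtx_queue() up next to prior_fackets. tcp_packets_in_flight() is derived from counters that the ACK/SACK processing in between can modify, so the late sample no longer described the amount in flight when the ACK arrived. A tiny sketch of why the snapshot must precede the mutation; the types are illustrative, not the kernel's:

#include <stdio.h>

struct state { int packets_out; int sacked_out; };

static int in_flight(const struct state *s)
{
	return s->packets_out - s->sacked_out;
}

int main(void)
{
	struct state s = { .packets_out = 10, .sacked_out = 0 };

	/* Snapshot BEFORE the ACK handling mutates the counters... */
	int prior_in_flight = in_flight(&s);

	s.sacked_out = 3;    /* e.g. SACK tagging runs in between */

	/* ...a late sample reads 7, not the 10 segments that were
	 * actually in flight when the ACK arrived. */
	printf("prior=%d late=%d\n", prior_in_flight, in_flight(&s));
	return 0;
}
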
@@ -1162,8 +1162,7 @@ int tcp_may_send_now(struct sock *sk)
 	return (skb &&
 		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
 			     (tcp_skb_is_last(sk, skb) ?
-			      TCP_NAGLE_PUSH :
-			      tp->nonagle)));
+			      tp->nonagle : TCP_NAGLE_PUSH)));
 }
 
 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet

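The ternary was inverted: only the last skb in the write queue can be a small trailing segment that Nagle should hold back, so that is the one tested against the socket's tp->nonagle setting; an skb with more data queued behind it is tested with TCP_NAGLE_PUSH (send regardless). A condensed stand-in for the corrected selection, using simplified flags rather than the kernel types:

#include <stdio.h>

#define TCP_NAGLE_OFF  1   /* socket set TCP_NODELAY */
#define TCP_NAGLE_PUSH 2   /* override: do not let Nagle delay this test */

/* Simplified stand-in for the corrected selection: only the last
 * queued skb is a candidate for Nagle delay; anything with data
 * queued behind it is tested with TCP_NAGLE_PUSH. */
static int nagle_flags(int skb_is_last, int sock_nonagle)
{
	return skb_is_last ? sock_nonagle : TCP_NAGLE_PUSH;
}

int main(void)
{
	printf("last, Nagle on   : %d\n", nagle_flags(1, 0));             /* 0: may delay */
	printf("last, TCP_NODELAY: %d\n", nagle_flags(1, TCP_NAGLE_OFF)); /* 1: send now */
	printf("not last         : %d\n", nagle_flags(0, 0));             /* 2: push */
	return 0;
}
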
@@ -1245,6 +1245,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
 	self->flow = cmd;
 }
 
+#ifdef CONFIG_PROC_FS
 static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
 {
 	int ret=0;

@@ -1354,7 +1355,6 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
  *
  *
  */
-#ifdef CONFIG_PROC_FS
 static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
 				int *eof, void *unused)
 {

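ircomm_tty_line_info() is only called from ircomm_tty_read_proc(), so with CONFIG_PROC_FS=n it remained as a defined-but-unused static function. Moving the #ifdef up lets the helper and its sole caller compile out together. A generic illustration of the idiom; MY_FEATURE is a made-up stand-in for the config option:

#include <stdio.h>

#define MY_FEATURE 1   /* made-up stand-in; flip to 0 to compile both out */

#if MY_FEATURE
/* The helper lives inside the same #if block as its only caller;
 * otherwise a featureless build leaves an unused static function. */
static int line_info(void)
{
	return 42;
}

static void read_proc(void)
{
	printf("info: %d\n", line_info());
}
#endif

int main(void)
{
#if MY_FEATURE
	read_proc();
#endif
	return 0;
}
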
@@ -55,13 +55,13 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev,
 
 static int rose_rebuild_header(struct sk_buff *skb)
 {
+#ifdef CONFIG_INET
 	struct net_device *dev = skb->dev;
 	struct net_device_stats *stats = netdev_priv(dev);
 	unsigned char *bp = (unsigned char *)skb->data;
 	struct sk_buff *skbn;
 	unsigned int len;
 
-#ifdef CONFIG_INET
 	if (arp_find(bp + 7, skb)) {
 		return 1;
 	}

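Same idiom as the ircomm change, applied to locals: every variable rose_rebuild_header() declares is used only on the arp_find() path, so a CONFIG_INET=n build warned about all of them. Hoisting the #ifdef above the declarations scopes them to the INET build. A small standalone illustration; HAVE_INET is a made-up stand-in for the config option:

#include <stdio.h>

#define HAVE_INET 1   /* made-up stand-in; flip to 0: the locals vanish too */

static int rebuild(int first_byte)
{
#if HAVE_INET
	/* Locals used only on this path sit under the same guard, so a
	 * !HAVE_INET build has no unused-variable warnings to emit. */
	int header = first_byte + 7;
	int found = header % 2;

	return found;
#else
	return 1;
#endif
}

int main(void)
{
	printf("%d\n", rebuild(3));
	return 0;
}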