Commit 2d175d43 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  [TIPC]: Add tipc_config.h to include/linux/Kbuild.
  [WAN]: lmc_ioctl: don't return with locks held
  [SUNRPC]: fix rpc debugging
  [TCP]: Saner thash_entries default with much memory.
  [SUNRPC] rpc_rdma: we need to cast u64 to unsigned long long for printing
  [IPv4] SNMP: Refer correct memory location to display ICMP out-going statistics
  [NET]: Fix error reporting in sys_socketpair().
  [NETFILTER]: nf_ct_alloc_hashtable(): use __GFP_NOWARN
  [NET]: Fix race between poll_napi() and net_rx_action()
  [TCP] MD5: Remove some more unnecessary casting.
  [TCP] vegas: Fix a bug in disabling slow start by gamma parameter.
  [IPVS]: use proper timeout instead of fixed value
  [IPV6] NDISC: Fix setting base_reachable_time_ms variable.
@@ -142,8 +142,9 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 	 * To date internally, just copy this out to the user.
 	 */
 	case LMCIOCGINFO: /*fold01*/
-		if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof (lmc_ctl_t)))
-			return -EFAULT;
-		ret = 0;
+		if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
+			ret = -EFAULT;
+		else
+			ret = 0;
 		break;
 
@@ -159,8 +160,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 			break;
 		}
 
-		if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
-			return -EFAULT;
+		if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+			ret = -EFAULT;
+			break;
+		}
 
 		sc->lmc_media->set_status (sc, &ctl);
 
@@ -190,8 +193,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 			break;
 		}
 
-		if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t)))
-			return -EFAULT;
+		if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) {
+			ret = -EFAULT;
+			break;
+		}
 
 		if (new_type == old_type)
 
@@ -229,8 +234,9 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 		sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
 
 		if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
-				 sizeof (struct lmc_xinfo)))
-			return -EFAULT;
-		ret = 0;
+				 sizeof(struct lmc_xinfo)))
+			ret = -EFAULT;
+		else
+			ret = 0;
 
 		break;
 
@@ -262,8 +268,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 		if (copy_to_user(ifr->ifr_data, &sc->stats,
 				 sizeof (struct lmc_statistics)))
-			return -EFAULT;
-
-		ret = 0;
+			ret = -EFAULT;
+		else
+			ret = 0;
 
 		break;
 
@@ -292,8 +298,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 			break;
 		}
 
-		if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
-			return -EFAULT;
+		if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+			ret = -EFAULT;
+			break;
+		}
 
 		sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
 		sc->ictl.circuit_type = ctl.circuit_type;
 		ret = 0;
 
@@ -318,12 +326,15 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 #ifdef DEBUG
 	case LMCIOCDUMPEVENTLOG:
-		if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof (u32)))
-			return -EFAULT;
+		if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
+			ret = -EFAULT;
+			break;
+		}
 		if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf)))
-			return -EFAULT;
-		ret = 0;
+			ret = -EFAULT;
+		else
+			ret = 0;
 
 		break;
 #endif /* end ifdef _DBG_EVENTLOG */
 	case LMCIOCT1CONTROL: /*fold01*/
 
@@ -346,8 +357,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 	     */
 		netif_stop_queue(dev);
 
-		if (copy_from_user(&xc, ifr->ifr_data, sizeof (struct lmc_xilinx_control)))
-			return -EFAULT;
+		if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
+			ret = -EFAULT;
+			break;
+		}
 		switch(xc.command){
 		case lmc_xilinx_reset: /*fold02*/
 		{
......
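The lmc_ioctl() hunks above all apply one pattern: the function takes the driver's spinlock near the top and releases it at the bottom, so any early `return -EFAULT` inside a `case` exits with the lock still held. Each early return becomes `ret = -EFAULT` plus a `break` (or an `else`) so every path falls through to the single unlock. A minimal userspace sketch of the same single-exit discipline, using a pthread mutex (all names here are illustrative, not from the driver):

#include <errno.h>
#include <pthread.h>
#include <string.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static char dev_state[64];

/* Hypothetical ioctl-style handler: every error path sets `ret`
 * and falls through to the single unlock at the end, instead of
 * returning with dev_lock still held. */
int dev_ioctl(int cmd, char *user_buf, size_t len)
{
    int ret = 0;

    pthread_mutex_lock(&dev_lock);
    switch (cmd) {
    case 0: /* "get info" */
        if (len < sizeof(dev_state))
            ret = -EFAULT;  /* was: return -EFAULT; -- would leak the lock */
        else
            memcpy(user_buf, dev_state, sizeof(dev_state));
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    pthread_mutex_unlock(&dev_lock);  /* reached on every path */
    return ret;
}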
@@ -149,6 +149,7 @@ header-y += ticable.h
 header-y += times.h
 header-y += tiocl.h
 header-y += tipc.h
+header-y += tipc_config.h
 header-y += toshiba.h
 header-y += ultrasound.h
 header-y += un.h
......
@@ -2172,6 +2172,14 @@ static void net_rx_action(struct softirq_action *h)
 
 		weight = n->weight;
 
-		work = n->poll(n, weight);
+		/* This NAPI_STATE_SCHED test is for avoiding a race
+		 * with netpoll's poll_napi(). Only the entity which
+		 * obtains the lock and sees NAPI_STATE_SCHED set will
+		 * actually make the ->poll() call. Therefore we avoid
+		 * accidently calling ->poll() when NAPI is not scheduled.
+		 */
+		work = 0;
+		if (test_bit(NAPI_STATE_SCHED, &n->state))
+			work = n->poll(n, weight);
 
 		WARN_ON_ONCE(work > weight);
......
@@ -116,24 +116,43 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  * network adapter, forcing superfluous retries and possibly timeouts.
  * Thus, we set our budget to greater than 1.
  */
-static void poll_napi(struct netpoll *np)
+static int poll_one_napi(struct netpoll_info *npinfo,
+			 struct napi_struct *napi, int budget)
 {
-	struct netpoll_info *npinfo = np->dev->npinfo;
-	struct napi_struct *napi;
-	int budget = 16;
+	int work;
+
+	/* net_rx_action's ->poll() invocations and our's are
+	 * synchronized by this test which is only made while
+	 * holding the napi->poll_lock.
+	 */
+	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
+		return budget;
+
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
+	atomic_inc(&trapped);
+
+	work = napi->poll(napi, budget);
+
+	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+
+	return budget - work;
+}
+
+static void poll_napi(struct netpoll *np)
+{
+	struct netpoll_info *npinfo = np->dev->npinfo;
+	struct napi_struct *napi;
+	int budget = 16;
 
 	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
-		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
-		    napi->poll_owner != smp_processor_id() &&
+		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			npinfo->rx_flags |= NETPOLL_RX_DROP;
-			atomic_inc(&trapped);
-
-			napi->poll(napi, budget);
-
-			atomic_dec(&trapped);
-			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+			budget = poll_one_napi(npinfo, napi, budget);
 			spin_unlock(&napi->poll_lock);
+
+			if (!budget)
+				break;
 		}
 	}
 }
......
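The two hunks above are the two halves of the race fix: net_rx_action() now calls ->poll() only after observing NAPI_STATE_SCHED while holding napi->poll_lock, and netpoll's new poll_one_napi() makes the same test under the same lock, so at most one of them can invoke ->poll() on a NAPI context that is not scheduled. A compilable sketch of that discipline with C11 atomics and a trylock (the types and names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <pthread.h>

struct fake_napi {
    pthread_mutex_t poll_lock;
    atomic_bool sched;  /* stands in for NAPI_STATE_SCHED */
    int (*poll)(struct fake_napi *, int);
};

/* Either caller (the softirq path or the netpoll path) must take the
 * lock first and then re-check `sched`; whoever sees the flag clear
 * skips the ->poll() call entirely. */
int poll_one(struct fake_napi *n, int budget)
{
    int work = 0;

    if (pthread_mutex_trylock(&n->poll_lock) != 0)
        return budget;               /* someone else is polling */

    if (atomic_load(&n->sched))      /* test made under the lock */
        work = n->poll(n, budget);

    pthread_mutex_unlock(&n->poll_lock);
    return budget - work;
}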
@@ -72,7 +72,6 @@ struct ip_vs_sync_thread_data {
 	int state;
 };
 
-#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
 #define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
 #define FULL_CONN_SIZE  \
 (sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
@@ -284,6 +283,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
 	struct ip_vs_sync_conn *s;
 	struct ip_vs_sync_conn_options *opt;
 	struct ip_vs_conn *cp;
+	struct ip_vs_protocol *pp;
 	char *p;
 	int i;
@@ -342,7 +342,8 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
 			p += SIMPLE_CONN_SIZE;
 
 		atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
-		cp->timeout = IP_VS_SYNC_CONN_TIMEOUT;
+		pp = ip_vs_proto_get(s->protocol);
+		cp->timeout = pp->timeout_table[cp->state];
 		ip_vs_conn_put(cp);
 
 		if (p > buffer+buflen) {
......
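Rather than stamping every synced connection with the fixed IP_VS_SYNC_CONN_TIMEOUT of three minutes, the backup now asks the connection's transport protocol for the timeout that matches the connection's current state, so for example an established entry keeps the protocol's established timeout. A runnable sketch of that table lookup (the states and values here are made up for illustration):

#include <stdio.h>

enum conn_state { CONN_SYN_RECV, CONN_ESTABLISHED, CONN_FIN_WAIT, CONN_STATES };

/* Hypothetical per-protocol timeout table, in seconds; the real tables
 * live in each ip_vs_protocol and are indexed by connection state. */
static const int tcp_timeout_table[CONN_STATES] = {
    [CONN_SYN_RECV]    = 60,
    [CONN_ESTABLISHED] = 15 * 60,
    [CONN_FIN_WAIT]    = 2 * 60,
};

int main(void)
{
    enum conn_state state = CONN_ESTABLISHED;
    /* was: timeout = 3 * 60 for every entry, regardless of state */
    int timeout = tcp_timeout_table[state];
    printf("timeout for state %d: %d s\n", state, timeout);
    return 0;
}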
@@ -304,7 +304,7 @@ static void icmp_put(struct seq_file *seq)
 	for (i=0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
 			snmp_fold_field((void **) icmpmsg_statistics,
-				icmpmibmap[i].index));
+				icmpmibmap[i].index | 0x100));
 }
 
 /*
......
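The ICMP message MIB keeps the In and Out counters for each message type in one array, with the out-going counter offset from the incoming one, so ORing the index with 0x100 selects the out-going half; the unfixed loop printed the incoming counters under the out-going column headings. A tiny model of that layout (the array size and names are assumptions for illustration):

#include <stdio.h>

#define MIB_OUT 0x100                     /* out-going half of the array */
static unsigned long icmpmsg_stats[512];  /* [0..255] in, [256..511] out */

int main(void)
{
    int echo_reply = 0;                      /* ICMP type 0 */
    icmpmsg_stats[echo_reply] = 7;           /* received */
    icmpmsg_stats[echo_reply | MIB_OUT] = 3; /* sent */

    /* The bug printed index `echo_reply` here, i.e. the In counter. */
    printf("OutEchoReps: %lu\n", icmpmsg_stats[echo_reply | MIB_OUT]);
    return 0;
}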
@@ -2453,7 +2453,7 @@ void __init tcp_init(void)
 					0,
 					&tcp_hashinfo.ehash_size,
 					NULL,
-					0);
+					thash_entries ? 0 : 512 * 1024);
 	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
 	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
 		rwlock_init(&tcp_hashinfo.ehash[i].lock);
......
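alloc_large_system_hash() sizes the established-connections hash from available memory unless a nonzero limit is passed as the last argument shown. On machines with very large memory the unlimited default produced an oversized ehash, so the patch caps the default at 512K entries while an explicit thash_entries boot parameter remains uncapped. The decision in isolation (a sketch, not the kernel function):

#include <stdio.h>

/* Illustrative stand-in for the limit argument passed to
 * alloc_large_system_hash(): 0 means "no cap, size from memory". */
static unsigned long ehash_limit(unsigned long thash_entries)
{
    return thash_entries ? 0 : 512 * 1024;
}

int main(void)
{
    printf("no boot param -> cap %lu entries\n", ehash_limit(0));
    printf("thash_entries=2097152 -> cap %lu (uncapped)\n",
           ehash_limit(2097152));
    return 0;
}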
@@ -858,16 +858,16 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 		      u8 *newkey, u8 newkeylen)
 {
 	/* Add Key to the list */
-	struct tcp4_md5sig_key *key;
+	struct tcp_md5sig_key *key;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp4_md5sig_key *keys;
 
-	key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
+	key = tcp_v4_md5_do_lookup(sk, addr);
 	if (key) {
 		/* Pre-existing entry - just update that one. */
-		kfree(key->base.key);
-		key->base.key = newkey;
-		key->base.keylen = newkeylen;
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
 	} else {
 		struct tcp_md5sig_info *md5sig;
......
@@ -266,9 +266,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 			 */
 			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
 
-			if (tp->snd_cwnd <= tp->snd_ssthresh) {
-				/* Slow start.  */
-				if (diff > gamma) {
+			if (diff > gamma && tp->snd_ssthresh > 2 ) {
 				/* Going too fast. Time to slow down
 				 * and switch to congestion avoidance.
 				 */
@@ -285,7 +283,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 						    (target_cwnd >>
 						     V_PARAM_SHIFT)+1);
 
-			}
+			} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+				/* Slow start.  */
 				tcp_slow_start(tp);
 			} else {
 				/* Congestion avoidance. */
......
@@ -1670,7 +1670,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
 					       filp, buffer, lenp, ppos);
 	else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) ||
-		 (strcmp(ctl->procname, "base_reacable_time_ms") == 0))
+		 (strcmp(ctl->procname, "base_reachable_time_ms") == 0))
 		ret = proc_dointvec_ms_jiffies(ctl, write,
 					       filp, buffer, lenp, ppos);
 	else
......
@@ -561,16 +561,16 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
 			     char *newkey, u8 newkeylen)
 {
 	/* Add key to the list */
-	struct tcp6_md5sig_key *key;
+	struct tcp_md5sig_key *key;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp6_md5sig_key *keys;
 
-	key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+	key = tcp_v6_md5_do_lookup(sk, peer);
 	if (key) {
 		/* modify existing entry - just update that one */
-		kfree(key->base.key);
-		key->base.key = newkey;
-		key->base.keylen = newkeylen;
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
 	} else {
 		/* reallocate new list if current one is full. */
 		if (!tp->md5sig_info) {
......
@@ -999,7 +999,7 @@ struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
 	*vmalloced = 0;
 
 	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
-	hash = (void*)__get_free_pages(GFP_KERNEL,
+	hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
 				       get_order(sizeof(struct hlist_head)
 						 * size));
 	if (!hash) {
......
@@ -1250,11 +1250,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
 		goto out_release_both;
 
 	fd1 = sock_alloc_fd(&newfile1);
-	if (unlikely(fd1 < 0))
+	if (unlikely(fd1 < 0)) {
+		err = fd1;
 		goto out_release_both;
+	}
 
 	fd2 = sock_alloc_fd(&newfile2);
 	if (unlikely(fd2 < 0)) {
+		err = fd2;
 		put_filp(newfile1);
 		put_unused_fd(fd1);
 		goto out_release_both;
......
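The socketpair bug was a stale error code: when sock_alloc_fd() failed, the old code jumped to out_release_both without copying the negative fd into err, so the syscall could return whatever err happened to hold at that point. The fix assigns err = fd1 / err = fd2 before the goto. The anti-pattern in miniature (runnable, with a stubbed allocator):

#include <stdio.h>

static int alloc_fd_stub(void) { return -24; /* e.g. -EMFILE */ }

int socketpair_like(void)
{
    int err = 0;     /* still 0 from the earlier, successful steps */
    int fd = alloc_fd_stub();
    if (fd < 0) {
        err = fd;    /* the fix: without this line we would "goto out"
                      * and report stale success (0) to the caller */
        goto out;
    }
    /* ... install the fd ... */
out:
    return err;
}

int main(void)
{
    printf("socketpair_like() = %d\n", socketpair_like());
    return 0;
}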
@@ -221,8 +221,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 				seg->mr_base);
 			dprintk("RPC: %s: read chunk "
 				"elem %d@0x%llx:0x%x pos %d (%s)\n", __func__,
-				seg->mr_len, seg->mr_base, seg->mr_rkey, pos,
-				n < nsegs ? "more" : "last");
+				seg->mr_len, (unsigned long long)seg->mr_base,
+				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
 			cur_rchunk++;
 			r_xprt->rx_stats.read_chunk_count++;
 		} else {	/* write/reply */
@@ -234,8 +234,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 			dprintk("RPC: %s: %s chunk "
 				"elem %d@0x%llx:0x%x (%s)\n", __func__,
 				(type == rpcrdma_replych) ? "reply" : "write",
-				seg->mr_len, seg->mr_base, seg->mr_rkey,
-				n < nsegs ? "more" : "last");
+				seg->mr_len, (unsigned long long)seg->mr_base,
+				seg->mr_rkey, n < nsegs ? "more" : "last");
 			cur_wchunk++;
 			if (type == rpcrdma_replych)
 				r_xprt->rx_stats.reply_chunk_count++;
@@ -577,7 +577,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **ipt
 			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
 				__func__,
 				ntohl(seg->rs_length),
-				off,
+				(unsigned long long)off,
 				ntohl(seg->rs_handle));
 		}
 		total_len += ntohl(seg->rs_length);
......
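The dprintk changes are about portability rather than cosmetics: seg->mr_base and off are u64, which is unsigned long on some 64-bit architectures and unsigned long long on others, so passing one straight to a %llx conversion is a format mismatch on the former and draws printk format warnings. Casting to unsigned long long satisfies the format everywhere; in userspace C, the PRIx64 macro is the equivalent tool (a standalone example):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t base = 0xdeadbeefcafef00dULL;

    /* Portable either way: cast to a known type, or use PRIx64. */
    printf("base = 0x%llx\n", (unsigned long long)base);
    printf("base = 0x%" PRIx64 "\n", base);
    return 0;
}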