OpenHarmony / kernel_linux

Commit 3872b284
Authored Mar 02, 2011 by David S. Miller

Merge branch 'master' of
git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6

Parents: 07df5294 8a80c79a

Showing 20 changed files with 143 additions and 146 deletions (+143 -146)
include/net/ip_vs.h                        +17  -1
net/bridge/netfilter/ebtables.c            +2   -0
net/netfilter/ipset/Kconfig                +1   -0
net/netfilter/ipvs/ip_vs_conn.c            +29  -23
net/netfilter/ipvs/ip_vs_core.c            +3   -25
net/netfilter/ipvs/ip_vs_lblc.c            +4   -11
net/netfilter/ipvs/ip_vs_lblcr.c           +8   -19
net/netfilter/ipvs/ip_vs_lc.c              +2   -18
net/netfilter/ipvs/ip_vs_nq.c              +1   -1
net/netfilter/ipvs/ip_vs_rr.c              +1   -1
net/netfilter/ipvs/ip_vs_sched.c           +25  -0
net/netfilter/ipvs/ip_vs_sed.c             +1   -1
net/netfilter/ipvs/ip_vs_sh.c              +1   -1
net/netfilter/ipvs/ip_vs_sync.c            +2   -2
net/netfilter/ipvs/ip_vs_wlc.c             +3   -19
net/netfilter/ipvs/ip_vs_wrr.c             +8   -6
net/netfilter/ipvs/ip_vs_xmit.c            +27  -14
net/netfilter/nf_conntrack_proto_tcp.c     +2   -2
net/netfilter/nfnetlink_log.c              +1   -2
net/netfilter/xt_conntrack.c               +5   -0
include/net/ip_vs.h
@@ -494,7 +494,7 @@ struct ip_vs_conn_param {
  * IP_VS structure allocated for each dynamically scheduled connection
  */
 struct ip_vs_conn {
-	struct list_head	c_list;		/* hashed list heads */
+	struct hlist_node	c_list;		/* hashed list heads */
 #ifdef CONFIG_NET_NS
 	struct net		*net;		/* Name space */
 #endif
@@ -1019,6 +1019,8 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
 		       struct ip_vs_proto_data *pd);
 
+extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
+
 /*
  *      IPVS control data and functions (from ip_vs_ctl.c)
@@ -1241,6 +1243,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 #endif /* CONFIG_IP_VS_NFCT */
 
+static inline unsigned int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+	/*
+	 * We think the overhead of processing active connections is 256
+	 * times higher than that of inactive connections in average. (This
+	 * 256 times might not be accurate, we will change it later) We
+	 * use the following formula to estimate the overhead now:
+	 *		  dest->activeconns*256 + dest->inactconns
+	 */
+	return (atomic_read(&dest->activeconns) << 8) +
+		atomic_read(&dest->inactconns);
+}
+
 #endif /* __KERNEL__ */
 
 #endif	/* _NET_IP_VS_H */
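The new ip_vs_dest_conn_overhead() helper is now the single place the "an active connection costs roughly 256x an inactive one" estimate lives; the schedulers below switch to it. A minimal userspace sketch of the same arithmetic (plain unsigned ints stand in for the kernel's atomic_t, and the struct name is made up for illustration):

#include <stdio.h>

/* Simplified stand-in for struct ip_vs_dest; the kernel keeps these counters
 * in atomic_t and reads them with atomic_read(). */
struct dest_stub {
	unsigned int activeconns;
	unsigned int inactconns;
};

/* Same estimate as ip_vs_dest_conn_overhead():
 * overhead = activeconns*256 + inactconns (the <<8 is the *256). */
static unsigned int dest_conn_overhead(const struct dest_stub *dest)
{
	return (dest->activeconns << 8) + dest->inactconns;
}

int main(void)
{
	struct dest_stub d = { .activeconns = 3, .inactconns = 40 };

	printf("overhead = %u\n", dest_conn_overhead(&d)); /* 3*256 + 40 = 808 */
	return 0;
}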
net/bridge/netfilter/ebtables.c
@@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user,
 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
 		return -ENOMEM;
 
+	tmp.name[sizeof(tmp.name) - 1] = 0;
+
 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
 	if (!newinfo)
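The ebtables change forces a NUL terminator onto a fixed-size table name that was copied from userspace, so later string handling cannot run past the buffer. A userspace sketch of the same defensive step (struct and size are illustrative only):

#include <stdio.h>
#include <string.h>

/* Illustrative struct; the real ebt_replace has more fields and the name
 * arrives via copy_from_user(). */
struct replace_stub {
	char name[32];
};

int main(void)
{
	struct replace_stub tmp;

	/* pretend this buffer was filled verbatim from untrusted userspace */
	memset(tmp.name, 'A', sizeof(tmp.name));

	/* the added line in do_replace(): force a terminator */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	printf("name is now a bounded C string, length %zu\n", strlen(tmp.name));
	return 0;
}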
net/netfilter/ipset/Kconfig
 menuconfig IP_SET
 	tristate "IP set support"
 	depends on INET && NETFILTER
+	depends on NETFILTER_NETLINK
 	help
 	  This option adds IP set support to the kernel.
 	  In order to define and use the sets, you need the userspace utility
net/netfilter/ipvs/ip_vs_conn.c
@@ -59,7 +59,7 @@ static int ip_vs_conn_tab_mask __read_mostly;
 /*
  *  Connection hash table: for input and output packets lookups of IPVS
  */
-static struct list_head *ip_vs_conn_tab __read_mostly;
+static struct hlist_head *ip_vs_conn_tab __read_mostly;
 
 /*  SLAB cache for IPVS connections */
 static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
@@ -201,7 +201,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
 	spin_lock(&cp->lock);
 
 	if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
-		list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
+		hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
 		cp->flags |= IP_VS_CONN_F_HASHED;
 		atomic_inc(&cp->refcnt);
 		ret = 1;
@@ -234,7 +234,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
 	spin_lock(&cp->lock);
 
 	if (cp->flags & IP_VS_CONN_F_HASHED) {
-		list_del(&cp->c_list);
+		hlist_del(&cp->c_list);
 		cp->flags &= ~IP_VS_CONN_F_HASHED;
 		atomic_dec(&cp->refcnt);
 		ret = 1;
@@ -259,12 +259,13 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp;
+	struct hlist_node *n;
 
 	hash = ip_vs_conn_hashkey_param(p, false);
 
 	ct_read_lock(hash);
 
-	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
 		    p->cport == cp->cport && p->vport == cp->vport &&
 		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
@@ -345,12 +346,13 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp;
+	struct hlist_node *n;
 
 	hash = ip_vs_conn_hashkey_param(p, false);
 
 	ct_read_lock(hash);
 
-	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
 		if (!ip_vs_conn_net_eq(cp, p->net))
 			continue;
 		if (p->pe_data && p->pe->ct_match) {
@@ -394,6 +396,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp, *ret = NULL;
+	struct hlist_node *n;
 
 	/*
 	 *	Check for "full" addressed entries
@@ -402,7 +405,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 	ct_read_lock(hash);
 
-	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
 		    p->vport == cp->cport && p->cport == cp->dport &&
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
@@ -818,7 +821,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
 		return NULL;
 	}
 
-	INIT_LIST_HEAD(&cp->c_list);
+	INIT_HLIST_NODE(&cp->c_list);
 	setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
 	ip_vs_conn_net_set(cp, p->net);
 	cp->af = p->af;
@@ -894,8 +897,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
  */
 #ifdef CONFIG_PROC_FS
 struct ip_vs_iter_state {
-	struct seq_net_private	p;
-	struct list_head	*l;
+	struct seq_net_private	p;
+	struct hlist_head	*l;
 };
 
 static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
@@ -903,13 +906,14 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 	int idx;
 	struct ip_vs_conn *cp;
 	struct ip_vs_iter_state *iter = seq->private;
+	struct hlist_node *n;
 
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 		ct_read_lock_bh(idx);
-		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
 			if (pos-- == 0) {
 				iter->l = &ip_vs_conn_tab[idx];
-			return cp;
+				return cp;
 			}
 		}
 		ct_read_unlock_bh(idx);
@@ -930,7 +934,8 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ip_vs_conn *cp = v;
 	struct ip_vs_iter_state *iter = seq->private;
-	struct list_head *e, *l = iter->l;
+	struct hlist_node *e;
+	struct hlist_head *l = iter->l;
 	int idx;
 
 	++*pos;
@@ -938,15 +943,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		return ip_vs_conn_array(seq, 0);
 
 	/* more on same hash chain? */
-	if ((e = cp->c_list.next) != l)
-		return list_entry(e, struct ip_vs_conn, c_list);
+	if ((e = cp->c_list.next))
+		return hlist_entry(e, struct ip_vs_conn, c_list);
 
 	idx = l - ip_vs_conn_tab;
 	ct_read_unlock_bh(idx);
 
 	while (++idx < ip_vs_conn_tab_size) {
 		ct_read_lock_bh(idx);
-		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
 			iter->l = &ip_vs_conn_tab[idx];
 			return cp;
 		}
@@ -959,7 +964,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
 {
 	struct ip_vs_iter_state *iter = seq->private;
-	struct list_head *l = iter->l;
+	struct hlist_head *l = iter->l;
 
 	if (l)
 		ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -1148,13 +1153,14 @@ void ip_vs_random_dropentry(struct net *net)
 	 */
 	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
 		unsigned hash = net_random() & ip_vs_conn_tab_mask;
+		struct hlist_node *n;
 
 		/*
 		 *  Lock is actually needed in this loop.
 		 */
 		ct_write_lock_bh(hash);
 
-		list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
 				/* connection template */
 				continue;
@@ -1202,12 +1208,14 @@ static void ip_vs_conn_flush(struct net *net)
 flush_again:
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
+		struct hlist_node *n;
+
 		/*
 		 *  Lock is actually needed in this loop.
 		 */
 		ct_write_lock_bh(idx);
 
-		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
 			if (!ip_vs_conn_net_eq(cp, net))
 				continue;
 			IP_VS_DBG(4, "del connection\n");
@@ -1265,8 +1273,7 @@ int __init ip_vs_conn_init(void)
 	/*
 	 * Allocate the connection hash table and initialize its list heads
 	 */
-	ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
-				 sizeof(struct list_head));
+	ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
 	if (!ip_vs_conn_tab)
 		return -ENOMEM;
@@ -1286,9 +1293,8 @@ int __init ip_vs_conn_init(void)
 	IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
 		  sizeof(struct ip_vs_conn));
 
-	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-		INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
-	}
+	for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
+		INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
 
 	for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++)  {
 		rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
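The whole ip_vs_conn.c change is a conversion of the connection hash table from list_head buckets to hlist_head/hlist_node: an hlist bucket head is a single pointer, so a table of the same size costs half the memory. A minimal userspace re-creation of that pattern, assuming simplified types (this is not the kernel implementation, just the idea behind it):

#include <stddef.h>
#include <stdio.h>

/* single-pointer bucket head, two-pointer node: the hlist shape */
struct hlist_node {
	struct hlist_node *next, **pprev;
};

struct hlist_head {
	struct hlist_node *first;
};

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

/* toy stand-in for struct ip_vs_conn */
struct conn {
	int id;
	struct hlist_node c_list;
};

#define TAB_SIZE 4

int main(void)
{
	struct hlist_head tab[TAB_SIZE] = { { 0 } };
	struct conn a = { 1, { 0, 0 } }, b = { 2, { 0, 0 } };

	hlist_add_head(&a.c_list, &tab[1]);
	hlist_add_head(&b.c_list, &tab[1]);

	/* walk bucket 1 the way hlist_for_each_entry() would */
	for (struct hlist_node *n = tab[1].first; n; n = n->next) {
		struct conn *c = (struct conn *)((char *)n -
					offsetof(struct conn, c_list));
		printf("conn id=%d\n", c->id);
	}

	hlist_del(&a.c_list);
	return 0;
}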
net/netfilter/ipvs/ip_vs_core.c
@@ -729,7 +729,7 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
 #endif
 
 /* Handle relevant response ICMP messages - forward to the right
- * destination host. Used for NAT and local client.
+ * destination host.
  */
 static int handle_response_icmp(int af, struct sk_buff *skb,
 				union nf_inet_addr *snet,
@@ -979,7 +979,6 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
 }
 
 /* Handle response packets: rewrite addresses and send away...
- * Used for NAT and local client.
  */
 static unsigned int
 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
@@ -1280,7 +1279,6 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	unsigned int offset, ihl, verdict;
-	union nf_inet_addr snet;
 
 	*related = 1;
@@ -1339,17 +1337,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
 
 	/* The embedded headers contain source and dest in reverse order */
 	cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
-	if (!cp) {
-		/* The packet could also belong to a local client */
-		cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
-		if (cp) {
-			snet.ip = iph->saddr;
-			return handle_response_icmp(AF_INET, skb, &snet,
-						    cih->protocol, cp, pp,
-						    offset, ihl);
-		}
+	if (!cp)
 		return NF_ACCEPT;
-	}
 
 	verdict = NF_DROP;
@@ -1395,7 +1384,6 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	unsigned int offset, verdict;
-	union nf_inet_addr snet;
 	struct rt6_info *rt;
 
 	*related = 1;
@@ -1455,18 +1443,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 	ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
 
 	/* The embedded headers contain source and dest in reverse order */
 	cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
-	if (!cp) {
-		/* The packet could also belong to a local client */
-		cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
-		if (cp) {
-			ipv6_addr_copy(&snet.in6, &iph->saddr);
-			return handle_response_icmp(AF_INET6, skb, &snet,
-						    cih->nexthdr,
-						    cp, pp, offset,
-						    sizeof(struct ipv6hdr));
-		}
+	if (!cp)
 		return NF_ACCEPT;
-	}
 
 	verdict = NF_DROP;
net/netfilter/ipvs/ip_vs_lblc.c
@@ -389,12 +389,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 	int loh, doh;
 
 	/*
-	 * We think the overhead of processing active connections is fifty
-	 * times higher than that of inactive connections in average. (This
-	 * fifty times might not be accurate, we will change it later.) We
-	 * use the following formula to estimate the overhead:
-	 *		  dest->activeconns*50 + dest->inactconns
-	 * and the load:
+	 * We use the following formula to estimate the load:
 	 *		  (dest overhead) / dest->weight
 	 *
 	 * Remember -- no floats in kernel mode!!!
@@ -410,8 +405,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 			continue;
 		if (atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -425,8 +419,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;
@@ -510,7 +503,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* No cache entry or it is invalid, time to schedule */
 	dest = __ip_vs_lblc_schedule(svc);
 	if (!dest) {
-		IP_VS_ERR_RL("LBLC: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 		return NULL;
 	}
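The surviving comment "(dest overhead) / dest->weight ... no floats in kernel mode" refers to how the scheduler compares load ratios: instead of dividing, it cross-multiplies, so loh/lw > doh/dw becomes loh*dw > doh*lw. A small sketch of that comparison with made-up numbers (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int loh = 808, least_weight = 2;	/* current best */
	unsigned int doh = 1200, dest_weight = 4;	/* candidate */

	/* candidate wins if its overhead-per-weight ratio is lower */
	if ((unsigned long long)loh * dest_weight >
	    (unsigned long long)doh * least_weight)
		printf("candidate is less loaded, switch to it\n");
	else
		printf("keep current destination\n");
	return 0;
}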
net/netfilter/ipvs/ip_vs_lblcr.c
@@ -178,8 +178,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 		if ((atomic_read(&least->weight) > 0)
 		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -192,8 +191,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if ((loh * atomic_read(&dest->weight) >
 		     doh * atomic_read(&least->weight))
 		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -228,8 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
 	list_for_each_entry(e, &set->list, list) {
 		most = e->dest;
 		if (atomic_read(&most->weight) > 0) {
-			moh = atomic_read(&most->activeconns) * 50
-				+ atomic_read(&most->inactconns);
+			moh = ip_vs_dest_conn_overhead(most);
 			goto nextstage;
 		}
 	}
@@ -239,8 +236,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
   nextstage:
 	list_for_each_entry(e, &set->list, list) {
 		dest = e->dest;
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
 		if ((moh * atomic_read(&dest->weight) <
 		     doh * atomic_read(&most->weight))
@@ -563,12 +559,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 	int loh, doh;
 
 	/*
-	 * We think the overhead of processing active connections is fifty
-	 * times higher than that of inactive connections in average. (This
-	 * fifty times might not be accurate, we will change it later.) We
-	 * use the following formula to estimate the overhead:
-	 *		  dest->activeconns*50 + dest->inactconns
-	 * and the load:
+	 * We use the following formula to estimate the load:
 	 *		  (dest overhead) / dest->weight
 	 *
 	 * Remember -- no floats in kernel mode!!!
@@ -585,8 +576,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 		if (atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -600,8 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;
@@ -692,7 +681,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		/* The cache entry is invalid, time to schedule */
 		dest = __ip_vs_lblcr_schedule(svc);
 		if (!dest) {
-			IP_VS_ERR_RL("LBLCR: no destination available\n");
+			ip_vs_scheduler_err(svc, "no destination available");
 			read_unlock(&svc->sched_lock);
 			return NULL;
 		}
net/netfilter/ipvs/ip_vs_lc.c
@@ -22,22 +22,6 @@
 #include <net/ip_vs.h>
 
-static inline unsigned int
-ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
-{
-	/*
-	 * We think the overhead of processing active connections is 256
-	 * times higher than that of inactive connections in average. (This
-	 * 256 times might not be accurate, we will change it later) We
-	 * use the following formula to estimate the overhead now:
-	 *		  dest->activeconns*256 + dest->inactconns
-	 */
-	return (atomic_read(&dest->activeconns) << 8) +
-		atomic_read(&dest->inactconns);
-}
-
 /*
  *	Least Connection scheduling
  */
@@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
 		    atomic_read(&dest->weight) == 0)
 			continue;
-		doh = ip_vs_lc_dest_overhead(dest);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (!least || doh < loh) {
 			least = dest;
 			loh = doh;
@@ -70,7 +54,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	}
 
 	if (!least)
-		IP_VS_ERR_RL("LC: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 	else
 		IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
			      "inactconns %d\n",
net/netfilter/ipvs/ip_vs_nq.c
@@ -99,7 +99,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	}
 
 	if (!least) {
-		IP_VS_ERR_RL("NQ: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 		return NULL;
 	}
net/netfilter/ipvs/ip_vs_rr.c
@@ -72,7 +72,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		q = q->next;
 	} while (q != p);
 	write_unlock(&svc->sched_lock);
-	IP_VS_ERR_RL("RR: no destination available\n");
+	ip_vs_scheduler_err(svc, "no destination available");
 	return NULL;
 
   out:
net/netfilter/ipvs/ip_vs_sched.c
@@ -29,6 +29,7 @@
 #include <net/ip_vs.h>
 
+EXPORT_SYMBOL(ip_vs_scheduler_err);
 /*
  *  IPVS scheduler list
  */
@@ -146,6 +147,30 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
 		module_put(scheduler->module);
 }
 
+/*
+ * Common error output helper for schedulers
+ */
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
+{
+	if (svc->fwmark) {
+		IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
+			     svc->scheduler->name, svc->fwmark,
+			     svc->fwmark, msg);
+#ifdef CONFIG_IP_VS_IPV6
+	} else if (svc->af == AF_INET6) {
+		IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n",
+			     svc->scheduler->name,
+			     ip_vs_proto_name(svc->protocol),
+			     &svc->addr.in6, ntohs(svc->port), msg);
+#endif
+	} else {
+		IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
+			     svc->scheduler->name,
+			     ip_vs_proto_name(svc->protocol),
+			     &svc->addr.ip, ntohs(svc->port), msg);
+	}
+}
+
 /*
  *  Register a scheduler in the scheduler list
net/netfilter/ipvs/ip_vs_sed.c
@@ -87,7 +87,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			goto nextstage;
 		}
 	}
-	IP_VS_ERR_RL("SED: no destination available\n");
+	ip_vs_scheduler_err(svc, "no destination available");
 	return NULL;
 
 	/*
net/netfilter/ipvs/ip_vs_sh.c
@@ -223,7 +223,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	    || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
 	    || atomic_read(&dest->weight) <= 0
 	    || is_overloaded(dest)) {
-		IP_VS_ERR_RL("SH: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 		return NULL;
 	}
net/netfilter/ipvs/ip_vs_sync.c
@@ -374,8 +374,8 @@ get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
 	struct ip_vs_sync_buff *sb;
 
 	spin_lock_bh(&ipvs->sync_buff_lock);
-	if (ipvs->sync_buff && (time == 0 ||
-	    time_before(jiffies - ipvs->sync_buff->firstuse, time))) {
+	if (ipvs->sync_buff &&
+	    time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
 		sb = ipvs->sync_buff;
 		ipvs->sync_buff = NULL;
 	} else
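The sync-buffer condition now reads: hand the buffer to the sync thread once at least `time` ticks have elapsed since it was first used. A sketch of the kernel-style jiffies comparison used here, with illustrative values (the real time_after_eq() macro also type-checks its arguments):

#include <stdio.h>

#define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = 1000, firstuse = 400, timeout = 500;

	/* elapsed = jiffies - firstuse = 600 ticks, which is >= 500 */
	if (time_after_eq(jiffies - firstuse, timeout))
		printf("buffer is old enough, pass it to the sync thread\n");
	else
		printf("keep filling the current buffer\n");
	return 0;
}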
net/netfilter/ipvs/ip_vs_wlc.c
@@ -27,22 +27,6 @@
 #include <net/ip_vs.h>
 
-static inline unsigned int
-ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
-{
-	/*
-	 * We think the overhead of processing active connections is 256
-	 * times higher than that of inactive connections in average. (This
-	 * 256 times might not be accurate, we will change it later) We
-	 * use the following formula to estimate the overhead now:
-	 *		  dest->activeconns*256 + dest->inactconns
-	 */
-	return (atomic_read(&dest->activeconns) << 8) +
-		atomic_read(&dest->inactconns);
-}
-
 /*
  *	Weighted Least Connection scheduling
  */
@@ -71,11 +55,11 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
 		    atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = ip_vs_wlc_dest_overhead(least);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
-	IP_VS_ERR_RL("WLC: no destination available\n");
+	ip_vs_scheduler_err(svc, "no destination available");
 	return NULL;
 
 	/*
@@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
-		doh = ip_vs_wlc_dest_overhead(dest);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;
net/netfilter/ipvs/ip_vs_wrr.c
@@ -147,8 +147,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	if (mark->cl == mark->cl->next) {
 		/* no dest entry */
-		IP_VS_ERR_RL("WRR: no destination available: "
-			     "no destinations present\n");
+		ip_vs_scheduler_err(svc,
+			"no destination available: "
+			"no destinations present");
 		dest = NULL;
 		goto out;
 	}
@@ -162,8 +163,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		 */
 		if (mark->cw == 0) {
 			mark->cl = &svc->destinations;
-			IP_VS_ERR_RL("WRR: no destination "
-				     "available\n");
+			ip_vs_scheduler_err(svc,
+				"no destination available");
 			dest = NULL;
 			goto out;
 		}
@@ -185,8 +186,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 				/* back to the start, and no dest is found.
 				   It is only possible when all dests are OVERLOADED */
 				dest = NULL;
-				IP_VS_ERR_RL("WRR: no destination available: "
-					     "all destinations are overloaded\n");
+				ip_vs_scheduler_err(svc,
					"no destination available: "
					"all destinations are overloaded");
 				goto out;
 			}
 		}
net/netfilter/ipvs/ip_vs_xmit.c
@@ -43,6 +43,13 @@
 #include <net/ip_vs.h>
 
+enum {
+	IP_VS_RT_MODE_LOCAL	= 1, /* Allow local dest */
+	IP_VS_RT_MODE_NON_LOCAL	= 2, /* Allow non-local dest */
+	IP_VS_RT_MODE_RDR	= 4, /* Allow redirect from remote daddr to
+				      * local
+				      */
+};
+
 
 /*
  *      Destination cache to speed up outgoing route lookup
@@ -77,11 +84,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
 	return dst;
 }
 
-/*
- * Get route to destination or remote server
- * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
- *	    &4=Allow redirect from remote daddr to local
- */
+/* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
 		   __be32 daddr, u32 rtos, int rt_mode)
@@ -126,15 +129,16 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
 	}
 
 	local = rt->rt_flags & RTCF_LOCAL;
-	if (!((local ? 1 : 2) & rt_mode)) {
+	if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
+	      rt_mode)) {
 		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
 			     (rt->rt_flags & RTCF_LOCAL) ?
 			     "local":"non-local", &rt->rt_dst);
 		ip_rt_put(rt);
 		return NULL;
 	}
-	if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
-					 ort->rt_flags & RTCF_LOCAL)) {
+	if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
+	    !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
 		IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
 			     "requires NAT method, dest: %pI4\n",
 			     &ip_hdr(skb)->daddr, &rt->rt_dst);
@@ -383,8 +387,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	EnterFunction(10);
 
-	if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
-				      RT_TOS(iph->tos), 2)))
+	if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
+				      IP_VS_RT_MODE_NON_LOCAL)))
 		goto tx_error_icmp;
 
 	/* MTU checking */
@@ -512,7 +516,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	}
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-				      RT_TOS(iph->tos), 1|2|4)))
+				      RT_TOS(iph->tos),
+				      IP_VS_RT_MODE_LOCAL |
+					IP_VS_RT_MODE_NON_LOCAL |
+					IP_VS_RT_MODE_RDR)))
 		goto tx_error_icmp;
 	local = rt->rt_flags & RTCF_LOCAL;
 	/*
@@ -755,7 +762,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	EnterFunction(10);
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-				      RT_TOS(tos), 1|2)))
+				      RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
+						   IP_VS_RT_MODE_NON_LOCAL)))
 		goto tx_error_icmp;
 	if (rt->rt_flags & RTCF_LOCAL) {
 		ip_rt_put(rt);
@@ -984,7 +992,9 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	EnterFunction(10);
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-				      RT_TOS(iph->tos), 1|2)))
+				      RT_TOS(iph->tos),
+				      IP_VS_RT_MODE_LOCAL |
+				      IP_VS_RT_MODE_NON_LOCAL)))
 		goto tx_error_icmp;
 	if (rt->rt_flags & RTCF_LOCAL) {
 		ip_rt_put(rt);
@@ -1128,7 +1138,10 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	 */
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-				      RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
+				      RT_TOS(ip_hdr(skb)->tos),
+				      IP_VS_RT_MODE_LOCAL |
+					IP_VS_RT_MODE_NON_LOCAL |
+					IP_VS_RT_MODE_RDR)))
 		goto tx_error_icmp;
 
 	local = rt->rt_flags & RTCF_LOCAL;
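The ip_vs_xmit.c change replaces the magic rt_mode values 1|2|4 with a named flag enum, and every caller now spells out which destination kinds it accepts. A userspace sketch of the same bit-flag pattern and the permission test from __ip_vs_get_out_rt() (the enum values mirror the new ones; the surrounding code is illustrative):

#include <stdio.h>

enum {
	RT_MODE_LOCAL     = 1,  /* allow local destination */
	RT_MODE_NON_LOCAL = 2,  /* allow non-local destination */
	RT_MODE_RDR       = 4,  /* allow redirect to a local address */
};

static void check_route(int local, int rt_mode)
{
	/* same shape of test as the rewritten condition: is this kind of
	 * destination permitted by the caller's rt_mode? */
	if (!((local ? RT_MODE_LOCAL : RT_MODE_NON_LOCAL) & rt_mode))
		printf("stopping traffic: dest type not allowed by rt_mode\n");
	else
		printf("dest type allowed\n");
}

int main(void)
{
	/* NAT-style caller accepts everything, bypass-style only non-local */
	check_route(1, RT_MODE_LOCAL | RT_MODE_NON_LOCAL | RT_MODE_RDR);
	check_route(1, RT_MODE_NON_LOCAL);
	return 0;
}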
net/netfilter/nf_conntrack_proto_tcp.c
@@ -227,11 +227,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *	sCL -> sIV
  */
 /*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
-/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
 /*
  *	sSS -> sSR	Standard open.
  *	sS2 -> sSR	Simultaneous open
- *	sSR -> sSR	Retransmitted SYN/ACK.
+ *	sSR -> sIG	Retransmitted SYN/ACK, ignore it.
  *	sES -> sIG	Late retransmitted SYN/ACK?
  *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
 *	sCW -> sIG
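This conntrack change flips a single cell of a packet-type x state transition table: a SYN/ACK seen while the flow is already in SYN_RECV is now classified as "ignore" instead of re-entering SYN_RECV. A toy table lookup in the same spirit (states, packet kinds and the other cells are illustrative, not the kernel's full tcp_conntracks[][] contents):

#include <stdio.h>

enum tcp_state { sNO, sSS, sSR, sES, sIG, STATE_MAX };
enum pkt_kind  { PKT_SYN, PKT_SYNACK, PKT_MAX };

static const enum tcp_state transitions[PKT_MAX][STATE_MAX] = {
	/*              sNO  sSS  sSR  sES  sIG */
	[PKT_SYN]    = { sSS, sSS, sIG, sIG, sIG },
	[PKT_SYNACK] = { sIG, sSR, sIG, sIG, sIG }, /* sSR cell: sSR -> sIG */
};

int main(void)
{
	enum tcp_state state = sSR;	/* already saw the original SYN/ACK */
	enum tcp_state next = transitions[PKT_SYNACK][state];

	printf("retransmitted SYN/ACK in SYN_RECV -> %s\n",
	       next == sIG ? "ignored" : "state change");
	return 0;
}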
net/netfilter/nfnetlink_log.c
@@ -376,7 +376,6 @@ __build_packet_message(struct nfulnl_instance *inst,
 			unsigned int hooknum,
 			const struct net_device *indev,
 			const struct net_device *outdev,
-			const struct nf_loginfo *li,
 			const char *prefix, unsigned int plen)
 {
 	struct nfulnl_msg_packet_hdr pmsg;
@@ -652,7 +651,7 @@ nfulnl_log_packet(u_int8_t pf,
 	inst->qlen++;
 
 	__build_packet_message(inst, skb, data_len, pf,
-				hooknum, in, out, li, prefix, plen);
+				hooknum, in, out, prefix, plen);
 
 	if (inst->qlen >= qthreshold)
 		__nfulnl_flush(inst);
net/netfilter/xt_conntrack.c
@@ -272,6 +272,11 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
 {
 	int ret;
 
+	if (strcmp(par->table, "raw") == 0) {
+		pr_info("state is undetermined at the time of raw table\n");
+		return -EINVAL;
+	}
+
 	ret = nf_ct_l3proto_try_module_get(par->family);
 	if (ret < 0)
 		pr_info("cannot load conntrack support for proto=%u\n",