Commit 4805347c
Authored Apr 19, 2011 by David S. Miller

Merge branch 'master' of
git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6

Parents: e1943424 91eb7c08
12 changed files with 101 additions and 98 deletions (+101 -98)
include/linux/netfilter/ipset/ip_set_getport.h  +2 -0
include/linux/netfilter/x_tables.h  +42 -54
net/ipv4/netfilter/arp_tables.c  +11 -7
net/ipv4/netfilter/ip_tables.c  +12 -16
net/ipv6/netfilter/ip6_tables.c  +12 -7
net/netfilter/ipset/ip_set_getport.c  +15 -1
net/netfilter/ipset/ip_set_hash_ipport.c  +1 -1
net/netfilter/ipset/ip_set_hash_ipportip.c  +1 -1
net/netfilter/ipset/ip_set_hash_ipportnet.c  +1 -1
net/netfilter/ipset/ip_set_hash_netport.c  +1 -1
net/netfilter/ipvs/ip_vs_ctl.c  +0 -3
net/netfilter/x_tables.c  +3 -6
include/linux/netfilter/ipset/ip_set_getport.h

@@ -22,7 +22,9 @@ static inline bool ip_set_proto_with_ports(u8 proto)
 {
 	switch (proto) {
 	case IPPROTO_TCP:
+	case IPPROTO_SCTP:
 	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
 		return true;
 	}
 	return false;
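With these two added cases, ip_set_proto_with_ports() now reports SCTP and UDPLITE as port-carrying protocols, matching the port extraction added to net/netfilter/ipset/ip_set_getport.c below. As a hedged illustration of how a set type consumes this helper (example_src_port() is invented for this note; ip_set_get_ip4_port() is the extractor declared in this same header):

/* Illustrative sketch only -- not part of this commit. */
static bool example_src_port(const struct sk_buff *skb, __be16 *port)
{
	u8 proto;

	/* Fill *port and *proto from the transport header. */
	if (!ip_set_get_ip4_port(skb, true, port, &proto))
		return false;

	/* Trust the port only for protocols that carry one:
	 * TCP, SCTP, UDP and UDPLITE after this change. */
	return ip_set_proto_with_ports(proto);
}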
include/linux/netfilter/x_tables.h

@@ -456,72 +456,60 @@ extern void xt_proto_fini(struct net *net, u_int8_t af);
 extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
 extern void xt_free_table_info(struct xt_table_info *info);
 
-/*
- * Per-CPU spinlock associated with per-cpu table entries, and
- * with a counter for the "reading" side that allows a recursive
- * reader to avoid taking the lock and deadlocking.
- *
- * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
- * It needs to ensure that the rules are not being changed while the packet
- * is being processed. In some cases, the read lock will be acquired
- * twice on the same CPU; this is okay because of the count.
- *
- * "writing" is used when reading counters.
- *  During replace any readers that are using the old tables have to complete
- *  before freeing the old table. This is handled by the write locking
- *  necessary for reading the counters.
+/**
+ * xt_recseq - recursive seqcount for netfilter use
+ *
+ * Packet processing changes the seqcount only if no recursion happened
+ * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
+ * because we use the normal seqcount convention :
+ * Low order bit set to 1 if a writer is active.
  */
-struct xt_info_lock {
-	seqlock_t lock;
-	unsigned char readers;
-};
-DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
+DECLARE_PER_CPU(seqcount_t, xt_recseq);
 
-/*
- * Note: we need to ensure that preemption is disabled before acquiring
- * the per-cpu-variable, so we do it as a two step process rather than
- * using "spin_lock_bh()".
- *
- * We _also_ need to disable bottom half processing before updating our
- * nesting count, to make sure that the only kind of re-entrancy is this
- * code being called by itself: since the count+lock is not an atomic
- * operation, we can allow no races.
+/**
+ * xt_write_recseq_begin - start of a write section
  *
- * _Only_ that special combination of being per-cpu and never getting
- * re-entered asynchronously means that the count is safe.
+ * Begin packet processing : all readers must wait the end
+ * 1) Must be called with preemption disabled
+ * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * Returns :
+ *  1 if no recursion on this cpu
+ *  0 if recursion detected
  */
-static inline void xt_info_rdlock_bh(void)
+static inline unsigned int xt_write_recseq_begin(void)
 {
-	struct xt_info_lock *lock;
+	unsigned int addend;
 
-	local_bh_disable();
-	lock = &__get_cpu_var(xt_info_locks);
-	if (likely(!lock->readers++))
-		write_seqlock(&lock->lock);
-}
+	/*
+	 * Low order bit of sequence is set if we already
+	 * called xt_write_recseq_begin().
+	 */
+	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;
 
-static inline void xt_info_rdunlock_bh(void)
-{
-	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
+	/*
+	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1
+	 * We dont check addend value to avoid a test and conditional jump,
+	 * since addend is most likely 1
+	 */
+	__this_cpu_add(xt_recseq.sequence, addend);
+	smp_wmb();
 
-	if (likely(!--lock->readers))
-		write_sequnlock(&lock->lock);
-	local_bh_enable();
+	return addend;
 }
 
-/*
- * The "writer" side needs to get exclusive access to the lock,
- * regardless of readers.  This must be called with bottom half
- * processing (and thus also preemption) disabled.
+/**
+ * xt_write_recseq_end - end of a write section
+ * @addend: return value from previous xt_write_recseq_begin()
+ *
+ * End packet processing : all readers can proceed
+ * 1) Must be called with preemption disabled
+ * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
  */
-static inline void xt_info_wrlock(unsigned int cpu)
-{
-	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
-}
-
-static inline void xt_info_wrunlock(unsigned int cpu)
+static inline void xt_write_recseq_end(unsigned int addend)
 {
-	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
+	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
+	smp_wmb();
+	__this_cpu_add(xt_recseq.sequence, addend);
 }
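The kernel-doc above is terse, so a note on the trick: the low-order bit of the per-cpu sequence doubles as the "writer active" flag, and addend = (sequence + 1) & 1 evaluates to 1 on first entry but to 0 when the section is entered recursively, so the recursive exit leaves the count untouched. A stand-alone user-space model of just that arithmetic (no per-cpu storage, no memory barriers; all names invented):

#include <assert.h>
#include <stdio.h>

static unsigned int seq;	/* models one CPU's xt_recseq.sequence */

/* Models xt_write_recseq_begin(): bump only if the low bit is clear,
 * i.e. only if no write section is already open on this "cpu". */
static unsigned int model_recseq_begin(void)
{
	unsigned int addend = (seq + 1) & 1;

	seq += addend;
	return addend;
}

/* Models xt_write_recseq_end(): the matching second increment. */
static void model_recseq_end(unsigned int addend)
{
	seq += addend;
}

int main(void)
{
	unsigned int outer = model_recseq_begin();
	assert(outer == 1 && (seq & 1));	/* writer visible to readers */

	/* A recursive entry (e.g. a target re-entering the table)
	 * must not disturb the sequence. */
	unsigned int inner = model_recseq_begin();
	assert(inner == 0 && (seq & 1));
	model_recseq_end(inner);

	model_recseq_end(outer);
	assert(!(seq & 1));	/* sequence even again: readers stop retrying */
	printf("sequence after one write section: %u\n", seq);
	return 0;
}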
net/ipv4/netfilter/arp_tables.c

@@ -260,6 +260,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	void *table_base;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;
 
 	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
 		return NF_DROP;

@@ -267,7 +268,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	indev = in ? in->name : nulldevname;
 	outdev = out ? out->name : nulldevname;
 
-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	table_base = private->entries[smp_processor_id()];

@@ -338,7 +340,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 			/* Verdict */
 			break;
 	} while (!acpar.hotdrop);
-	xt_info_rdunlock_bh();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
 
 	if (acpar.hotdrop)
 		return NF_DROP;

@@ -712,7 +715,7 @@ static void get_counters(const struct xt_table_info *t,
 	unsigned int i;
 
 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {

@@ -720,10 +723,10 @@ static void get_counters(const struct xt_table_info *t,
 			unsigned int start;
 
 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;

@@ -1115,6 +1118,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 	int ret = 0;
 	void *loc_cpu_entry;
 	struct arpt_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;

@@ -1171,12 +1175,12 @@ static int do_add_counters(struct net *net, const void __user *user,
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
 unlock_up_free:
 	local_bh_enable();
 	xt_table_unlock(t);
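The get_counters() hunks above are the read side that the xt_recseq kernel-doc promises: sample the counter pair, then retry if the sequence moved (or had its low bit set) during the copy. A user-space model of that retry loop, with invented names and a single thread, so the retry never actually fires here:

#include <stdio.h>

static unsigned int seq;		/* models per-cpu xt_recseq.sequence */
static unsigned long long bcnt = 42;	/* models iter->counters.bcnt */

/* Models read_seqcount_begin(): remember an even (no-writer) value. */
static unsigned int model_read_begin(void)
{
	return seq & ~1u;
}

/* Models read_seqcount_retry(): a writer ran, or is still running,
 * if the sequence no longer matches what we started from. */
static int model_read_retry(unsigned int start)
{
	return seq != start;
}

int main(void)
{
	unsigned int start;
	unsigned long long snapshot;

	do {
		start = model_read_begin();
		snapshot = bcnt;	/* copy while possibly racing a writer */
	} while (model_read_retry(start));

	printf("stable byte count: %llu\n", snapshot);
	return 0;
}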
net/ipv4/netfilter/ip_tables.c

@@ -68,15 +68,6 @@ void *ipt_alloc_initial_table(const struct xt_table *info)
 }
 EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
 
-/*
-   We keep a set of rules for each CPU, so we can avoid write-locking
-   them in the softirq when updating the counters and therefore
-   only need to read-lock in the softirq; doing a write_lock_bh() in user
-   context stops packets coming through and allows user context to read
-   the counters or update the rules.
-
-   Hence the start of any table is given by get_table() below.  */
-
 /* Returns whether matches rule or not. */
 /* Performance critical - called for every packet */
 static inline bool

@@ -311,6 +302,7 @@ ipt_do_table(struct sk_buff *skb,
 	unsigned int *stackptr, origptr, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;
 
 	/* Initialization */
 	ip = ip_hdr(skb);

@@ -331,7 +323,8 @@ ipt_do_table(struct sk_buff *skb,
 	acpar.hooknum = hook;
 
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];

@@ -430,7 +423,9 @@ ipt_do_table(struct sk_buff *skb,
 	pr_debug("Exiting %s; resetting sp from %u to %u\n",
 		 __func__, *stackptr, origptr);
 	*stackptr = origptr;
-	xt_info_rdunlock_bh();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
+
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
 #else

@@ -886,7 +881,7 @@ get_counters(const struct xt_table_info *t,
 	unsigned int i;
 
 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {

@@ -894,10 +889,10 @@ get_counters(const struct xt_table_info *t,
 			unsigned int start;
 
 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */

@@ -1312,6 +1307,7 @@ do_add_counters(struct net *net, const void __user *user,
 	int ret = 0;
 	void *loc_cpu_entry;
 	struct ipt_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;

@@ -1368,12 +1364,12 @@ do_add_counters(struct net *net, const void __user *user,
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
 unlock_up_free:
 	local_bh_enable();
 	xt_table_unlock(t);
net/ipv6/netfilter/ip6_tables.c

@@ -340,6 +340,7 @@ ip6t_do_table(struct sk_buff *skb,
 	unsigned int *stackptr, origptr, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;
 
 	/* Initialization */
 	indev = in ? in->name : nulldevname;

@@ -358,7 +359,8 @@ ip6t_do_table(struct sk_buff *skb,
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
 
-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];

@@ -442,7 +444,9 @@ ip6t_do_table(struct sk_buff *skb,
 	} while (!acpar.hotdrop);
 
 	*stackptr = origptr;
-	xt_info_rdunlock_bh();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
+
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;

@@ -899,7 +903,7 @@ get_counters(const struct xt_table_info *t,
 	unsigned int i;
 
 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {

@@ -907,10 +911,10 @@ get_counters(const struct xt_table_info *t,
 			unsigned int start;
 
 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;

@@ -1325,6 +1329,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	int ret = 0;
 	const void *loc_cpu_entry;
 	struct ip6t_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;

@@ -1381,13 +1386,13 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	i = 0;
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	loc_cpu_entry = private->entries[curcpu];
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
 unlock_up_free:
 	local_bh_enable();
net/netfilter/ipset/ip_set_getport.c

@@ -11,6 +11,7 @@
 #include <linux/skbuff.h>
 #include <linux/icmp.h>
 #include <linux/icmpv6.h>
+#include <linux/sctp.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/ip.h>
 #include <net/ipv6.h>

@@ -35,7 +36,20 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
 		*port = src ? th->source : th->dest;
 		break;
 	}
-	case IPPROTO_UDP: {
+	case IPPROTO_SCTP: {
+		sctp_sctphdr_t _sh;
+		const sctp_sctphdr_t *sh;
+
+		sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
+		if (sh == NULL)
+			/* No choice either */
+			return false;
+
+		*port = src ? sh->source : sh->dest;
+		break;
+	}
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE: {
 		struct udphdr _udph;
 		const struct udphdr *uh;
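The SCTP case above can mirror the TCP and UDP ones because the SCTP common header also begins with 16-bit source and destination ports (RFC 4960, section 3.1), which is all get_port() reads from sctp_sctphdr_t. A stand-alone sketch decoding those two fields from a fabricated wire buffer (struct name and values invented for illustration):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the on-wire SCTP common header; the kernel's
 * sctp_sctphdr_t exposes the same leading port fields. */
struct sctp_common_hdr {
	uint16_t source;	/* network byte order */
	uint16_t dest;		/* network byte order */
	uint32_t vtag;
	uint32_t checksum;
};

int main(void)
{
	/* 12 fabricated bytes: sport 80 (0x0050), dport 5060 (0x13c4). */
	const uint8_t wire[12] = { 0x00, 0x50, 0x13, 0xc4,
				   0, 0, 0, 0, 0, 0, 0, 0 };
	struct sctp_common_hdr h;

	memcpy(&h, wire, sizeof(h));
	printf("sport=%u dport=%u\n", ntohs(h.source), ntohs(h.dest));
	return 0;
}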
net/netfilter/ipset/ip_set_hash_ipport.c

@@ -491,7 +491,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision	= 1,
 	.create		= hash_ipport_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
net/netfilter/ipset/ip_set_hash_ipportip.c

@@ -509,7 +509,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension	= IPSET_DIM_THREE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision	= 1,
 	.create		= hash_ipportip_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
net/netfilter/ipset/ip_set_hash_ipportnet.c

@@ -574,7 +574,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension	= IPSET_DIM_THREE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision	= 1,
 	.create		= hash_ipportnet_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
net/netfilter/ipset/ip_set_hash_netport.c

@@ -526,7 +526,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision	= 1,
 	.create		= hash_netport_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
net/netfilter/ipvs/ip_vs_ctl.c

@@ -1984,9 +1984,6 @@ static const struct file_operations ip_vs_info_fops = {
 	.release = seq_release_private,
 };
 
-#endif
-
-#ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq_file_single_net(seq);
net/netfilter/x_tables.c

@@ -762,8 +762,8 @@ void xt_compat_unlock(u_int8_t af)
 EXPORT_SYMBOL_GPL(xt_compat_unlock);
 #endif
 
-DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
-EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
+DEFINE_PER_CPU(seqcount_t, xt_recseq);
+EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
 
 static int xt_jumpstack_alloc(struct xt_table_info *i)
 {

@@ -1362,10 +1362,7 @@ static int __init xt_init(void)
 	int rv;
 
 	for_each_possible_cpu(i) {
-		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-
-		seqlock_init(&lock->lock);
-		lock->readers = 0;
+		seqcount_init(&per_cpu(xt_recseq, i));
 	}
 
 	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);