openeuler / Kernel

Commit 60dbb011
Authored 14 years ago by David S. Miller

Merge branch 'master' of git://1984.lsi.us.es/net-2.6

Parents: 4b0ef1f2, 2f46e079
No related merge requests
Showing 7 changed files with 55 additions and 109 deletions (+55, −109)
include/linux/if_bridge.h              +1   −1
include/linux/netfilter/x_tables.h     +5   −5
net/ipv4/netfilter/arp_tables.c        +14  −31
net/ipv4/netfilter/ip_tables.c         +14  −31
net/ipv6/netfilter/ip6_tables.c        +14  −31
net/netfilter/nf_conntrack_netlink.c   +5   −9
net/netfilter/x_tables.c               +2   −1
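Reading the hunks below together, this merge brings in three netfilter fixes. First, include/linux/if_bridge.h turns br_should_route_hook_t from a function-pointer typedef into a function typedef, so the hook pointer itself can carry the __rcu annotation. Second, the x_tables changes replace the per-CPU spinlock in struct xt_info_lock with a seqlock and rewrite get_counters() in arp_tables, ip_tables and ip6_tables to snapshot 64-bit counters with a read_seqbegin()/read_seqretry() retry loop instead of blocking packet processing; the counter buffers switch from vmalloc() to vzalloc() because nothing pre-fills them anymore. Third, nf_conntrack_netlink.c moves the conntrack table dump back under spin_lock_bh(&nf_conntrack_lock), dropping the RCU traversal and its per-entry atomic_inc_not_zero() reference juggling.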
include/linux/if_bridge.h
@@ -103,7 +103,7 @@ struct __fdb_entry {
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-        spinlock_t lock;
+        seqlock_t lock;
         unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
         local_bh_disable();
         lock = &__get_cpu_var(xt_info_locks);
         if (likely(!lock->readers++))
-                spin_lock(&lock->lock);
+                write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
         struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
         if (likely(!--lock->readers))
-                spin_unlock(&lock->lock);
+                write_sequnlock(&lock->lock);
         local_bh_enable();
 }
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-        spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+        write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-        spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+        write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
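The net effect of these header changes is that the per-CPU xt_info_lock can now be read optimistically: packet processing still takes the write side (via the readers counter in xt_info_rdlock_bh()), while a counter reader retries until it observes a consistent snapshot. A minimal, self-contained sketch of the seqlock pattern these helpers build on (the demo_* names are ours, not from the tree):

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(demo_lock);       /* hypothetical lock */
static u64 demo_bytes;                  /* hypothetical 64-bit counter */

/* Writer: short critical section; the sequence count is bumped on
 * entry and exit, so it is odd while an update is in flight. */
static void demo_add(u64 delta)
{
        write_seqlock(&demo_lock);
        demo_bytes += delta;
        write_sequnlock(&demo_lock);
}

/* Reader: never blocks the writer; retries if a write overlapped. */
static u64 demo_read(void)
{
        unsigned int seq;
        u64 val;

        do {
                seq = read_seqbegin(&demo_lock);
                val = demo_bytes;
        } while (read_seqretry(&demo_lock, seq));

        return val;
}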
net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
         struct arpt_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */
 
         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i;
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
          * about).
          */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
 
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
         struct arpt_entry *iter;
 
         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
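The rewritten get_counters() above is the payoff of the header change: it no longer calls get_cpu(), disables bottom halves, or takes xt_info_wrlock() (which would stall packet processing on every CPU in turn). Each per-CPU entry is instead read through a retry loop, so a 64-bit bcnt/pcnt pair can never be observed half-updated, even on 32-bit machines. The core idiom, extracted into a hypothetical helper for clarity (the helper itself is not in the tree):

/* Hypothetical helper mirroring the loop body in get_counters():
 * snapshot one 64-bit byte/packet pair without ever blocking the
 * CPU that is updating it. */
static void snapshot_counters(seqlock_t *lock,
                              const struct xt_counters *src,
                              u64 *bcnt, u64 *pcnt)
{
        unsigned int start;

        do {
                start = read_seqbegin(lock);
                *bcnt = src->bcnt;      /* may race with a writer...   */
                *pcnt = src->pcnt;      /* ...the retry catches tears  */
        } while (read_seqretry(lock, start));
}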
net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
         struct ipt_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU.
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */
 
         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i; /* macro does multi eval of i */
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
            (other than comefrom, which userspace doesn't care
            about). */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
 
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
         struct ipt_entry *iter;
 
         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
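One detail worth noting in the ip_tables version: the retained comment /* macro does multi eval of i */ explains why ++i stays a separate statement. ADD_COUNTER expands its destination argument more than once, so an argument with side effects would be evaluated repeatedly. A hypothetical macro of the same shape illustrates the hazard:

/* Same shape as ADD_COUNTER: the dst argument expands twice. */
#define DEMO_ADD(dst, b, p) do {        \
        (dst).bcnt += (b);              \
        (dst).pcnt += (p);              \
} while (0)

        DEMO_ADD(counters[i], bcnt, pcnt);      /* safe: no side effects */
        ++i;                                    /* increment kept separate */

        /* DEMO_ADD(counters[i++], bcnt, pcnt) would bump i twice per
         * call, spreading one "addition" across two slots. */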
net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
         struct ip6t_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */
 
         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i;
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
            (other than comefrom, which userspace doesn't care
            about). */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
 
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
         struct ip6t_entry *iter;
 
         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
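All three tables also switch alloc_counters() and __do_replace() from vmalloc() to vzalloc(). This is not cosmetic: the old get_counters() pre-filled the buffer with SET_COUNTER() for the current CPU, so stale contents were overwritten, while the new code only ever ADD_COUNTER()s into it and therefore needs the allocation to start zeroed. In effect (illustrative expansion, not code from the tree):

        /* What the single vzalloc() call replaces, in effect: */
        counters = vmalloc(countersize);
        if (counters != NULL)
                memset(counters, 0, countersize);

        /* Equivalent, as used by the patch: */
        counters = vzalloc(countersize);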
net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
         struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
         u_int8_t l3proto = nfmsg->nfgen_family;
 
-        rcu_read_lock();
+        spin_lock_bh(&nf_conntrack_lock);
         last = (struct nf_conn *)cb->args[1];
         for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+                hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
                                            hnnode) {
                         if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                 continue;
                         ct = nf_ct_tuplehash_to_ctrack(h);
-                        if (!atomic_inc_not_zero(&ct->ct_general.use))
-                                continue;
                         /* Dump entries of a given L3 protocol number.
                          * If it is not specified, ie. l3proto == 0,
                          * then dump everything. */
                         if (l3proto && nf_ct_l3num(ct) != l3proto)
-                                goto releasect;
+                                continue;
                         if (cb->args[1]) {
                                 if (ct != last)
-                                        goto releasect;
+                                        continue;
                                 cb->args[1] = 0;
                         }
                         if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
                                 if (acct)
                                         memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
                         }
-releasect:
-                nf_ct_put(ct);
                 }
                 if (cb->args[1]) {
                         cb->args[1] = 0;
@@ -690,7 +686,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
                 }
         }
 out:
-        rcu_read_unlock();
+        spin_unlock_bh(&nf_conntrack_lock);
         if (last)
                 nf_ct_put(last);
 
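This hunk is effectively a revert: the conntrack dump previously walked the hash under rcu_read_lock(), taking a reference on each candidate entry with atomic_inc_not_zero() and dropping it at the releasect: label. Going back to spin_lock_bh(&nf_conntrack_lock) closes a race between dumping and entry destruction, and with the lock held entries cannot be freed mid-walk, so the per-entry reference juggling disappears. The two traversal disciplines, side by side (a sketch with hypothetical names: table_lock, chain, refcnt, dump_entry(), drop_ref()):

        /* Locked walk, as restored by this patch: the lock pins every
         * entry for the duration, so plain iteration is safe. */
        spin_lock_bh(&table_lock);
        hlist_nulls_for_each_entry(h, n, chain, hnnode)
                dump_entry(h);
        spin_unlock_bh(&table_lock);

        /* RCU walk, as removed here: an entry can hit refcount zero at
         * any moment, so each use must first win a reference. */
        rcu_read_lock();
        hlist_nulls_for_each_entry_rcu(h, n, chain, hnnode) {
                if (!atomic_inc_not_zero(&h->refcnt))   /* hypothetical field */
                        continue;                       /* being destroyed */
                dump_entry(h);
                drop_ref(h);                            /* hypothetical helper */
        }
        rcu_read_unlock();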
net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
 
         for_each_possible_cpu(i) {
                 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-                spin_lock_init(&lock->lock);
+
+                seqlock_init(&lock->lock);
                 lock->readers = 0;
         }
 
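Finally, xt_init() initializes each per-CPU lock with seqlock_init() instead of spin_lock_init(). A seqlock_t bundles a spinlock with a sequence counter, and seqlock_init() sets up both; a seqlock must be initialized this way (or defined statically with DEFINE_SEQLOCK()) before either the read or write side first uses it.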