openanolis / cloud-kernel

Commit 167de77f
Authored April 20, 2012 by David S. Miller

Merge branch 'tipc_net-next' of
git://git.kernel.org/pub/scm/linux/kernel/git/paulg/linux

Parents: 2528a5dc 9d52ce4b
Showing 11 changed files with 172 additions and 107 deletions (+172 -107)
net/tipc/addr.h         +19  -1
net/tipc/bearer.c        +1  -1
net/tipc/config.c        +1  -1
net/tipc/name_distr.c   +80 -39
net/tipc/name_table.c    +7  -7
net/tipc/net.c           +2  -1
net/tipc/node.c          +1  -1
net/tipc/node_subscr.c   +1  -1
net/tipc/port.c         +57 -44
net/tipc/port.h          +1 -10
net/tipc/socket.c        +2  -1
net/tipc/addr.h
@@ -50,11 +50,29 @@ static inline u32 tipc_cluster_mask(u32 addr)
 	return addr & TIPC_CLUSTER_MASK;
 }
 
-static inline int in_own_cluster(u32 addr)
+static inline int in_own_cluster_exact(u32 addr)
 {
 	return !((addr ^ tipc_own_addr) >> 12);
 }
 
+/**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+static inline int in_own_node(u32 addr)
+{
+	return (addr == tipc_own_addr) || !addr;
+}
+
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+static inline int in_own_cluster(u32 addr)
+{
+	return in_own_cluster_exact(addr) || !addr;
+}
+
 /**
  * addr_domain - convert 2-bit scope value to equivalent message lookup domain
  *
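The two new wrappers above treat the unconfigured address <0.0.0> as an alias for this node, while the renamed in_own_cluster_exact() keeps the old strict comparison. A minimal user-space sketch of that behaviour (not part of the commit; it assumes the usual <zone.cluster.node> address packing, z<<24 | c<<12 | n):

/* standalone sketch: mirrors the addr.h helpers outside the kernel */
#include <stdio.h>

static unsigned int tipc_own_addr;	/* stand-in for the kernel global */

static unsigned int tipc_addr(unsigned int z, unsigned int c, unsigned int n)
{
	return (z << 24) | (c << 12) | n;
}

static int in_own_cluster_exact(unsigned int addr)
{
	return !((addr ^ tipc_own_addr) >> 12);
}

static int in_own_node(unsigned int addr)	/* <0.0.0> always matches */
{
	return (addr == tipc_own_addr) || !addr;
}

static int in_own_cluster(unsigned int addr)	/* <0.0.0> always matches */
{
	return in_own_cluster_exact(addr) || !addr;
}

int main(void)
{
	unsigned int peer = tipc_addr(1, 1, 3);

	tipc_own_addr = tipc_addr(1, 1, 2);	/* this node is <1.1.2> */

	printf("in_own_node(<0.0.0>)          = %d\n", in_own_node(0));
	printf("in_own_node(<1.1.3>)          = %d\n", in_own_node(peer));
	printf("in_own_cluster(<1.1.3>)       = %d\n", in_own_cluster(peer));
	printf("in_own_cluster_exact(<0.0.0>) = %d\n", in_own_cluster_exact(0));
	return 0;
}

Built with a plain C compiler this prints 1, 0, 1, 0: only the non-exact helpers accept <0.0.0>, which is what lets the callers below keep working before the node has been assigned its network address.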
net/tipc/bearer.c
@@ -449,7 +449,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 		if (tipc_in_scope(disc_domain, tipc_own_addr)) {
 			disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
 			res = 0;   /* accept any node in own cluster */
-		} else if (in_own_cluster(disc_domain))
+		} else if (in_own_cluster_exact(disc_domain))
 			res = 0;   /* accept specified node in own cluster */
 	}
 	if (res) {
net/tipc/config.c
@@ -290,7 +290,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
 	/* Check command authorization */
-	if (likely(orig_node == tipc_own_addr)) {
+	if (likely(in_own_node(orig_node))) {
 		/* command is permitted */
 	} else if (cmd >= 0x8000) {
 		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
net/tipc/name_distr.c
@@ -68,12 +68,37 @@ struct distr_item {
 };
 
 /**
- * List of externally visible publications by this node --
- * that is, all publications having scope > TIPC_NODE_SCOPE.
+ * struct publ_list - list of publications made by this node
+ * @list: circular list of publications
+ * @list_size: number of entries in list
  */
+struct publ_list {
+	struct list_head list;
+	u32 size;
+};
+
+static struct publ_list publ_zone = {
+	.list = LIST_HEAD_INIT(publ_zone.list),
+	.size = 0,
+};
+
+static struct publ_list publ_cluster = {
+	.list = LIST_HEAD_INIT(publ_cluster.list),
+	.size = 0,
+};
+
+static struct publ_list publ_node = {
+	.list = LIST_HEAD_INIT(publ_node.list),
+	.size = 0,
+};
+
+static struct publ_list *publ_lists[] = {
+	NULL,
+	&publ_zone,	/* publ_lists[TIPC_ZONE_SCOPE]    */
+	&publ_cluster,	/* publ_lists[TIPC_CLUSTER_SCOPE] */
+	&publ_node	/* publ_lists[TIPC_NODE_SCOPE]    */
+};
-static LIST_HEAD(publ_root);
-static u32 publ_cnt;
 
 /**
  * publ_to_item - add publication info to a publication message
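The publ_lists[] array above lets later code pick the right list straight from a publication's scope value, with entry 0 unused so the scope constants from the TIPC headers (zone = 1, cluster = 2, node = 3) index the array directly. A standalone sketch of that indexing idea, using a plain singly linked list in place of the kernel's list_head (illustrative only, not the commit's code):

#include <stdio.h>

enum { TIPC_ZONE_SCOPE = 1, TIPC_CLUSTER_SCOPE = 2, TIPC_NODE_SCOPE = 3 };

struct publication {
	unsigned int type, lower, upper;
	int scope;
	struct publication *next;	/* simplified local_list linkage */
};

struct publ_list {
	struct publication *head;
	unsigned int size;
};

static struct publ_list publ_zone, publ_cluster, publ_node;

/* entry 0 unused, so a publication's scope indexes the array directly */
static struct publ_list *publ_lists[] = {
	NULL, &publ_zone, &publ_cluster, &publ_node
};

static void local_publish(struct publication *p)
{
	struct publ_list *pls = publ_lists[p->scope];

	p->next = pls->head;		/* prepend to the scope's list */
	pls->head = p;
	pls->size++;
	if (p->scope == TIPC_NODE_SCOPE)
		return;			/* node-scope names stay local */
	/* a real implementation would build a PUBLICATION message here */
}

int main(void)
{
	struct publication a = { 1000, 1, 1, TIPC_NODE_SCOPE, NULL };
	struct publication b = { 1000, 2, 2, TIPC_CLUSTER_SCOPE, NULL };

	local_publish(&a);
	local_publish(&b);
	printf("zone %u, cluster %u, node %u\n",
	       publ_zone.size, publ_cluster.size, publ_node.size);
	return 0;
}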
@@ -132,8 +157,11 @@ void tipc_named_publish(struct publication *publ)
 	struct sk_buff *buf;
 	struct distr_item *item;
 
-	list_add_tail(&publ->local_list, &publ_root);
-	publ_cnt++;
+	list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
+	publ_lists[publ->scope]->size++;
+
+	if (publ->scope == TIPC_NODE_SCOPE)
+		return;
 
 	buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
 	if (!buf) {
@@ -156,7 +184,10 @@ void tipc_named_withdraw(struct publication *publ)
 	struct distr_item *item;
 
 	list_del(&publ->local_list);
-	publ_cnt--;
+	publ_lists[publ->scope]->size--;
+
+	if (publ->scope == TIPC_NODE_SCOPE)
+		return;
 
 	buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
 	if (!buf) {
@@ -169,6 +200,39 @@ void tipc_named_withdraw(struct publication *publ)
 	named_cluster_distribute(buf);
 }
 
+/*
+ * named_distribute - prepare name info for bulk distribution to another node
+ */
+static void named_distribute(struct list_head *message_list, u32 node,
+			     struct publ_list *pls, u32 max_item_buf)
+{
+	struct publication *publ;
+	struct sk_buff *buf = NULL;
+	struct distr_item *item = NULL;
+	u32 left = 0;
+	u32 rest = pls->size * ITEM_SIZE;
+
+	list_for_each_entry(publ, &pls->list, local_list) {
+		if (!buf) {
+			left = (rest <= max_item_buf) ? rest : max_item_buf;
+			rest -= left;
+			buf = named_prepare_buf(PUBLICATION, left, node);
+			if (!buf) {
+				warn("Bulk publication failure\n");
+				return;
+			}
+			item = (struct distr_item *)msg_data(buf_msg(buf));
+		}
+		publ_to_item(item, publ);
+		item++;
+		left -= ITEM_SIZE;
+		if (!left) {
+			list_add_tail((struct list_head *)buf, message_list);
+			buf = NULL;
+		}
+	}
+}
+
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
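named_distribute() above walks one per-scope list and packs its items into as few PUBLICATION messages as the max_item_buf limit allows, opening a fresh buffer exactly when the previous one has been filled. A standalone sketch of just that chunking arithmetic (the 20-byte item size and the counts are illustrative, chosen so max_item_buf is a whole number of items, as the kernel code arranges):

#include <stdio.h>

#define ITEM_SIZE 20u	/* illustrative stand-in for sizeof(struct distr_item) */

static void distribute(unsigned int nr_items, unsigned int max_item_buf)
{
	unsigned int rest = nr_items * ITEM_SIZE;	/* bytes still to send */
	unsigned int left = 0;				/* room left in buffer */
	unsigned int buffers = 0;
	unsigned int i;

	for (i = 0; i < nr_items; i++) {
		if (!left) {				/* start a new buffer */
			left = (rest <= max_item_buf) ? rest : max_item_buf;
			rest -= left;
			buffers++;
		}
		/* publ_to_item() would copy item i into the buffer here */
		left -= ITEM_SIZE;
	}
	printf("%u items -> %u message buffer(s)\n", nr_items, buffers);
}

int main(void)
{
	distribute(5, 60);	/* 100 bytes of items, 60-byte limit -> 2 buffers */
	distribute(3, 60);	/* 60 bytes, fits one buffer exactly -> 1 buffer */
	return 0;
}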
@@ -177,13 +241,8 @@ void tipc_named_node_up(unsigned long nodearg)
 {
 	struct tipc_node *n_ptr;
 	struct tipc_link *l_ptr;
-	struct publication *publ;
-	struct distr_item *item = NULL;
-	struct sk_buff *buf = NULL;
 	struct list_head message_list;
 	u32 node = (u32)nodearg;
-	u32 left = 0;
-	u32 rest;
 	u32 max_item_buf = 0;
 
 	/* compute maximum amount of publication data to send per message */
@@ -207,28 +266,8 @@ void tipc_named_node_up(unsigned long nodearg)
 	INIT_LIST_HEAD(&message_list);
 
 	read_lock_bh(&tipc_nametbl_lock);
-	rest = publ_cnt * ITEM_SIZE;
-	list_for_each_entry(publ, &publ_root, local_list) {
-		if (!buf) {
-			left = (rest <= max_item_buf) ? rest : max_item_buf;
-			rest -= left;
-			buf = named_prepare_buf(PUBLICATION, left, node);
-			if (!buf) {
-				warn("Bulk publication distribution failure\n");
-				goto exit;
-			}
-			item = (struct distr_item *)msg_data(buf_msg(buf));
-		}
-		publ_to_item(item, publ);
-		item++;
-		left -= ITEM_SIZE;
-		if (!left) {
-			list_add_tail((struct list_head *)buf, &message_list);
-			buf = NULL;
-		}
-	}
-exit:
+	named_distribute(&message_list, node, &publ_cluster, max_item_buf);
+	named_distribute(&message_list, node, &publ_zone, max_item_buf);
 	read_unlock_bh(&tipc_nametbl_lock);
 
 	tipc_link_send_names(&message_list, (u32)node);
@@ -316,21 +355,23 @@ void tipc_named_recv(struct sk_buff *buf)
 }
 
 /**
- * tipc_named_reinit - re-initialize local publication list
+ * tipc_named_reinit - re-initialize local publications
  *
  * This routine is called whenever TIPC networking is enabled.
- * All existing publications by this node that have "cluster" or "zone" scope
- * are updated to reflect the node's new network address.
+ * All name table entries published by this node are updated to reflect
+ * the node's new network address.
  */
 void tipc_named_reinit(void)
 {
 	struct publication *publ;
+	int scope;
 
 	write_lock_bh(&tipc_nametbl_lock);
 
-	list_for_each_entry(publ, &publ_root, local_list)
-		publ->node = tipc_own_addr;
+	for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
+		list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
+			publ->node = tipc_own_addr;
 
 	write_unlock_bh(&tipc_nametbl_lock);
 }
net/tipc/name_table.c
@@ -347,7 +347,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
 		info->cluster_list_size++;
 	}
 
-	if (node == tipc_own_addr) {
+	if (in_own_node(node)) {
 		list_add(&publ->node_list, &info->node_list);
 		info->node_list_size++;
 	}
@@ -418,7 +418,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
 	/* Remove publication from node scope list, if present */
-	if (node == tipc_own_addr) {
+	if (in_own_node(node)) {
 		list_del(&publ->node_list);
 		info->node_list_size--;
 	}
@@ -604,7 +604,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
 		publ = list_first_entry(&info->node_list, struct publication,
 					node_list);
 		list_move_tail(&publ->node_list, &info->node_list);
-	} else if (in_own_cluster(*destnode)) {
+	} else if (in_own_cluster_exact(*destnode)) {
 		if (list_empty(&info->cluster_list))
 			goto no_match;
 		publ = list_first_entry(&info->cluster_list, struct publication,
@@ -695,11 +695,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
 	}
 
 	write_lock_bh(&tipc_nametbl_lock);
-	table.local_publ_count++;
 	publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
 					tipc_own_addr, port_ref, key);
-	if (publ && (scope != TIPC_NODE_SCOPE))
+	if (likely(publ)) {
+		table.local_publ_count++;
 		tipc_named_publish(publ);
+	}
 	write_unlock_bh(&tipc_nametbl_lock);
 	return publ;
 }
@@ -716,8 +717,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
 	publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
 	if (likely(publ)) {
 		table.local_publ_count--;
-		if (publ->scope != TIPC_NODE_SCOPE)
-			tipc_named_withdraw(publ);
+		tipc_named_withdraw(publ);
 		write_unlock_bh(&tipc_nametbl_lock);
 		list_del_init(&publ->pport_list);
 		kfree(publ);
net/tipc/net.c
@@ -178,11 +178,12 @@ int tipc_net_start(u32 addr)
 	tipc_subscr_stop();
 	tipc_cfg_stop();
 
+	write_lock_bh(&tipc_net_lock);
 	tipc_own_addr = addr;
 	tipc_named_reinit();
 	tipc_port_reinit();
-
 	tipc_bclink_init();
+	write_unlock_bh(&tipc_net_lock);
 
 	tipc_k_signal((Handler)tipc_subscr_start, 0);
 	tipc_k_signal((Handler)tipc_cfg_init, 0);
net/tipc/node.c
@@ -72,7 +72,7 @@ struct tipc_node *tipc_node_find(u32 addr)
 	struct tipc_node *node;
 	struct hlist_node *pos;
 
-	if (unlikely(!in_own_cluster(addr)))
+	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
 	hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
net/tipc/node_subscr.c
@@ -45,7 +45,7 @@
 void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
 			    void *usr_handle, net_ev_handler handle_down)
 {
-	if (addr == tipc_own_addr) {
+	if (in_own_node(addr)) {
 		node_sub->node = NULL;
 		return;
 	}
net/tipc/port.c
@@ -59,16 +59,38 @@ static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err);
 static void port_timeout(unsigned long ref);
 
-static u32 port_peernode(struct tipc_port *p_ptr)
+static inline u32 port_peernode(struct tipc_port *p_ptr)
 {
 	return msg_destnode(&p_ptr->phdr);
 }
 
-static u32 port_peerport(struct tipc_port *p_ptr)
+static inline u32 port_peerport(struct tipc_port *p_ptr)
 {
 	return msg_destport(&p_ptr->phdr);
 }
 
+/*
+ * tipc_port_peer_msg - verify message was sent by connected port's peer
+ *
+ * Handles cases where the node's network address has changed from
+ * the default of <0.0.0> to its configured setting.
+ */
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
+{
+	u32 peernode;
+	u32 orignode;
+
+	if (msg_origport(msg) != port_peerport(p_ptr))
+		return 0;
+
+	orignode = msg_orignode(msg);
+	peernode = port_peernode(p_ptr);
+	return (orignode == peernode) ||
+		(!orignode && (peernode == tipc_own_addr)) ||
+		(!peernode && (orignode == tipc_own_addr));
+}
+
 /**
  * tipc_multicast - send a multicast message to local and remote destinations
 */
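tipc_port_peer_msg() accepts a message if the origin node matches the recorded peer exactly, or if one of the two addresses is still the pre-configuration value <0.0.0> while the other names this node; the origin-port check runs first and is omitted below. A standalone sketch of the node-address part of that test (illustrative, not the kernel function itself):

#include <stdio.h>

static unsigned int tipc_own_addr = 0x01001002;	/* stand-in, node <1.1.2> */

static int peer_node_ok(unsigned int orignode, unsigned int peernode)
{
	return (orignode == peernode) ||
		(!orignode && (peernode == tipc_own_addr)) ||
		(!peernode && (orignode == tipc_own_addr));
}

int main(void)
{
	/* both sides agree on the peer's address: accepted */
	printf("%d\n", peer_node_ok(0x01001003, 0x01001003));	/* 1 */
	/* message still stamped <0.0.0>, peer recorded as this node: accepted */
	printf("%d\n", peer_node_ok(0, tipc_own_addr));		/* 1 */
	/* peer recorded as <0.0.0>, message carries this node's new address */
	printf("%d\n", peer_node_ok(tipc_own_addr, 0));		/* 1 */
	/* a genuinely different node: rejected */
	printf("%d\n", peer_node_ok(0x01001004, 0x01001003));	/* 0 */
	return 0;
}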
@@ -221,18 +243,25 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
 	p_ptr->usr_handle = usr_handle;
 	p_ptr->max_pkt = MAX_PKT_DEFAULT;
 	p_ptr->ref = ref;
-	msg = &p_ptr->phdr;
-	tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
-	msg_set_origport(msg, ref);
 	INIT_LIST_HEAD(&p_ptr->wait_list);
 	INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
 	p_ptr->dispatcher = dispatcher;
 	p_ptr->wakeup = wakeup;
 	p_ptr->user_port = NULL;
 	k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
-	spin_lock_bh(&tipc_port_list_lock);
 	INIT_LIST_HEAD(&p_ptr->publications);
 	INIT_LIST_HEAD(&p_ptr->port_list);
+
+	/*
+	 * Must hold port list lock while initializing message header template
+	 * to ensure a change to node's own network address doesn't result
+	 * in template containing out-dated network address information
+	 */
+	spin_lock_bh(&tipc_port_list_lock);
+	msg = &p_ptr->phdr;
+	tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
+	msg_set_origport(msg, ref);
 	list_add_tail(&p_ptr->port_list, &ports);
 	spin_unlock_bh(&tipc_port_list_lock);
 	return p_ptr;
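The comment added above explains the reordering: the header template is now filled in while tipc_port_list_lock is held, so the address-change path in tipc_port_reinit(), which rewrites every listed port's template under the same lock, can never observe a half-initialized port. A simplified user-space sketch of that pattern (a pthread mutex stands in for the spinlock, a single field for the template, and the single-threaded main only demonstrates the ordering, not a real race):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_list_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int own_addr;		/* stand-in for tipc_own_addr */

struct port {
	unsigned int hdr_orignode;	/* stand-in for the phdr template */
	struct port *next;
};

static struct port *ports;

static void create_port(struct port *p)
{
	pthread_mutex_lock(&port_list_lock);
	p->hdr_orignode = own_addr;	/* template built under the lock... */
	p->next = ports;
	ports = p;			/* ...and only then made visible */
	pthread_mutex_unlock(&port_list_lock);
}

static void port_reinit(unsigned int new_addr)
{
	struct port *p;

	pthread_mutex_lock(&port_list_lock);
	own_addr = new_addr;
	for (p = ports; p; p = p->next)
		p->hdr_orignode = new_addr;	/* every visible port updated */
	pthread_mutex_unlock(&port_list_lock);
}

int main(void)
{
	struct port a = { 0, NULL }, b = { 0, NULL };

	create_port(&a);		/* created while the node is still <0.0.0> */
	port_reinit(0x01001002);	/* node is assigned <1.1.2> */
	create_port(&b);
	printf("a: 0x%08x  b: 0x%08x\n", a.hdr_orignode, b.hdr_orignode);
	return 0;
}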
@@ -415,7 +444,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
 	/* send returned message & dispose of rejected message */
 	src_node = msg_prevnode(msg);
-	if (src_node == tipc_own_addr)
+	if (in_own_node(src_node))
 		tipc_port_recv_msg(rbuf);
 	else
 		tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
@@ -519,25 +548,21 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
 	struct tipc_msg *msg = buf_msg(buf);
 	struct tipc_port *p_ptr;
 	struct sk_buff *r_buf = NULL;
-	u32 orignode = msg_orignode(msg);
-	u32 origport = msg_origport(msg);
 	u32 destport = msg_destport(msg);
 	int wakeable;
 
 	/* Validate connection */
 	p_ptr = tipc_port_lock(destport);
-	if (!p_ptr || !p_ptr->connected ||
-	    (port_peernode(p_ptr) != orignode) ||
-	    (port_peerport(p_ptr) != origport)) {
+	if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
 		r_buf = tipc_buf_acquire(BASIC_H_SIZE);
 		if (r_buf) {
 			msg = buf_msg(r_buf);
 			tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
-				      BASIC_H_SIZE, orignode);
+				      BASIC_H_SIZE, msg_orignode(msg));
 			msg_set_errcode(msg, TIPC_ERR_NO_PORT);
 			msg_set_origport(msg, destport);
-			msg_set_destport(msg, origport);
+			msg_set_destport(msg, msg_origport(msg));
 		}
 		if (p_ptr)
 			tipc_port_unlock(p_ptr);
@@ -646,8 +671,6 @@ void tipc_port_reinit(void)
 	spin_lock_bh(&tipc_port_list_lock);
 	list_for_each_entry(p_ptr, &ports, port_list) {
 		msg = &p_ptr->phdr;
-		if (msg_orignode(msg) == tipc_own_addr)
-			break;
 		msg_set_prevnode(msg, tipc_own_addr);
 		msg_set_orignode(msg, tipc_own_addr);
 	}
@@ -676,6 +699,7 @@ static void port_dispatcher_sigh(void *dummy)
 	struct tipc_name_seq dseq;
 	void *usr_handle;
 	int connected;
+	int peer_invalid;
 	int published;
 	u32 message_type;
@@ -696,6 +720,7 @@ static void port_dispatcher_sigh(void *dummy)
 		up_ptr = p_ptr->user_port;
 		usr_handle = up_ptr->usr_handle;
 		connected = p_ptr->connected;
+		peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg);
 		published = p_ptr->published;
 
 		if (unlikely(msg_errcode(msg)))
@@ -705,8 +730,6 @@ static void port_dispatcher_sigh(void *dummy)
 		case TIPC_CONN_MSG:{
 				tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
-				u32 peer_port = port_peerport(p_ptr);
-				u32 peer_node = port_peernode(p_ptr);
 				u32 dsz;
 
 				tipc_port_unlock(p_ptr);
@@ -715,8 +738,7 @@ static void port_dispatcher_sigh(void *dummy)
 				if (unlikely(!connected)) {
 					if (tipc_connect2port(dref, &orig))
 						goto reject;
-				} else if ((msg_origport(msg) != peer_port) ||
-					   (msg_orignode(msg) != peer_node))
+				} else if (peer_invalid)
 					goto reject;
 				dsz = msg_data_sz(msg);
 				if (unlikely(dsz &&
@@ -768,14 +790,9 @@ static void port_dispatcher_sigh(void *dummy)
 		case TIPC_CONN_MSG:{
 				tipc_conn_shutdown_event cb =
 					up_ptr->conn_err_cb;
-				u32 peer_port = port_peerport(p_ptr);
-				u32 peer_node = port_peernode(p_ptr);
 
 				tipc_port_unlock(p_ptr);
-				if (!cb || !connected)
-					break;
-				if ((msg_origport(msg) != peer_port) ||
-				    (msg_orignode(msg) != peer_node))
+				if (!cb || !connected || peer_invalid)
 					break;
 				tipc_disconnect(dref);
 				skb_pull(buf, msg_hdr_sz(msg));
@@ -1152,17 +1169,6 @@ int tipc_port_recv_msg(struct sk_buff *buf)
 	/* validate destination & pass to port, otherwise reject message */
 	p_ptr = tipc_port_lock(destport);
 	if (likely(p_ptr)) {
-		if (likely(p_ptr->connected)) {
-			if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
-			    (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
-			    (unlikely(!msg_connected(msg)))) {
-				err = TIPC_ERR_NO_PORT;
-				tipc_port_unlock(p_ptr);
-				goto reject;
-			}
-		}
 		err = p_ptr->dispatcher(p_ptr, buf);
 		tipc_port_unlock(p_ptr);
 		if (likely(!err))
@@ -1170,7 +1176,7 @@ int tipc_port_recv_msg(struct sk_buff *buf)
 	} else {
 		err = TIPC_ERR_NO_PORT;
 	}
-reject:
+
 	return tipc_reject_msg(buf, err);
 }
@@ -1211,7 +1217,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
 		p_ptr->congested = 1;
 	if (!tipc_port_congested(p_ptr)) {
 		destnode = port_peernode(p_ptr);
-		if (likely(destnode != tipc_own_addr))
+		if (likely(!in_own_node(destnode)))
 			res = tipc_link_send_sections_fast(p_ptr, msg_sect,
 							   num_sect, total_len,
 							   destnode);
 		else
@@ -1261,13 +1267,17 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
 	msg_set_destport(msg, destport);
 
 	if (likely(destport || destnode)) {
-		if (likely(destnode == tipc_own_addr))
+		if (likely(in_own_node(destnode)))
 			res = tipc_port_recv_sections(p_ptr, num_sect,
 						      msg_sect, total_len);
-		else
+		else if (tipc_own_addr)
 			res = tipc_link_send_sections_fast(p_ptr, msg_sect,
 							   num_sect, total_len,
 							   destnode);
+		else
+			res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
+							num_sect, total_len,
+							TIPC_ERR_NO_NODE);
 		if (likely(res != -ELINKCONG)) {
 			if (res > 0)
 				p_ptr->sent++;
@@ -1305,12 +1315,15 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
 	msg_set_destport(msg, dest->ref);
 	msg_set_hdr_sz(msg, BASIC_H_SIZE);
 
-	if (dest->node == tipc_own_addr)
+	if (in_own_node(dest->node))
 		res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
 					      total_len);
-	else
+	else if (tipc_own_addr)
 		res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
 						   total_len, dest->node);
+	else
+		res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
+						total_len, TIPC_ERR_NO_NODE);
 	if (likely(res != -ELINKCONG)) {
 		if (res > 0)
 			p_ptr->sent++;
@@ -1349,7 +1362,7 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
 	skb_push(buf, BASIC_H_SIZE);
 	skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE);
 
-	if (dest->node == tipc_own_addr)
+	if (in_own_node(dest->node))
 		res = tipc_port_recv_msg(buf);
 	else
 		res = tipc_send_buf_fast(buf, dest->node);
net/tipc/port.h
@@ -201,6 +201,7 @@ int tipc_shutdown(u32 ref);
  * The following routines require that the port be locked on entry
  */
 int tipc_disconnect_port(struct tipc_port *tp_ptr);
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 
 /*
  * TIPC messaging routines
@@ -257,16 +258,6 @@ static inline struct tipc_port *tipc_port_deref(u32 ref)
 	return (struct tipc_port *)tipc_ref_deref(ref);
 }
 
-static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
-{
-	return msg_destport(&p_ptr->phdr);
-}
-
-static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
-{
-	return msg_destnode(&p_ptr->phdr);
-}
-
 static inline int tipc_port_congested(struct tipc_port *p_ptr)
 {
 	return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
net/tipc/socket.c
@@ -1236,7 +1236,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 		if (msg_mcast(msg))
 			return TIPC_ERR_NO_PORT;
 		if (sock->state == SS_CONNECTED) {
-			if (!msg_connected(msg))
+			if (!msg_connected(msg) ||
+			    !tipc_port_peer_msg(tipc_sk_port(sk), msg))
 				return TIPC_ERR_NO_PORT;
 		} else if (sock->state == SS_CONNECTING) {
 			if (!msg_connected(msg) && (msg_errcode(msg) == 0))