PaddlePaddle / PGL
Unverified commit a7a615dd, authored Sep 01, 2020 by kirayummy, committed via GitHub on Sep 01, 2020.
Merge pull request #119 from kirayummy/main
fix msg norm
Parents: 9f629ac3 1a70632a
Showing 1 changed file with 26 additions and 25 deletions:

pgl/message_passing.py (+26, -25)
pgl/message_passing.py @ a7a615dd
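Every hunk below makes the same mechanical substitution: fully qualified `fluid.layers.<op>` calls become calls on the shorter alias `L`. The import block sits outside the visible hunks, so its exact form is an assumption, but the conventional alias would be:

import paddle.fluid as fluid
import paddle.fluid.layers as L  # assumed alias; the import itself is not shown in this diff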
@@ -37,25 +37,25 @@ def weighted_copy_send(src_feat, dst_feat, edge_feat):
 def mean_recv(feat):
     """doc"""
-    return fluid.layers.sequence_pool(feat, pool_type="average")
+    return L.sequence_pool(feat, pool_type="average")


 def sum_recv(feat):
     """doc"""
-    return fluid.layers.sequence_pool(feat, pool_type="sum")
+    return L.sequence_pool(feat, pool_type="sum")


 def max_recv(feat):
     """doc"""
-    return fluid.layers.sequence_pool(feat, pool_type="max")
+    return L.sequence_pool(feat, pool_type="max")


 def lstm_recv(hidden_dim):
     """doc"""
     def lstm_recv_inside(feat):
-        forward, _ = fluid.layers.dynamic_lstm(
+        forward, _ = L.dynamic_lstm(
             input=feat, size=hidden_dim * 4, use_peepholes=False)
-        output = fluid.layers.sequence_last_step(forward)
+        output = L.sequence_last_step(forward)
         return output
     return lstm_recv_inside
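These `*_recv` reducers are consumed by a graph wrapper's receive step, exactly as the aggregators later in this file do. A minimal usage sketch, assuming a PGL graph wrapper `gw` and a node feature tensor `feature` (names taken from the surrounding functions; `hidden_dim=64` is an illustrative value):

msg = gw.send(copy_send, nfeat_list=[("h", feature)])
h_sum = gw.recv(msg, sum_recv)                   # elementwise sum over each node's inbox
h_lstm = gw.recv(msg, lstm_recv(hidden_dim=64))  # order-sensitive LSTM aggregation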
@@ -65,22 +65,22 @@ def graphsage_sum(gw, feature, hidden_size, act, initializer, learning_rate, nam
     msg = gw.send(copy_send, nfeat_list=[("h", feature)])
     neigh_feature = gw.recv(msg, sum_recv)
     self_feature = feature
-    self_feature = fluid.layers.fc(self_feature,
+    self_feature = L.fc(self_feature,
                         hidden_size,
                         act=act,
                         param_attr=fluid.ParamAttr(name=name + "_l.w_0",
                                                    initializer=initializer,
                                                    learning_rate=learning_rate),
                         bias_attr=name + "_l.b_0")
-    neigh_feature = fluid.layers.fc(neigh_feature,
+    neigh_feature = L.fc(neigh_feature,
                          hidden_size,
                          act=act,
                          param_attr=fluid.ParamAttr(name=name + "_r.w_0",
                                                     initializer=initializer,
                                                     learning_rate=learning_rate),
                          bias_attr=name + "_r.b_0")
-    output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
-    output = fluid.layers.l2_normalize(output, axis=1)
+    output = L.concat([self_feature, neigh_feature], axis=1)
+    output = L.l2_normalize(output, axis=1)
     return output
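A hedged call sketch for the aggregator above; every argument value here is illustrative rather than taken from the diff:

output = graphsage_sum(gw, feature,
                       hidden_size=64,
                       act="relu",
                       initializer=fluid.initializer.XavierInitializer(),
                       learning_rate=1.0,
                       name="graphsage_layer_0")

The `name` argument matters: both `fc` calls derive their parameter names (`_l.w_0`, `_r.w_0`, ...) from it, so reusing a name across layers would share or collide parameters.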
@@ -89,22 +89,22 @@ def graphsage_mean(gw, feature, hidden_size, act, initializer, learning_rate, na
     msg = gw.send(copy_send, nfeat_list=[("h", feature)])
     neigh_feature = gw.recv(msg, mean_recv)
     self_feature = feature
-    self_feature = fluid.layers.fc(self_feature,
+    self_feature = L.fc(self_feature,
                         hidden_size,
                         act=act,
                         param_attr=fluid.ParamAttr(name=name + "_l.w_0",
                                                    initializer=initializer,
                                                    learning_rate=learning_rate),
                         bias_attr=name + "_l.b_0")
-    neigh_feature = fluid.layers.fc(neigh_feature,
+    neigh_feature = L.fc(neigh_feature,
                          hidden_size,
                          act=act,
                          param_attr=fluid.ParamAttr(name=name + "_r.w_0",
                                                     initializer=initializer,
                                                     learning_rate=learning_rate),
                          bias_attr=name + "_r.b_0")
-    output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
-    output = fluid.layers.l2_normalize(output, axis=1)
+    output = L.concat([self_feature, neigh_feature], axis=1)
+    output = L.l2_normalize(output, axis=1)
     return output
@@ -113,22 +113,22 @@ def pinsage_mean(gw, feature, hidden_size, act, initializer, learning_rate, name
     msg = gw.send(weighted_copy_send, nfeat_list=[("h", feature)], efeat_list=["weight"])
     neigh_feature = gw.recv(msg, mean_recv)
     self_feature = feature
-    self_feature = fluid.layers.fc(self_feature,
+    self_feature = L.fc(self_feature,
                         hidden_size,
                         act=act,
                         param_attr=fluid.ParamAttr(name=name + "_l.w_0",
                                                    initializer=initializer,
                                                    learning_rate=learning_rate),
                         bias_attr=name + "_l.b_0")
-    neigh_feature = fluid.layers.fc(neigh_feature,
+    neigh_feature = L.fc(neigh_feature,
                          hidden_size,
                          act=act,
                          param_attr=fluid.ParamAttr(name=name + "_r.w_0",
                                                     initializer=initializer,
                                                     learning_rate=learning_rate),
                          bias_attr=name + "_r.b_0")
-    output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
-    output = fluid.layers.l2_normalize(output, axis=1)
+    output = L.concat([self_feature, neigh_feature], axis=1)
+    output = L.l2_normalize(output, axis=1)
     return output
@@ -137,22 +137,22 @@ def pinsage_sum(gw, feature, hidden_size, act, initializer, learning_rate, name)
     msg = gw.send(weighted_copy_send, nfeat_list=[("h", feature)], efeat_list=["weight"])
     neigh_feature = gw.recv(msg, sum_recv)
     self_feature = feature
-    self_feature = fluid.layers.fc(self_feature,
+    self_feature = L.fc(self_feature,
                         hidden_size,
                         act=act,
                         param_attr=fluid.ParamAttr(name=name + "_l.w_0",
                                                    initializer=initializer,
                                                    learning_rate=learning_rate),
                         bias_attr=name + "_l.b_0")
-    neigh_feature = fluid.layers.fc(neigh_feature,
+    neigh_feature = L.fc(neigh_feature,
                          hidden_size,
                          act=act,
                          param_attr=fluid.ParamAttr(name=name + "_r.w_0",
                                                     initializer=initializer,
                                                     learning_rate=learning_rate),
                          bias_attr=name + "_r.b_0")
-    output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
-    output = fluid.layers.l2_normalize(output, axis=1)
+    output = L.concat([self_feature, neigh_feature], axis=1)
+    output = L.l2_normalize(output, axis=1)
     return output
@@ -172,7 +172,7 @@ def softmax_agg(beta):
     def softmax_agg_inside(msg):
         alpha = paddle_helper.sequence_softmax(msg, beta)
         msg = msg * alpha
-        return fluid.layers.sequence_pool(msg, "sum")
+        return L.sequence_pool(msg, "sum")
     return softmax_agg_inside
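Per destination node, `softmax_agg` weights the incoming messages by an elementwise softmax with temperature `beta` and sums them, in the style of the SoftMax aggregator from DeeperGCN. A self-contained numpy sketch of that computation on a single node's segment (assuming `paddle_helper.sequence_softmax(msg, beta)` normalizes `beta * msg` within each segment, which is what this code appears to rely on):

import numpy as np

def softmax_agg_segment(msgs, beta):
    # msgs: (num_in_edges, feature_size) -- the messages arriving at ONE node
    scaled = beta * msgs
    e = np.exp(scaled - scaled.max(axis=0, keepdims=True))  # numerically stable softmax
    alpha = e / e.sum(axis=0, keepdims=True)                # weights per edge, per feature
    return (alpha * msgs).sum(axis=0)                       # (feature_size,)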
@@ -190,15 +190,16 @@ def msg_norm(x, msg, name):
     Return:
         An output tensor with shape (num_nodes, feature_size)
     """
-    s = fluid.layers.create_parameter(
+    s = L.create_parameter(
         shape=[1],
         dtype='float32',
         default_initializer=fluid.initializer.ConstantInitializer(value=1.0),
         name=name + '_s_msg_norm')
-    msg = fluid.layers.l2_normalize(msg, axis=1)
-    x_norm = fluid.layers.reduce_sum(x * x, dim=1, keep_dim=True)
+    msg = L.l2_normalize(msg, axis=1)
+    x_norm = L.reduce_sum(x * x, dim=1, keep_dim=True)
+    x_norm = L.sqrt(x_norm)
     msg = msg * x_norm * s
     return msg
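The added `L.sqrt` line is the substantive fix behind the commit message: `reduce_sum(x * x, dim=1, keep_dim=True)` yields the squared L2 norm, so before this change the normalized message was scaled by ||x||^2 instead of ||x||. With the fix, `msg_norm` matches MessageNorm from DeeperGCN: out = s * ||x||_2 * msg / ||msg||_2. A numpy reference sketch of the corrected computation (`eps` is added here only for numerical safety; the fluid ops handle that internally):

import numpy as np

def msg_norm_ref(x, msg, s=1.0, eps=1e-12):
    # Rescale the unit direction of the aggregated message by the
    # L2 norm of the node's own features and a learnable scalar s.
    msg_unit = msg / (np.linalg.norm(msg, axis=1, keepdims=True) + eps)
    x_norm = np.sqrt((x * x).sum(axis=1, keepdims=True))  # the sqrt this commit adds
    return msg_unit * x_norm * s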