Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PGL
提交
fff91869
P
PGL
项目概览
PaddlePaddle
/
PGL
通知
76
Star
4
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
11
列表
看板
标记
里程碑
合并请求
1
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PGL
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
11
Issue
11
列表
看板
标记
里程碑
合并请求
1
合并请求
1
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
fff91869
编写于
4月 09, 2020
作者:
Webbley
提交者:
GitHub
4月 09, 2020
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #38 from Liwb5/develop
add gin layer
上级
46dd55da
15853d51
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
162 additions
and
6 deletions
+162
-6
ogb_examples/linkproppred/main_pgl.py
ogb_examples/linkproppred/main_pgl.py
+9
-5
pgl/layers/conv.py
pgl/layers/conv.py
+71
-1
pgl/tests/test_gin.py
pgl/tests/test_gin.py
+82
-0
未找到文件。
ogb_examples/linkproppred/main_pgl.py
浏览文件 @
fff91869
...
...
@@ -96,7 +96,7 @@ class GNNModel(object):
loss
=
fluid
.
layers
.
sigmoid_cross_entropy_with_logits
(
pred
,
self
.
edge_label
)
loss
=
fluid
.
layers
.
reduce_
mean
(
loss
)
loss
=
fluid
.
layers
.
reduce_
sum
(
loss
)
return
pred
,
prob
,
loss
...
...
@@ -223,8 +223,10 @@ def test(exe, val_program, prob, evaluator, feed, splitted_edge):
"float32"
).
reshape
(
-
1
,
1
)
y_pred
=
exe
.
run
(
val_program
,
feed
=
feed
,
fetch_list
=
[
prob
])[
0
]
input_dict
=
{
"y_true"
:
splitted_edge
[
"valid_edge_label"
],
"y_pred"
:
y_pred
.
reshape
(
-
1
,
),
"y_pred_pos"
:
y_pred
[
splitted_edge
[
"valid_edge_label"
]
==
1
].
reshape
(
-
1
,
),
"y_pred_neg"
:
y_pred
[
splitted_edge
[
"valid_edge_label"
]
==
0
].
reshape
(
-
1
,
)
}
result
[
"valid"
]
=
evaluator
.
eval
(
input_dict
)
...
...
@@ -234,8 +236,10 @@ def test(exe, val_program, prob, evaluator, feed, splitted_edge):
"float32"
).
reshape
(
-
1
,
1
)
y_pred
=
exe
.
run
(
val_program
,
feed
=
feed
,
fetch_list
=
[
prob
])[
0
]
input_dict
=
{
"y_true"
:
splitted_edge
[
"test_edge_label"
],
"y_pred"
:
y_pred
.
reshape
(
-
1
,
),
"y_pred_pos"
:
y_pred
[
splitted_edge
[
"test_edge_label"
]
==
1
].
reshape
(
-
1
,
),
"y_pred_neg"
:
y_pred
[
splitted_edge
[
"test_edge_label"
]
==
0
].
reshape
(
-
1
,
)
}
result
[
"test"
]
=
evaluator
.
eval
(
input_dict
)
return
result
...
...
pgl/layers/conv.py
浏览文件 @
fff91869
...
...
@@ -18,7 +18,7 @@ import paddle.fluid as fluid
from
pgl
import
graph_wrapper
from
pgl.utils
import
paddle_helper
# Public layers exported by this module.
__all__ = ['gcn', 'gat', 'gin']
def
gcn
(
gw
,
feature
,
hidden_size
,
activation
,
name
,
norm
=
None
):
...
...
@@ -178,3 +178,73 @@ def gat(gw,
bias
.
stop_gradient
=
True
output
=
fluid
.
layers
.
elementwise_add
(
output
,
bias
,
act
=
activation
)
return
output
def gin(gw,
        feature,
        hidden_size,
        activation,
        name,
        init_eps=0.0,
        train_eps=False):
    r"""Implementation of Graph Isomorphism Network (GIN) layer.

    This is an implementation of the paper How Powerful are Graph Neural
    Networks? (https://arxiv.org/pdf/1810.00826.pdf).

    In their implementation, all MLPs have 2 layers. Batch normalization is
    applied on every hidden layer.

    Args:
        gw: Graph wrapper object (:code:`StaticGraphWrapper` or
            :code:`GraphWrapper`)

        feature: A tensor with shape (num_nodes, feature_size).

        hidden_size: The hidden size for gin.

        activation: The activation for the output.

        name: GIN layer names.

        init_eps: float, optional
            Initial :math:`\epsilon` value, default is 0.

        train_eps: bool, optional
            if True, :math:`\epsilon` will be a learnable parameter.

    Return:
        A tensor with shape (num_nodes, hidden_size).
    """

    def send_src_copy(src_feat, dst_feat, edge_feat):
        """Message function: every edge carries its source node's feature."""
        return src_feat["h"]

    # epsilon weights the node's own feature against the summed neighbor
    # features: out_v = MLP((1 + eps) * h_v + sum_{u in N(v)} h_u).
    epsilon = fluid.layers.create_parameter(
        shape=[1, 1],
        dtype="float32",
        attr=fluid.ParamAttr(name="%s_eps" % name),
        default_initializer=fluid.initializer.ConstantInitializer(
            value=init_eps))

    if not train_eps:
        # Keep epsilon fixed at init_eps instead of learning it.
        epsilon.stop_gradient = True

    msg = gw.send(send_src_copy, nfeat_list=[("h", feature)])
    output = gw.recv(msg, "sum") + (1.0 + epsilon) * feature

    # Two-layer MLP; batch normalization is applied on the hidden layer,
    # matching the reference implementation in the paper.
    output = fluid.layers.fc(output,
                             size=hidden_size,
                             act=None,
                             param_attr=fluid.ParamAttr(name="%s_w_0" % name),
                             bias_attr=fluid.ParamAttr(name="%s_b_0" % name))

    output = fluid.layers.batch_norm(output)
    output = getattr(fluid.layers, activation)(output)

    output = fluid.layers.fc(output,
                             size=hidden_size,
                             act=activation,
                             param_attr=fluid.ParamAttr(name="%s_w_1" % name),
                             bias_attr=fluid.ParamAttr(name="%s_b_1" % name))

    return output
pgl/tests/test_gin.py
0 → 100644
浏览文件 @
fff91869
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file is for testing gin layer.
"""
from
__future__
import
division
from
__future__
import
absolute_import
from
__future__
import
print_function
from
__future__
import
unicode_literals
import
unittest
import
numpy
as
np
import
paddle.fluid
as
F
import
paddle.fluid.layers
as
L
from
pgl.layers.conv
import
gin
from
pgl
import
graph
from
pgl
import
graph_wrapper
class GinTest(unittest.TestCase):
    """Unit test for the gin layer in pgl.layers.conv."""

    def test_gin(self):
        """Build a small undirected graph, run one gin layer and check that
        the output shape is (num_nodes, hidden_size)."""
        np.random.seed(1)
        hidden_size = 8
        num_nodes = 10

        edges = [(1, 4), (0, 5), (1, 9), (1, 8), (2, 8), (2, 5), (3, 6),
                 (3, 7), (3, 4), (3, 8)]
        # Add the reverse of every edge so the graph is undirected.
        inver_edges = [(v, u) for u, v in edges]
        edges.extend(inver_edges)

        node_feat = {"feature": np.random.rand(10, 4).astype("float32")}

        g = graph.Graph(num_nodes=num_nodes, edges=edges, node_feat=node_feat)

        use_cuda = False
        # NOTE: fluid's GPU place class is CUDAPlace (F.GPUPlace does not
        # exist and would raise AttributeError when use_cuda is True).
        place = F.CUDAPlace(0) if use_cuda else F.CPUPlace()

        prog = F.Program()
        startup_prog = F.Program()
        with F.program_guard(prog, startup_prog):
            gw = graph_wrapper.GraphWrapper(
                name='graph',
                place=place,
                node_feat=g.node_feat_info(),
                edge_feat=g.edge_feat_info())

            output = gin(gw,
                         gw.node_feat['feature'],
                         hidden_size=hidden_size,
                         activation='relu',
                         name='gin',
                         init_eps=1,
                         train_eps=True)

        exe = F.Executor(place)
        exe.run(startup_prog)
        ret = exe.run(prog, feed=gw.to_feed(g), fetch_list=[output])

        self.assertEqual(ret[0].shape[0], num_nodes)
        self.assertEqual(ret[0].shape[1], hidden_size)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录