Commit 60db1b4b authored by fengshikun01

fix deeper_gcn

Parent fb2940a6
@@ -12,6 +12,14 @@ The datasets contain three citation networks: CORA, PUBMED, CITESEER. The detail
 - paddlepaddle>=1.6
 - pgl
+### Performance
+We train our models for 200 epochs and report the accuracy on the test dataset.
+
+| Dataset | Accuracy |
+| --- | --- |
+| Cora | ~77% |
+### How to run
+For example, use GPU to train GAT on the Cora dataset.
......
@@ -44,7 +44,7 @@ def main(args):
     startup_program = fluid.Program()
     test_program = fluid.Program()
     hidden_size = 64
-    num_layers = 50
+    num_layers = 7
     with fluid.program_guard(train_program, startup_program):
         gw = pgl.graph_wrapper.GraphWrapper(
@@ -103,7 +103,7 @@ def main(args):
     # get beta param
     beta_param_list = []
-    for param in train_program.global_block().all_parameters():
+    for param in fluid.io.get_program_parameter(train_program):
         if param.name.endswith("_beta"):
             beta_param_list.append(param)
@@ -119,7 +119,7 @@ def main(args):
             return_numpy=True)
         for param in beta_param_list:
             beta = np.array(fluid.global_scope().find_var(param.name).get_tensor())
-            writer.add_scalar(param.name, beta, epoch)
+            writer.add_scalar("beta/"+param.name, beta, epoch)
         if epoch >= 3:
             time_per_epoch = 1.0 * (time.time() - t0)
......
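For reference, the two fixes in the logging path combine into the pattern below. This is a minimal sketch, not code from the repository: `log_beta_params` is an illustrative name, and `writer` is assumed to be a tensorboardX `SummaryWriter` (anything with an `add_scalar(tag, value, step)` method would work).

```python
import numpy as np
import paddle.fluid as fluid


def log_beta_params(train_program, writer, epoch):
    # fluid.io.get_program_parameter lists every parameter of the
    # program, replacing the manual walk over global_block() above.
    beta_param_list = [
        param for param in fluid.io.get_program_parameter(train_program)
        if param.name.endswith("_beta")
    ]
    for param in beta_param_list:
        # Read the parameter's current value out of the executor scope.
        beta = np.array(
            fluid.global_scope().find_var(param.name).get_tensor())
        # The "beta/" prefix groups all beta curves under one namespace
        # in the dashboard instead of scattering them at the top level.
        writer.add_scalar("beta/" + param.name, beta, epoch)
```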
@@ -50,13 +50,14 @@ def max_recv(feat):
     return fluid.layers.sequence_pool(feat, pool_type="max")
 
-def lstm_recv(feat):
+def lstm_recv(hidden_dim):
     """doc"""
-    hidden_dim = 128
+    def lstm_recv_inside(feat):
         forward, _ = fluid.layers.dynamic_lstm(
             input=feat, size=hidden_dim * 4, use_peepholes=False)
         output = fluid.layers.sequence_last_step(forward)
         return output
+    return lstm_recv_inside
 
 def graphsage_sum(gw, feature, hidden_size, act, initializer, learning_rate, name):
......
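The `lstm_recv` change turns the hard-coded `hidden_dim = 128` into a parameter by making `lstm_recv` a factory: it now returns a one-argument reduce function, which is the shape `GraphWrapper.recv` expects. A hedged usage sketch, assuming the usual PGL send/recv wiring (`gw` and `msg` are not defined in this diff):

```python
# Illustrative wiring only: `gw` is a pgl.graph_wrapper.GraphWrapper and
# `msg` is the message returned by a preceding gw.send(...) call.
hidden_dim = 128  # the value that was previously hard-coded inside lstm_recv

# lstm_recv(hidden_dim) builds the closure; gw.recv applies it to the
# neighbor-feature sequences gathered for each node.
neigh_feature = gw.recv(msg, lstm_recv(hidden_dim))
```

The closure keeps the single-argument signature that `recv` requires while letting each call site choose the LSTM width.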