From 60db1b4ba010c5a9ffc7f1e275825b9457fadee5 Mon Sep 17 00:00:00 2001
From: fengshikun01
Date: Tue, 23 Jun 2020 21:58:03 +0800
Subject: [PATCH] fix deeper_gcn

---
 examples/deeper_gcn/README.md |  8 ++++++++
 examples/deeper_gcn/train.py  |  6 +++---
 pgl/message_passing.py        | 13 +++++++------
 3 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/examples/deeper_gcn/README.md b/examples/deeper_gcn/README.md
index b75413a..e67e51e 100644
--- a/examples/deeper_gcn/README.md
+++ b/examples/deeper_gcn/README.md
@@ -12,6 +12,14 @@ The datasets contain three citation networks: CORA, PUBMED, CITESEER. The detail
 - paddlepaddle>=1.6
 - pgl
 
+### Performance
+
+We train our models for 200 epochs and report the accuracy on the test dataset.
+
+| Dataset | Accuracy |
+| --- | --- |
+| Cora | ~77% |
+
 ### How to run
 
 For examples, use gpu to train gat on cora dataset.
diff --git a/examples/deeper_gcn/train.py b/examples/deeper_gcn/train.py
index 83b2c69..35ed8ac 100644
--- a/examples/deeper_gcn/train.py
+++ b/examples/deeper_gcn/train.py
@@ -44,7 +44,7 @@ def main(args):
     startup_program = fluid.Program()
     test_program = fluid.Program()
     hidden_size = 64
-    num_layers = 50
+    num_layers = 7
 
     with fluid.program_guard(train_program, startup_program):
         gw = pgl.graph_wrapper.GraphWrapper(
@@ -103,7 +103,7 @@ def main(args):
 
     # get beta param
     beta_param_list = []
-    for param in train_program.global_block().all_parameters():
+    for param in fluid.io.get_program_parameter(train_program):
         if param.name.endswith("_beta"):
             beta_param_list.append(param)
 
@@ -119,7 +119,7 @@ def main(args):
             return_numpy=True)
         for param in beta_param_list:
             beta = np.array(fluid.global_scope().find_var(param.name).get_tensor())
-            writer.add_scalar(param.name, beta, epoch)
+            writer.add_scalar("beta/"+param.name, beta, epoch)
 
         if epoch >= 3:
             time_per_epoch = 1.0 * (time.time() - t0)
diff --git a/pgl/message_passing.py b/pgl/message_passing.py
index 858ea97..4046ad9 100644
--- a/pgl/message_passing.py
+++ b/pgl/message_passing.py
@@ -50,13 +50,14 @@ def max_recv(feat):
     return fluid.layers.sequence_pool(feat, pool_type="max")
 
 
-def lstm_recv(feat):
+def lstm_recv(hidden_dim):
     """doc"""
-    hidden_dim = 128
-    forward, _ = fluid.layers.dynamic_lstm(
-        input=feat, size=hidden_dim * 4, use_peepholes=False)
-    output = fluid.layers.sequence_last_step(forward)
-    return output
+    def lstm_recv_inside(feat):
+        forward, _ = fluid.layers.dynamic_lstm(
+            input=feat, size=hidden_dim * 4, use_peepholes=False)
+        output = fluid.layers.sequence_last_step(forward)
+        return output
+    return lstm_recv_inside
 
 
 def graphsage_sum(gw, feature, hidden_size, act, initializer, learning_rate, name):
--
GitLab
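
Note on the lstm_recv change: the reducer is now a factory that captures the
LSTM hidden size as an argument instead of hard-coding hidden_dim = 128 inside
the function, so call sites pass the dimension when building the graph. A
minimal call-site sketch under assumed names (gw is an already-constructed
pgl.graph_wrapper.GraphWrapper and feature is a node-feature tensor; copy_send
is PGL's stock message function):

    from pgl.message_passing import copy_send, lstm_recv

    # Send each source node's "h" feature along the edges, then reduce every
    # node's incoming message sequence with an LSTM whose hidden size is now
    # supplied here rather than fixed at 128.
    msg = gw.send(copy_send, nfeat_list=[("h", feature)])
    output = gw.recv(msg, lstm_recv(hidden_dim=64))  # was: gw.recv(msg, lstm_recv)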