diff --git a/examples/erniesage/models/erniesage_v2.py b/examples/erniesage/models/erniesage_v2.py
index 78fed26cc56b31c0e1e604e5d01b51657ee48fd2..7ad9a26caf87d7ed79fe55584f21fe8d4f46dd2d 100644
--- a/examples/erniesage/models/erniesage_v2.py
+++ b/examples/erniesage/models/erniesage_v2.py
@@ -81,14 +81,16 @@ class ErnieSageV2(BaseNet):
         self_feature = L.fc(self_feature,
                             hidden_size,
                             act=act,
-                            param_attr=F.ParamAttr(name=name + "_l",
+                            param_attr=F.ParamAttr(name=name + "_l.w_0",
                                                    learning_rate=learning_rate),
+                            bias_attr=name+"_l.b_0"
                             )
         neigh_feature = L.fc(neigh_feature,
                              hidden_size,
                              act=act,
-                             param_attr=F.ParamAttr(name=name + "_r",
-                                                    learning_rate=learning_rate),
+                             param_attr=F.ParamAttr(name=name + "_r.w_0",
+                                                    learning_rate=learning_rate),
+                             bias_attr=name+"_r.b_0"
                              )
         output = L.concat([self_feature, neigh_feature], axis=1)
         output = L.l2_normalize(output, axis=1)
diff --git a/examples/erniesage/models/message_passing.py b/examples/erniesage/models/message_passing.py
index 4567bd694b123841c6b71b61a88e8dcbae8957b7..f45e4be000a17348664486f627f74934c81add1a 100644
--- a/examples/erniesage/models/message_passing.py
+++ b/examples/erniesage/models/message_passing.py
@@ -57,14 +57,16 @@ def graphsage_sum(gw, feature, hidden_size, act, initializer, learning_rate, nam
     self_feature = fluid.layers.fc(self_feature,
                                    hidden_size,
                                    act=act,
-                                   param_attr=fluid.ParamAttr(name=name + "_l", initializer=initializer,
+                                   param_attr=fluid.ParamAttr(name=name + "_l.w_0", initializer=initializer,
                                                               learning_rate=learning_rate),
+                                   bias_attr=name+"_l.b_0"
                                    )
     neigh_feature = fluid.layers.fc(neigh_feature,
                                     hidden_size,
                                     act=act,
-                                    param_attr=fluid.ParamAttr(name=name + "_r", initializer=initializer,
+                                    param_attr=fluid.ParamAttr(name=name + "_r.w_0", initializer=initializer,
                                                                learning_rate=learning_rate),
+                                    bias_attr=name+"_r.b_0"
                                     )
     output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
     output = fluid.layers.l2_normalize(output, axis=1)
@@ -79,14 +81,16 @@ def graphsage_mean(gw, feature, hidden_size, act, initializer, learning_rate, na
     self_feature = fluid.layers.fc(self_feature,
                                    hidden_size,
                                    act=act,
-                                   param_attr=fluid.ParamAttr(name=name + "_l", initializer=initializer,
+                                   param_attr=fluid.ParamAttr(name=name + "_l.w_0", initializer=initializer,
                                                               learning_rate=learning_rate),
+                                   bias_attr=name+"_l.b_0"
                                    )
     neigh_feature = fluid.layers.fc(neigh_feature,
                                     hidden_size,
                                     act=act,
-                                    param_attr=fluid.ParamAttr(name=name + "_r", initializer=initializer,
+                                    param_attr=fluid.ParamAttr(name=name + "_r.w_0", initializer=initializer,
                                                                learning_rate=learning_rate),
+                                    bias_attr=name+"_r.b_0"
                                     )
     output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
     output = fluid.layers.l2_normalize(output, axis=1)
@@ -101,14 +105,16 @@ def pinsage_mean(gw, feature, hidden_size, act, initializer, learning_rate, name
     self_feature = fluid.layers.fc(self_feature,
                                    hidden_size,
                                    act=act,
-                                   param_attr=fluid.ParamAttr(name=name + "_l", initializer=initializer,
+                                   param_attr=fluid.ParamAttr(name=name + "_l.w_0", initializer=initializer,
                                                               learning_rate=learning_rate),
+                                   bias_attr=name+"_l.b_0"
                                    )
     neigh_feature = fluid.layers.fc(neigh_feature,
                                     hidden_size,
                                     act=act,
-                                    param_attr=fluid.ParamAttr(name=name + "_r", initializer=initializer,
+                                    param_attr=fluid.ParamAttr(name=name + "_r.w_0", initializer=initializer,
                                                                learning_rate=learning_rate),
+                                    bias_attr=name+"_r.b_0"
                                     )
     output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
     output = fluid.layers.l2_normalize(output, axis=1)
@@ -123,14 +129,16 @@ def pinsage_sum(gw, feature, hidden_size, act, initializer, learning_rate, name)
     self_feature = fluid.layers.fc(self_feature,
                                    hidden_size,
                                    act=act,
-                                   param_attr=fluid.ParamAttr(name=name + "_l", initializer=initializer,
+                                   param_attr=fluid.ParamAttr(name=name + "_l.w_0", initializer=initializer,
                                                               learning_rate=learning_rate),
+                                   bias_attr=name+"_l.b_0"
                                    )
     neigh_feature = fluid.layers.fc(neigh_feature,
                                     hidden_size,
                                     act=act,
-                                    param_attr=fluid.ParamAttr(name=name + "_r", initializer=initializer,
+                                    param_attr=fluid.ParamAttr(name=name + "_r.w_0", initializer=initializer,
                                                                learning_rate=learning_rate),
+                                    bias_attr=name+"_r.b_0"
                                     )
     output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
     output = fluid.layers.l2_normalize(output, axis=1)
diff --git a/pgl/utils/mp_reader.py b/pgl/utils/mp_reader.py
index b7aec4d268e13d282c8420d80628f975e5472499..a7962830031c3aeede2b780104dacf936d62a120 100644
--- a/pgl/utils/mp_reader.py
+++ b/pgl/utils/mp_reader.py
@@ -25,7 +25,7 @@ except:
 
 import numpy as np
 import time
 import paddle.fluid as fluid
-from queue import Queue
+from multiprocessing import Queue
 import threading
 