未验证 提交 dd1cb348 编写于 作者: W Weiyue Su 提交者: GitHub

Merge pull request #87 from WeiyueSu/erniesage

1. fix mp_reader queue, 2. fix erniesagev2 biasattr
@@ -81,14 +81,16 @@ class ErnieSageV2(BaseNet):
         self_feature = L.fc(self_feature,
                             hidden_size,
                             act=act,
-                            param_attr=F.ParamAttr(name=name + "_l",
+                            param_attr=F.ParamAttr(name=name + "_l.w_0",
                                                    learning_rate=learning_rate),
+                            bias_attr=name+"_l.b_0"
                             )
         neigh_feature = L.fc(neigh_feature,
                              hidden_size,
                              act=act,
-                             param_attr=F.ParamAttr(name=name + "_r",
+                             param_attr=F.ParamAttr(name=name + "_r.w_0",
                                                     learning_rate=learning_rate),
+                             bias_attr=name+"_r.b_0"
                              )
         output = L.concat([self_feature, neigh_feature], axis=1)
         output = L.l2_normalize(output, axis=1)
...
@@ -57,14 +57,16 @@ def graphsage_sum(gw, feature, hidden_size, act, initializer, learning_rate, nam
     self_feature = fluid.layers.fc(self_feature,
                                    hidden_size,
                                    act=act,
-                                   param_attr=fluid.ParamAttr(name=name + "_l", initializer=initializer,
+                                   param_attr=fluid.ParamAttr(name=name + "_l.w_0", initializer=initializer,
                                                              learning_rate=learning_rate),
+                                   bias_attr=name+"_l.b_0"
                                    )
     neigh_feature = fluid.layers.fc(neigh_feature,
                                     hidden_size,
                                     act=act,
-                                    param_attr=fluid.ParamAttr(name=name + "_r", initializer=initializer,
+                                    param_attr=fluid.ParamAttr(name=name + "_r.w_0", initializer=initializer,
                                                               learning_rate=learning_rate),
+                                    bias_attr=name+"_r.b_0"
                                     )
     output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
     output = fluid.layers.l2_normalize(output, axis=1)
@@ -79,14 +81,16 @@ def graphsage_mean(gw, feature, hidden_size, act, initializer, learning_rate, na
     self_feature = fluid.layers.fc(self_feature,
                                    hidden_size,
                                    act=act,
-                                   param_attr=fluid.ParamAttr(name=name + "_l", initializer=initializer,
+                                   param_attr=fluid.ParamAttr(name=name + "_l.w_0", initializer=initializer,
                                                              learning_rate=learning_rate),
+                                   bias_attr=name+"_l.b_0"
                                    )
     neigh_feature = fluid.layers.fc(neigh_feature,
                                     hidden_size,
                                     act=act,
-                                    param_attr=fluid.ParamAttr(name=name + "_r", initializer=initializer,
+                                    param_attr=fluid.ParamAttr(name=name + "_r.w_0", initializer=initializer,
                                                               learning_rate=learning_rate),
+                                    bias_attr=name+"_r.b_0"
                                     )
     output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
     output = fluid.layers.l2_normalize(output, axis=1)
@@ -101,14 +105,16 @@ def pinsage_mean(gw, feature, hidden_size, act, initializer, learning_rate, name
     self_feature = fluid.layers.fc(self_feature,
                                    hidden_size,
                                    act=act,
-                                   param_attr=fluid.ParamAttr(name=name + "_l", initializer=initializer,
+                                   param_attr=fluid.ParamAttr(name=name + "_l.w_0", initializer=initializer,
                                                              learning_rate=learning_rate),
+                                   bias_attr=name+"_l.b_0"
                                    )
     neigh_feature = fluid.layers.fc(neigh_feature,
                                     hidden_size,
                                     act=act,
-                                    param_attr=fluid.ParamAttr(name=name + "_r", initializer=initializer,
+                                    param_attr=fluid.ParamAttr(name=name + "_r.w_0", initializer=initializer,
                                                               learning_rate=learning_rate),
+                                    bias_attr=name+"_r.b_0"
                                     )
     output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
     output = fluid.layers.l2_normalize(output, axis=1)
@@ -123,14 +129,16 @@ def pinsage_sum(gw, feature, hidden_size, act, initializer, learning_rate, name)
     self_feature = fluid.layers.fc(self_feature,
                                    hidden_size,
                                    act=act,
-                                   param_attr=fluid.ParamAttr(name=name + "_l", initializer=initializer,
+                                   param_attr=fluid.ParamAttr(name=name + "_l.w_0", initializer=initializer,
                                                              learning_rate=learning_rate),
+                                   bias_attr=name+"_l.b_0"
                                    )
     neigh_feature = fluid.layers.fc(neigh_feature,
                                     hidden_size,
                                     act=act,
-                                    param_attr=fluid.ParamAttr(name=name + "_r", initializer=initializer,
+                                    param_attr=fluid.ParamAttr(name=name + "_r.w_0", initializer=initializer,
                                                               learning_rate=learning_rate),
+                                    bias_attr=name+"_r.b_0"
                                     )
     output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
     output = fluid.layers.l2_normalize(output, axis=1)
...
@@ -25,7 +25,7 @@ except:
 import numpy as np
 import time
 import paddle.fluid as fluid
-from queue import Queue
+from multiprocessing import Queue
 import threading
...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册