Commit 32f5c9dd authored by qiaolongfei

recurrent_op pass the unit test

Parent 7163dd04
@@ -28,7 +28,8 @@ using Variable = framework::Variable;
 using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
-void RecurrentAlgorithm::InferShape(const Scope& scope) const {
+void RecurrentAlgorithm::Run(const Scope& scope,
+                             const platform::DeviceContext& dev_ctx) const {
   auto* input0 = scope.FindVar(arg_->inlinks[0]);
   PADDLE_ENFORCE_NOT_NULL(input0);
   seq_len_ = input0->GetMutable<LoDTensor>()->dims()[0];
@@ -36,38 +37,16 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const {
   CreateScopes(scope);
   auto& step_scopes = GetStepScopes(scope);
-  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
-                     true /*infer_shape_mode*/);
-  InitMemories(step_scopes[0], true /*infer_shape_mode*/);
+  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
+  InitMemories(step_scopes[0]);
   for (size_t i = 0; i < seq_len_; i++) {
     if (i > 0) {
-      rnn::LinkMemories(step_scopes, arg_->memories, i, -1,
-                        true /*infer_shape_mode*/);
-    }
-    (*stepnet_)->InferShape(*step_scopes[i]);
-  }
-  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
-                     true /*infer_shape_mode*/);
-}
-
-void RecurrentAlgorithm::Run(const Scope& scope,
-                             const platform::DeviceContext& dev_ctx) const {
-  auto step_scopes = GetStepScopes(scope);
-  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
-                     false /*infer_shape_mode*/);
-  InitMemories(step_scopes[0], false /*infer_shape_mode*/);
-  for (size_t step_id = 0; step_id < seq_len_; step_id++) {
-    // create output alias variables
-    if (step_id > 0) {
-      rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1,
-                        false /*infer_shape_mode*/);
+      rnn::LinkMemories(step_scopes, arg_->memories, i, -1);
     }
-    (*stepnet_)->Run(*step_scopes[step_id], dev_ctx);
+    (*stepnet_)->Run(*step_scopes[i], dev_ctx);
   }
-  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
-                     false /*infer_shape_mode*/);
+  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_);
 }
 
 void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
@@ -105,8 +84,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
   }
 }
 
-void RecurrentAlgorithm::InitMemories(Scope* step_scope,
-                                      bool infer_shape_mode) const {
+void RecurrentAlgorithm::InitMemories(Scope* step_scope) const {
   for (auto& attr : arg_->memories) {
     auto* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable<LoDTensor>();
     PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
@@ -114,12 +92,9 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
                    attr.boot_var);
     auto* boot_mem =
         step_scope->FindVar(attr.boot_var)->GetMutable<LoDTensor>();
-    if (infer_shape_mode) {
-      pre_mem->Resize(boot_mem->dims());
-      PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
-    } else {
-      pre_mem->ShareDataWith<float>(*boot_mem);
-    }
+    pre_mem->Resize(boot_mem->dims());
+    PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
+    pre_mem->ShareDataWith<float>(*boot_mem);
   }
 }
@@ -169,23 +144,22 @@ class RecurrentAlgorithmProtoAndCheckerMaker
 void RecurrentGradientAlgorithm::Run(
     const Scope& scope, const platform::DeviceContext& dev_ctx) const {
+  seq_len_ =
+      scope.FindVar(arg_->inlinks[0])->GetMutable<LoDTensor>()->dims()[0];
   auto step_scopes = GetStepScopes(scope);
-  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
-                     false /*infer_shape_mode*/);
+  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
   for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
     if (static_cast<size_t>(step_id) != seq_len_ - 1) {
-      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1,
-                        false /*infer_shape_mode*/);
+      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1);
     }
     (*stepnet_)->Run(*step_scopes[step_id], dev_ctx);
   }
-  LinkBootMemoryGradients(step_scopes[0], false);
-  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
-                     false /*infer_shape_mode*/);
+  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_);
+  LinkBootMemoryGradients(step_scopes[0]);
 }
 
 void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
-    Scope* step_scope, bool infer_shape_mode) const {
+    Scope* step_scope) const {
   for (auto& attr : arg_->memories) {
     PADDLE_ENFORCE(step_scope->FindVar(attr.var) != nullptr,
                    "memory variable [%s] does not exists", attr.var);
@@ -194,30 +168,9 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
     auto* mem_grad = step_scope->NewVar(attr.var)->GetMutable<LoDTensor>();
     auto* boot_mem_grad =
         step_scope->NewVar(attr.boot_var)->GetMutable<LoDTensor>();
-    if (infer_shape_mode) {
-      boot_mem_grad->Resize(mem_grad->dims());
-    } else {
-      boot_mem_grad->ShareDataWith<float>(*mem_grad);
-    }
+    boot_mem_grad->Resize(mem_grad->dims());
+    boot_mem_grad->ShareDataWith<float>(*mem_grad);
   }
 }
 
-void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const {
-  seq_len_ =
-      scope.FindVar(arg_->inlinks[0])->GetMutable<LoDTensor>()->dims()[0];
-  auto step_scopes = GetStepScopes(scope);
-  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_,
-                     true /*infer_shape_mode*/);
-  for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
-    if (static_cast<size_t>(step_id) != seq_len_ - 1) {
-      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1,
-                        true /*infer_shape_mode*/);
-    }
-    (*stepnet_)->InferShape(*step_scopes[step_id]);
-  }
-  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_,
-                     true /*infer_shape_mode*/);
-  LinkBootMemoryGradients(step_scopes[0], true /*infer_shape_mode*/);
-}
-
 RecurrentGradientOp::RecurrentGradientOp(
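
Aside: a minimal numpy sketch (an illustration, not code from this commit) of the memory wiring that InitMemories and LinkMemories now perform unconditionally — the pre-memory of step 0 aliases the boot memory, and the pre-memory of any later step aliases the memory produced by a neighbouring step (offset -1 in the forward pass, +1 in the backward pass).

import numpy as np

def pre_memory(mems, h_boot, step_id, offset=-1):
    # Step 0: InitMemories makes pre_var share data with boot_var.
    if step_id == 0:
        return h_boot
    # Later steps: LinkMemories makes pre_var of this step share data with
    # var of the step at step_id + offset.
    return mems[step_id + offset]

batch_size, input_dim, sent_len = 2, 3, 4
h_boot = np.zeros((batch_size, input_dim), dtype="float32")
mems = [np.ones((batch_size, input_dim), dtype="float32") * i for i in range(sent_len)]
assert pre_memory(mems, h_boot, 0) is h_boot
assert pre_memory(mems, h_boot, 2) is mems[1]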
......
@@ -41,11 +41,6 @@ class RecurrentAlgorithm {
     stepnet_ = stepnet;
   }
 
-  /**
-   * InferShape must be called before Run.
-   */
-  void InferShape(const framework::Scope& scope) const;
-
  protected:
   /*
    * The step scopes will be stored in the father scope as a variable.
@@ -61,7 +56,7 @@ class RecurrentAlgorithm {
         ->GetMutable<std::vector<framework::Scope*>>();
   }
 
-  void InitMemories(framework::Scope* step_scopes, bool infer_shape_mode) const;
+  void InitMemories(framework::Scope* step_scopes) const;
 
  private:
   std::unique_ptr<framework::OperatorBase>* stepnet_;
@@ -91,13 +86,7 @@ class RecurrentGradientAlgorithm {
   void Run(const framework::Scope& scope,
            const platform::DeviceContext& dev_ctx) const;
 
-  void LinkBootMemoryGradients(framework::Scope* step_scopes,
-                               bool infer_shape_mode) const;
-
-  /**
-   * InferShape must be called before Run.
-   */
-  void InferShape(const framework::Scope& scope) const;
+  void LinkBootMemoryGradients(framework::Scope* step_scopes) const;
 
  protected:
   inline const std::vector<framework::Scope*>& GetStepScopes(
@@ -136,10 +125,6 @@ class RecurrentOp : public framework::OperatorBase {
   const OperatorBase& stepnet() const { return *stepnet_; }
 
-  void InferShape(const framework::Scope& scope) const {
-    alg_.InferShape(scope);
-  }
-
   static const rnn::ArgumentName kArgName;
 
  private:
@@ -162,10 +147,6 @@ class RecurrentGradientOp : public framework::OperatorBase {
     PADDLE_THROW("Not Implemented");
   }
 
-  void InferShape(const framework::Scope& scope) const {
-    alg_.InferShape(scope);
-  }
-
   void Run(const framework::Scope& scope,
            const platform::DeviceContext& dev_ctx) const override {
     alg_.Run(scope, dev_ctx);
......
@@ -25,7 +25,7 @@ using LoDTensor = framework::LoDTensor;
 
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& inlinks,
-                   const size_t seq_len, bool infer_shape_mode) {
+                   const size_t seq_len) {
   PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided.");
   for (size_t i = 0; i < inlinks.size(); ++i) {
     // global inputs
@@ -41,11 +41,9 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
          step_scopes[j]->NewVar(inlinks[i])->GetMutable<Tensor>();
-      if (!infer_shape_mode) {
-        // The input of operators of each step is Tensor here.
-        // Maybe need to modify Slice function.
-        *step_input = input->Slice<float>(j, j + 1);
-      }
+      // The input of operators of each step is Tensor here.
+      // Maybe need to modify Slice function.
+      *step_input = input->Slice<float>(j, j + 1);
       step_input->Resize(step_dims);
     }
   }
@@ -53,39 +51,35 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
 
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& outlinks,
-                   const size_t seq_len, bool infer_shape_mode) {
+                   const size_t seq_len) {
   for (size_t i = 0; i < outlinks.size(); i++) {
     auto output_var = step_scopes[0]->parent().FindVar(outlinks[i]);
     PADDLE_ENFORCE_NOT_NULL(output_var, "output link [%s] is not in scope.",
                             outlinks[i]);
     LoDTensor* output = output_var->GetMutable<LoDTensor>();
-    if (infer_shape_mode) {
-      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]);
-      PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]);
-      f::DDim step_dims =
-          step_scope_var->template GetMutable<LoDTensor>()->dims();
-      std::vector<int64_t> dims_vec = vectorize(step_dims);
-      dims_vec.insert(dims_vec.begin(), seq_len);
-      output->Resize(f::make_ddim(dims_vec));
-    } else {
-      output->mutable_data<float>(platform::CPUPlace());
-      for (size_t j = 0; j < seq_len; j++) {
-        LoDTensor* step_output =
-            step_scopes[j]->FindVar(outlinks[i])->GetMutable<LoDTensor>();
-        // TODO(luotao02) data type and platform::DeviceContext() should set
-        // correctly
-        (output->Slice<float>(j, j + 1))
-            .CopyFrom<float>(*step_output, platform::CPUPlace());
-      }
-    }
+    auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]);
+    PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]);
+    f::DDim step_dims =
+        step_scope_var->template GetMutable<LoDTensor>()->dims();
+    std::vector<int64_t> dims_vec = vectorize(step_dims);
+    dims_vec.insert(dims_vec.begin(), seq_len);
+    output->Resize(f::make_ddim(dims_vec));
+    output->mutable_data<float>(platform::CPUPlace());
+    for (size_t j = 0; j < seq_len; j++) {
+      LoDTensor* step_output =
+          step_scopes[j]->FindVar(outlinks[i])->GetMutable<LoDTensor>();
+      // TODO(luotao02) data type and platform::DeviceContext() should set
+      // correctly
+      (output->Slice<float>(j, j + 1))
+          .CopyFrom<float>(*step_output, platform::CPUPlace());
    }
   }
 }
 
 void LinkMemories(const std::vector<Scope*>& scopes,
                   const std::vector<rnn::MemoryAttr>& memories,
-                  const size_t step_id, const int offset,
-                  bool infer_shape_mode) {
+                  const size_t step_id, const int offset) {
   PADDLE_ENFORCE_LT(step_id, scopes.size(),
                     "step [%d] is out of range of step scopes' size [%d]",
                     step_id, scopes.size());
@@ -100,11 +94,8 @@ void LinkMemories(const std::vector<Scope*>& scopes,
   for (auto& attr : memories) {
     auto mem = scope->FindVar(attr.pre_var)->GetMutable<LoDTensor>();
     auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable<LoDTensor>();
-    if (infer_shape_mode) {
-      mem->Resize(linked_mem->dims());
-    } else {
-      mem->ShareDataWith<float>(*linked_mem);
-    }
+    mem->Resize(linked_mem->dims());
+    mem->ShareDataWith<float>(*linked_mem);
   }
 }
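
Aside: a small numpy analogue (illustration only, not part of this diff) of what SegmentInputs and ConcatOutputs do around the step net — split a [seq_len, batch, dim] input into one tensor per step, then stack the per-step outputs back with a leading seq_len axis, mirroring input->Slice<float>(j, j + 1) and dims_vec.insert(dims_vec.begin(), seq_len) above.

import numpy as np

seq_len, batch, dim = 11, 50, 30
x = np.random.normal(size=(seq_len, batch, dim)).astype("float32")

# SegmentInputs: one slice per step, analogous to input->Slice<float>(j, j + 1).
step_inputs = [x[j] for j in range(seq_len)]

# Stand-in for the step net; any elementwise op keeps the per-step shape.
step_outputs = [np.tanh(s) for s in step_inputs]

# ConcatOutputs: prepend the sequence axis and copy each step's result back.
out = np.stack(step_outputs)
assert out.shape == (seq_len, batch, dim)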
......
@@ -64,18 +64,18 @@ struct ArgumentName {
  */
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& inlinks,
-                   const size_t seq_len, bool infer_shape_mode);
+                   const size_t seq_len);
 
 /**
  * Process outputs of step nets and merge to variables.
  */
 void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<std::string>& outlinks,
-                   const size_t seq_len, bool infer_shape_mode);
+                   const size_t seq_len);
 
 void LinkMemories(const std::vector<Scope*>& step_scopes,
                   const std::vector<MemoryAttr>& memories, const size_t step_id,
-                  const int offset, bool infer_shape_mode);
+                  const int offset);
 
 void InitArgument(const ArgumentName& name, Argument* arg,
                   const framework::OperatorBase& op, bool is_grad = false);
......
@@ -22,14 +22,15 @@ class SumOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContextBase* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");
     auto x_dims = ctx->GetInputsDim("X");
-    PADDLE_ENFORCE(!x_dims.empty(), "Input(X) of SumOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of SumOp should not be null.");
-    auto in_dim = x_dims[0];
     size_t N = x_dims.size();
     PADDLE_ENFORCE_GT(N, 1, "Input tensors count should > 1.");
+    auto in_dim = x_dims[0];
     for (size_t i = 1; i < N; i++) {
       auto dim = x_dims[i];
       PADDLE_ENFORCE(in_dim == dim, "Input tensors must have same shape");
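
Aside: with the reordered checks, SumOp::InferShape verifies that Inputs(X) exist, that there is more than one input, and that all inputs share one shape before Out is inferred; the test file below switches its step net from the two-input add op to this sum op. A numpy sketch of that contract (illustration only, not the operator's implementation):

import numpy as np

def sum_op_reference(xs):
    # Mirrors the InferShape checks: more than one input, all with the same shape.
    assert len(xs) > 1, "Input tensors count should > 1."
    assert all(x.shape == xs[0].shape for x in xs), "Input tensors must have same shape"
    return np.add.reduce(xs)

wx = np.ones((2, 3), dtype="float32")
uh = np.full((2, 3), 2.0, dtype="float32")
print(sum_op_reference([wx, uh]))  # elementwise sum with the common shape (2, 3)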
......
@@ -16,14 +16,17 @@ class PySimpleRNN(object):
     '''
 
     def __init__(self, input_dim=30, batch_size=50, weight_dim=15, sent_len=11):
-        self.x = np.random.normal(size=(sent_len, batch_size, input_dim))
-        self.W = np.random.normal(size=(input_dim, input_dim))
-        self.U = np.random.normal(size=(input_dim, input_dim))
-        self.h_boot = np.random.normal(size=(batch_size, input_dim))
+        self.x = np.random.normal(size=(sent_len, batch_size,
+                                        input_dim)).astype("float32")
+        self.W = np.random.normal(size=(input_dim, input_dim)).astype("float32")
+        self.U = np.random.normal(size=(input_dim, input_dim)).astype("float32")
+        self.h_boot = np.random.normal(size=(batch_size,
+                                             input_dim)).astype("float32")
 
         # memories
         self.mems = [
-            np.zeros(shape=(batch_size, input_dim)) for i in range(sent_len)
+            np.zeros(shape=(batch_size, input_dim)).astype("float32")
+            for i in range(sent_len)
         ]
 
     def forward(self):
@@ -36,7 +39,7 @@ class PySimpleRNN(object):
         return [self.x[i] for i in range(self.x.shape[0])]
 
     def concat_outputs(self):
-        return np.array(self.mems)
+        return np.array(self.mems).astype("float32")
 
     def step(self, step_id, x):
         '''
@@ -47,8 +50,8 @@ class PySimpleRNN(object):
             pre_mem = self.mems[step_id - 1]
         else:
             pre_mem = self.h_boot
-        xW = np.matmul(x, self.W)
-        hU = np.matmul(pre_mem, self.U)
+        xW = np.matmul(x, self.W).astype("float32")
+        hU = np.matmul(pre_mem, self.U).astype("float32")
 
         sum = xW + hU
         self.mems[step_id] = py_sigmoid(sum)
@@ -102,7 +105,8 @@ class RecurrentOpTest(unittest.TestCase):
         self.create_step_net()
         ctx = core.DeviceContext.create(core.CPUPlace())
         self.rnnop.run(self.scope, ctx)
-        return np.array(self.scope.find_var("h@mem").get_tensor())
+        return np.array(self.scope.find_var("h@mem").get_tensor()).astype(
+            "float32")
 
     def create_global_variables(self):
         # create inlink
@@ -142,7 +146,7 @@ class RecurrentOpTest(unittest.TestCase):
         stepnet = core.Net.create()
         x_fc_op = Operator("mul", X="x", Y="W", Out="Wx")
        h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
-        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
+        sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum")
         sig_op = Operator("sigmoid", X="sum", Y="h@mem")
 
         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
@@ -179,7 +183,7 @@ class RecurrentGradientOpTest(unittest.TestCase):
         stepnet = core.Net.create()
         x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
         h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
-        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
+        sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum")
         sig_op = Operator("sigmoid", X="sum", Y="h@alias")
 
         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
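
Aside: one step of the PySimpleRNN baseline that these tests compare against, written out as a standalone numpy snippet (illustration only; py_sigmoid is not shown in this excerpt and is assumed here to be the standard logistic function):

import numpy as np

def py_sigmoid(x):
    # Assumed definition of the helper used by PySimpleRNN.step above.
    return 1.0 / (1.0 + np.exp(-x))

def rnn_step(x, pre_mem, W, U):
    xW = np.matmul(x, W).astype("float32")
    hU = np.matmul(pre_mem, U).astype("float32")
    return py_sigmoid(xW + hU)

batch_size, input_dim = 50, 30
x = np.random.normal(size=(batch_size, input_dim)).astype("float32")
h_boot = np.random.normal(size=(batch_size, input_dim)).astype("float32")
W = np.random.normal(size=(input_dim, input_dim)).astype("float32")
U = np.random.normal(size=(input_dim, input_dim)).astype("float32")
h = rnn_step(x, h_boot, W, U)  # shape (batch_size, input_dim), values in (0, 1)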
......