Commit 7665bdba authored by Yan Chunwei, committed by GitHub

Rnn forward logic test (#3291)

* finish forward debug
Parent ec2c753c
@@ -174,7 +174,11 @@ class OperatorContext {
   template <typename T>
   T* Output(const size_t index) const {
     auto var = OutputVar(index);
-    PADDLE_ENFORCE(var != nullptr, "Output(%d) should not be nullptr", index);
+    PADDLE_ENFORCE(
+        var != nullptr,
+        "Output(%d) should not be nullptr, which means variable [%s] does not "
+        "exist in scope",
+        index, op_.outputs_[index]);
     return var->GetMutable<T>();
   }
......
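The richer message above names the missing variable, not just its index. A standalone sketch (plain C++ with assumed containers, not Paddle code) of the failure report this enables:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
  // Hypothetical op with two declared outputs; only one exists in the scope.
  std::vector<std::string> outputs = {"h", "step_scopes"};
  std::map<std::string, int> scope = {{"h", 1}};
  for (size_t i = 0; i < outputs.size(); ++i) {
    if (scope.find(outputs[i]) == scope.end()) {
      // Index alone would only say "Output(1)"; the name makes it actionable.
      std::fprintf(stderr,
                   "Output(%zu) should not be nullptr, which means variable "
                   "[%s] does not exist in scope\n",
                   i, outputs[i].c_str());
    }
  }
  return 0;
}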
@@ -20,8 +20,8 @@ namespace operators {
 class AddOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 2, "Input size of AddOp must be two");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of AddOp must be one");
+    PADDLE_ENFORCE_EQ(ctx.InputSize(), 2);
+    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1);
     PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr,
                    "Inputs of AddOp must all be set");
     PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr,
......
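PADDLE_ENFORCE_EQ receives the two operands separately rather than a pre-evaluated boolean, so the macro can report both values when the check fails. A rough standalone sketch of that idea (an illustration only, not Paddle's actual macro definition):

#include <sstream>
#include <stdexcept>

// Illustration only: a comparison macro that stringizes its operands and
// embeds their runtime values in the error, which a plain boolean
// PADDLE_ENFORCE(a == b, ...) cannot do.
#define SKETCH_ENFORCE_EQ(a, b)                                          \
  do {                                                                   \
    if ((a) != (b)) {                                                    \
      std::ostringstream os;                                             \
      os << "enforce " #a " == " #b " failed, " << (a) << " != " << (b); \
      throw std::runtime_error(os.str());                                \
    }                                                                    \
  } while (0)

int main() {
  SKETCH_ENFORCE_EQ(2, 2);  // passes silently
  // SKETCH_ENFORCE_EQ(2, 1);  // would throw: enforce 2 == 1 failed, 2 != 1
  return 0;
}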
@@ -23,12 +23,16 @@ class MulOp : public OperatorWithKernel {
     PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs");
     auto dim0 = ctx.Input<Tensor>(0)->dims();
     auto dim1 = ctx.Input<Tensor>(1)->dims();
-    PADDLE_ENFORCE(dim0.size() == 2 && dim1.size() == 2,
-                   "The input of mul op must be matrix");
-    PADDLE_ENFORCE(
-        dim0[1] == dim1[0],
+    PADDLE_ENFORCE_EQ(dim0.size(), 2,
+                      "input X(%s) should be a tensor with 2 dims, a matrix",
+                      ctx.op_.Input("X"));
+    PADDLE_ENFORCE_EQ(dim1.size(), 2,
+                      "input Y(%s) should be a tensor with 2 dims, a matrix",
+                      ctx.op_.Input("Y"));
+    PADDLE_ENFORCE_EQ(
+        dim0[1], dim1[0],
         "First matrix's width must be equal to second matrix's height.");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "The mul op must take one output");
+    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "The mul op takes only one output");
     ctx.Output<Tensor>(0)->Resize({dim0[0], dim1[1]});
   }
 };
......
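For reference, the shape rule these checks guard, as a minimal standalone sketch (plain C++ with assumed std::vector shapes instead of Paddle's DDim): X of shape [m, k] times Y of shape [k, n] yields [m, n].

#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical helper mirroring MulOp::InferShape's checks.
std::vector<int> MulInferShape(const std::vector<int>& x,
                               const std::vector<int>& y) {
  assert(x.size() == 2 && "input X should be a tensor with 2 dims, a matrix");
  assert(y.size() == 2 && "input Y should be a tensor with 2 dims, a matrix");
  assert(x[1] == y[0] &&
         "First matrix's width must be equal to second matrix's height.");
  return {x[0], y[1]};
}

int main() {
  auto out = MulInferShape({50, 30}, {30, 15});
  std::printf("output dims: [%d, %d]\n", out[0], out[1]);  // [50, 15]
  return 0;
}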
@@ -36,6 +36,7 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const {
   InitMemories(step_scopes[0], true /*infer_shape_mode*/);
   Variable* net = scope.FindVar(arg_->step_net);
   PADDLE_ENFORCE(net != nullptr, "failed to get step net");
+
   for (size_t i = 0; i < seq_len_; i++) {
     if (i > 0) {
       rnn::LinkMemories(step_scopes, arg_->memories, i, -1,
@@ -56,6 +57,7 @@ void RecurrentAlgorithm::Run(const Scope& scope,
   Variable* net = scope.FindVar(arg_->step_net);
   for (size_t step_id = 0; step_id < seq_len_; step_id++) {
+    // create output alias variables
     if (step_id > 0) {
       rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1,
                         false /*infer_shape_mode*/);
@@ -67,22 +69,31 @@ void RecurrentAlgorithm::Run(const Scope& scope,
 }

 void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
-  // TODO(xxx) Only two scopes are needed for inference, this case will be
+  // TODO(superjom) Only two scopes are needed for inference, this case will be
   // supported later.
-  auto step_scopes =
-      scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();
+  auto step_scopes_var = scope.FindVar(arg_->step_scopes);
+  PADDLE_ENFORCE(step_scopes_var != nullptr, "");
+  auto step_scopes = step_scopes_var->GetMutable<std::vector<Scope*>>();
+
+  // Now all variables in scope must be created outside of op.
+  auto net_var = scope.FindVar(arg_->step_net);
+  PADDLE_ENFORCE(net_var != nullptr, "no stepnet called %s in scope",
+                 arg_->step_net);
+  auto net_op = net_var->GetMutable<NetOp>();
+  PADDLE_ENFORCE(!net_op->outputs_.empty(), "net_op has no outputs");
+
   if (seq_len_ > step_scopes->size()) {
     for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
       auto& step_scope = scope.NewScope();

-      // Now all variables in scope must be created outside of op.
-      auto net_op = scope.FindVar(arg_->step_net)->GetMutable<NetOp>();
+      // create step net's temp inputs
       for (auto& input : net_op->inputs_) {
         // the weights are located in the parent scope
-        if (!step_scope.FindVar(input)) step_scope.NewVar(input);
+        if (!step_scope.FindVar(input))
+          step_scope.NewVar(input)->GetMutable<Tensor>();
       }
-      for (auto& output : net_op->outputs_) {
+      // create stepnet's outputs
+      for (const auto& output : net_op->outputs_) {
         step_scope.NewVar(output);
       }
       step_scopes->emplace_back(&step_scope);
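The hunk above hoists the step-net lookup out of the per-step loop and creates one child scope per time step. A standalone sketch of the scope hierarchy it relies on (simplified assumed types, not Paddle's Scope API): step-local variables live in each child scope, while shared weights are found through the parent.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Scope {
  const Scope* parent = nullptr;
  std::map<std::string, float> vars;
  // Look up locally first, then fall back to the parent chain.
  const float* Find(const std::string& name) const {
    auto it = vars.find(name);
    if (it != vars.end()) return &it->second;
    return parent ? parent->Find(name) : nullptr;
  }
};

int main() {
  Scope global;
  global.vars["W"] = 1.0f;  // weight created once, outside the step scopes
  std::vector<Scope> steps(3);
  for (auto& s : steps) {
    s.parent = &global;
    s.vars["h@alias"] = 0.0f;  // step-local memory variable
  }
  // Step scopes see the shared weight through the parent chain.
  std::printf("W found in step 0: %s\n", steps[0].Find("W") ? "yes" : "no");
  return 0;
}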
@@ -100,6 +111,7 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope,
   Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable<Tensor>();
   if (infer_shape_mode) {
     pre_mem->Resize(boot_mem->dims());
+    PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
   } else {
     pre_mem->ShareDataWith<float>(*boot_mem);
   }
......
@@ -53,11 +53,13 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
     PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
                    outlinks[i].external);
     Tensor* output = output_var->GetMutable<Tensor>();
+
     if (infer_shape_mode) {
-      fmw::DDim step_dims = step_scopes[0]
-                                ->FindVar(outlinks[i].internal)
-                                ->GetMutable<Tensor>()
-                                ->dims();
+      auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
+      PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
+                     outlinks[i].internal);
+      fmw::DDim step_dims =
+          step_scope_var->template GetMutable<Tensor>()->dims();
       std::vector<int> dims_vec = vectorize(step_dims);
       dims_vec.insert(dims_vec.begin(), seq_len);
       output->Resize(fmw::make_ddim(dims_vec));
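The dims arithmetic here just prepends the sequence length to a per-step shape. A minimal sketch, assuming the shapes used by the test further below (batch_size 50, input_dim 30, sent_len 11):

#include <cstdio>
#include <vector>

int main() {
  // One step's output: [batch_size, input_dim].
  std::vector<int> dims_vec = {50, 30};
  int seq_len = 11;
  // ConcatOutputs prepends seq_len, giving [11, 50, 30].
  dims_vec.insert(dims_vec.begin(), seq_len);
  for (int d : dims_vec) std::printf("%d ", d);  // prints: 11 50 30
  std::printf("\n");
  return 0;
}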
@@ -79,14 +81,15 @@ void LinkMemories(const std::vector<Scope*>& scopes,
                   const std::vector<rnn::MemoryAttr>& memories,
                   const size_t step_id, const int offset,
                   bool infer_shape_mode) {
-  PADDLE_ENFORCE(step_id < scopes.size(),
-                 "step [%d] is out of range of step scopes' size [%d]", step_id,
-                 scopes.size());
-  PADDLE_ENFORCE(static_cast<int>(step_id) + offset >= 0,
-                 "offset [%d] must be large than -[%d]", offset, step_id);
-  PADDLE_ENFORCE(step_id + offset < scopes.size(),
-                 "offset [%d] is out of range, it must be less than (%d - %d)",
-                 offset, scopes.size(), step_id);
+  PADDLE_ENFORCE_LT(step_id, scopes.size(),
+                    "step [%d] is out of range of step scopes' size [%d]",
+                    step_id, scopes.size());
+  PADDLE_ENFORCE_GE(static_cast<int>(step_id) + offset, 0,
+                    "offset [%d] must be larger than -[%d]", offset, step_id);
+  PADDLE_ENFORCE_LT(
+      step_id + offset, scopes.size(),
+      "offset [%d] is out of range, it must be less than (%d - %d)", offset,
+      scopes.size(), step_id);
   auto scope = scopes[step_id];
   auto linked_scope = scopes[step_id + offset];
   for (auto& attr : memories) {
......
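The three bounds checks pin down what linking means: step step_id reads its pre-memory from the scope at step_id + offset (offset -1 being the previous step). A standalone sketch of that index arithmetic, with plain asserts standing in for the PADDLE_ENFORCE_* calls:

#include <cassert>
#include <cstdio>

// Hypothetical helper mirroring LinkMemories' bounds checks.
size_t LinkedIndex(size_t step_id, int offset, size_t num_scopes) {
  assert(step_id < num_scopes);                     // PADDLE_ENFORCE_LT
  assert(static_cast<int>(step_id) + offset >= 0);  // PADDLE_ENFORCE_GE
  assert(step_id + offset < num_scopes);            // PADDLE_ENFORCE_LT
  return step_id + offset;
}

int main() {
  const size_t seq_len = 4;
  for (size_t step = 1; step < seq_len; ++step) {
    // Each step's pre-memory (h@pre) links to the previous step's memory.
    std::printf("step %zu reads memory from step %zu\n", step,
                LinkedIndex(step, /*offset=*/-1, seq_len));
  }
  return 0;
}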
+import logging
 import paddle.v2.framework.core as core
 import unittest
 import numpy as np
@@ -7,10 +8,9 @@ ops = creation.op_creations


 def create_tensor(scope, name, shape):
-    tensor = scope.create_var(name).get_tensor()
+    tensor = scope.new_var(name).get_tensor()
     tensor.set_dims(shape)
-    tensor.alloc_float()
-    tensor.set(np.random.random(shape))
+    tensor.set(np.random.random(shape), core.CPUPlace())
     return tensor
@@ -31,40 +31,36 @@ class TestRNN(unittest.TestCase):
         - h
     '''

+    input_dim = 30
+    batch_size = 50
+    weight_dim = 15
+    sent_len = 11
+
     def init(self):
-        input_dim = 30
-        batch_size = 50
-        weight_dim = 15
-
-        self.scope = core.Scope(None)
-
-        # create vars
-        create_tensor(self.scope, "x", [batch_size, input_dim])
-        create_tensor(self.scope, "W", [input_dim, weight_dim])
-        create_tensor(self.scope, "U", [weight_dim, weight_dim])
-        create_tensor(self.scope, "h_boot", [batch_size, weight_dim])
-
-        x_alias = "x@alias"
-        y_alias = "y@alias"
-        memory = "h@alias"
-        prememory = "h@pre"
-        output = "rnn_out"
-        output_alias = "rnn_out@alias"
-
-        # create step net
-        stepnet_var = self.scope.create_var("stepnet")
-        stepnet = stepnet_var.get_net()
-        # stepnet = core.Net.create()
-        x_fc_op = ops.fc(X=x_alias, W="W", Y="Wx")
-        h_fc_op = ops.fc(X=prememory, W="U", Y="Uh")
-        sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
-        sig_op = ops.sigmoid(X="sum", Y=memory)
-        stepnet.add_op(x_fc_op)
-        stepnet.add_op(h_fc_op)
-        stepnet.add_op(sum_op)
-        stepnet.add_op(sig_op)
-        stepnet.complete_add_op(True)
+        self.scope = core.Scope()
+        self.create_global_variables()
+        self.create_step_net()
+        rnn_op = self.create_rnn_op()
+        ctx = core.DeviceContext.create(core.CPUPlace())
+        print 'infer_shape'
+        rnn_op.infer_shape(self.scope)
+        rnn_op.run(self.scope, ctx)
+
+    def create_global_variables(self):
+        # create inlink
+        create_tensor(self.scope, "x",
+                      [self.sent_len, self.batch_size, self.input_dim])
+        create_tensor(self.scope, "W", [self.input_dim, self.input_dim])
+        create_tensor(self.scope, "U", [self.input_dim, self.input_dim])
+        create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim])
+        self.scope.new_var("step_scopes")
+        self.scope.new_var("h@alias")
+        self.scope.new_var("h")

+    def create_rnn_op(self):
         # create RNNOp
         rnnop = ops.recurrent_op(
             # inputs
@@ -72,17 +68,27 @@ class TestRNN(unittest.TestCase):
             boot_memories=["h_boot"],
             step_net="stepnet",
             # outputs
-            outlinks=[output],
+            outlinks=["h"],
             step_scopes="step_scopes",
             # attributes
             inlink_alias=["x@alias"],
-            outlink_alias=[output_alias],
-            pre_memories=[prememory],
-            memories=[memory])
+            outlink_alias=["h@alias"],
+            pre_memories=["h@pre"],
+            memories=["h@alias"])
+        return rnnop

-        ctx = core.DeviceContext.cpu_context()
-        rnnop.infer_shape(self.scope)
-        rnnop.run(self.scope, ctx)
+    def create_step_net(self):
+        var = self.scope.new_var("stepnet")
+        stepnet = var.get_net()
+        x_fc_op = ops.fc(X="x@alias", W="W", Y="Wx")
+        h_fc_op = ops.fc(X="h@pre", W="U", Y="Uh")
+        sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
+        sig_op = ops.sigmoid(X="sum", Y="h@alias")
+        for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
+            stepnet.add_op(op)
+        stepnet.complete_add_op(True)

     def test_recurrent(self):
         self.init()
......