diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc
index b6a20615783c4dd9eea3dd4a8a189ea03acb2bef..913cd0f81eaef37014f38c71e7c3d23bfeec1466 100644
--- a/paddle/framework/backward.cc
+++ b/paddle/framework/backward.cc
@@ -321,8 +321,6 @@ static void CreateGradVarInBlock(
     auto* param = block_desc->FindVarRecursive(pname);
     auto* grad = block_desc->FindVar(arg);
     if (param == nullptr) {
-      LOG(WARNING) << "Cannot find forward variable of " << arg
-                   << ". Set its gradient to FP32";
       grad->SetDataType(DataType::FP32);
     } else {
       grad->SetDataType(param->GetDataType());
diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc
index 9e3d597f3a2c84623a1ce9e4b6f4b956cffde211..11764810e1d40e5e6eb3cd0d8e9b4b63a79855b4 100644
--- a/paddle/framework/block_desc.cc
+++ b/paddle/framework/block_desc.cc
@@ -50,6 +50,15 @@ VarDescBind *BlockDescBind::FindVarRecursive(const std::string &name) const {
   return it->second.get();
 }
 
+VarDescBind *BlockDescBind::FindRecursiveOrCreateVar(
+    const std::string &name_bytes) {
+  VarDescBind *res = FindVarRecursive(name_bytes);
+  if (res == nullptr) {
+    res = Var(name_bytes);
+  }
+  return res;
+}
+
 bool BlockDescBind::HasVarRecursive(const std::string &name) const {
   return FindVarRecursive(name) != nullptr;
 }
diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h
index 26adf6a20ff09483b84f479db08efcf402135053..8e967e5378eb47a7869efb59cc96a271f1cbb9a1 100644
--- a/paddle/framework/block_desc.h
+++ b/paddle/framework/block_desc.h
@@ -58,6 +58,8 @@ class BlockDescBind {
   VarDescBind *FindVarRecursive(const std::string &name_bytes) const;
 
+  VarDescBind *FindRecursiveOrCreateVar(const std::string &name_bytes);
+
   bool HasVarRecursive(const std::string &var_name) const;
 
   std::set<std::string> LocalVarNames() const {
diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc
index e7cba9e702ce0f96a9680169f0593130df2fd096..39c8def82e1ebb10a0e357a648af760099020c32 100644
--- a/paddle/framework/op_desc.cc
+++ b/paddle/framework/op_desc.cc
@@ -357,7 +357,8 @@ void OpDescBind::InferVarType(BlockDescBind *block) const {
              "LOD_TENSOR";
   for (auto &out_pair : this->outputs_) {
     for (auto &out_var_name : out_pair.second) {
-      block->Var(out_var_name)->SetType(VarDesc::LOD_TENSOR);
+      block->FindRecursiveOrCreateVar(out_var_name)
+          ->SetType(VarDesc::LOD_TENSOR);
     }
   }
 }
diff --git a/paddle/operators/lod_rank_table_op.cc b/paddle/operators/lod_rank_table_op.cc
index ce010fcb91873b3099f6bf52cfe20c1ff61846ea..f7d4db1947b83fecf57575e17fafe26795c92bdd 100644
--- a/paddle/operators/lod_rank_table_op.cc
+++ b/paddle/operators/lod_rank_table_op.cc
@@ -66,7 +66,8 @@ class LoDRankTableInferVarType : public framework::VarTypeInference {
   void operator()(const framework::OpDescBind &op_desc,
                   framework::BlockDescBind *block) const override {
     for (auto &o : op_desc.Output("Out")) {
-      block->Var(o)->SetType(framework::VarDesc::LOD_RANK_TABLE);
+      block->FindRecursiveOrCreateVar(o)->SetType(
+          framework::VarDesc::LOD_RANK_TABLE);
     }
   }
 };
diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc
index 750f96296a8414019265b26095d50eefb7dbb2dd..57b99bdb3a9359bbfdbe62a6fc9afca6c4d5df9e 100644
--- a/paddle/operators/sum_op.cc
+++ b/paddle/operators/sum_op.cc
@@ -99,11 +99,12 @@ class SumOpVarTypeInference : public framework::VarTypeInference {
     bool any_input_is_lod_tensor = std::any_of(
         inputs.begin(), inputs.end(), [block](const std::string& name) {
-          return block->Var(name)->GetType() == framework::VarDesc::LOD_TENSOR;
+          return block->FindRecursiveOrCreateVar(name)->GetType() ==
+                 framework::VarDesc::LOD_TENSOR;
         });
 
     auto is_tensor_array = [block](const std::string& name) {
-      return block->Var(name)->GetType() ==
+      return block->FindRecursiveOrCreateVar(name)->GetType() ==
              framework::VarDesc::LOD_TENSOR_ARRAY;
     };
@@ -120,7 +121,7 @@ class SumOpVarTypeInference : public framework::VarTypeInference {
     }
 
     auto out_var_name = op_desc.Output("Out").front();
-    block->Var(out_var_name)->SetType(var_type);
+    block->FindRecursiveOrCreateVar(out_var_name)->SetType(var_type);
   }
 };
diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/operators/tensor_array_read_write_op.cc
index eaf6352748729fa04ccc9b9901608cb89f489c28..62e15604c47f25c458abc69ecd1cabf964de39bb 100644
--- a/paddle/operators/tensor_array_read_write_op.cc
+++ b/paddle/operators/tensor_array_read_write_op.cc
@@ -87,7 +87,8 @@ class WriteToArrayInferVarType : public framework::VarTypeInference {
                   framework::BlockDescBind *block) const override {
     for (auto &out_var : op_desc.OutputArgumentNames()) {
       VLOG(10) << "Set Variable " << out_var << " as LOD_TENSOR_ARRAY";
-      block->Var(out_var)->SetType(framework::VarDesc::LOD_TENSOR_ARRAY);
+      block->FindRecursiveOrCreateVar(out_var)->SetType(
+          framework::VarDesc::LOD_TENSOR_ARRAY);
     }
   }
 };
diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4ca6c8507a48507fd29a9c9acae2bdf36ed936ee
--- /dev/null
+++ b/paddle/operators/while_op.cc
@@ -0,0 +1,197 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include <vector>
+#include "paddle/framework/executor.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/framework/operator.h"
+
+namespace paddle {
+namespace operators {
+
+using StepScopeVar = std::vector<framework::Scope *>;
+using LoDTensor = framework::LoDTensor;
+
+constexpr char kStepBlock[] = "step_block";
+constexpr char kCondition[] = "Condition";
+constexpr char kStepScopes[] = "StepScopes";
+constexpr char kParamGrads[] = "X@GRAD";
+constexpr char kParameters[] = "X";
+
+class WhileOp : public framework::OperatorBase {
+ public:
+  WhileOp(const std::string &type, const framework::VariableNameMap &inputs,
+          const framework::VariableNameMap &outputs,
+          const framework::AttributeMap &attrs)
+      : framework::OperatorBase(type, inputs, outputs, attrs) {}
+
+  void Run(const framework::Scope &scope,
+           const platform::DeviceContext &dev_ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(scope.FindVar(Input(kCondition)));
+    auto &cond = scope.FindVar(Input(kCondition))->Get<LoDTensor>();
+    PADDLE_ENFORCE_EQ(cond.dims(), paddle::framework::make_ddim({1}));
+
+    framework::Executor executor(dev_ctx);
+    auto *block = Attr<framework::BlockDescBind *>(kStepBlock);
+    auto *program = block->Program();
+
+    auto step_scopes =
+        scope.FindVar(Output(kStepScopes))->GetMutable<StepScopeVar>();
+
+    while (cond.data<bool>()[0]) {
+      auto &current_scope = scope.NewScope();
+      step_scopes->push_back(&current_scope);
+
+      executor.Run(*program, &current_scope, block->ID(),
+                   false /*create_local_scope*/);
+    }
+  }
+};
+
+class WhileOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  WhileOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput(kParameters,
+             "A set of variables, which are required by operators inside the "
+             "block of While Op.")
+        .AsDuplicable();
+    AddInput(
+        kCondition,
+        "(Bool) A scalar. When it is False, the While Op will be terminated.")
+        .AsDuplicable();
+    AddOutput("Out",
+              "A set of variables, which will be assigned with values "
+              "generated by operators inside the block of While Op.")
+        .AsDuplicable();
+    AddOutput(kStepScopes,
+              "(StepScopeVar) A vector of local scopes, whose size equals the "
+              "step number of the While Op. The i'th scope stores temporary "
+              "variables generated in the i'th step.");
+    AddAttr<framework::BlockDescBind *>(kStepBlock,
+                                        "The step block inside WhileOp");
+    AddComment(R"DOC(
+)DOC");
+  }
+};
+
+class WhileGradOp : public framework::OperatorBase {
+ public:
+  WhileGradOp(const std::string &type, const framework::VariableNameMap &inputs,
+              const framework::VariableNameMap &outputs,
+              const framework::AttributeMap &attrs)
+      : framework::OperatorBase(type, inputs, outputs, attrs) {}
+
+  void Run(const framework::Scope &scope,
+           const platform::DeviceContext &dev_ctx) const override {
+    // PADDLE_ENFORCE(...)
+
+    framework::Executor executor(dev_ctx);
+    auto *block = Attr<framework::BlockDescBind *>(kStepBlock);
+    auto *program = block->Program();
+
+    auto *step_scopes =
+        scope.FindVar(Input(kStepScopes))->GetMutable<StepScopeVar>();
+
+    for (auto cur_scope_iter = step_scopes->rbegin();
+         cur_scope_iter != step_scopes->rend(); ++cur_scope_iter) {
+      executor.Run(*program, *cur_scope_iter, block->ID(), false);
+
+      auto &pg_names = Outputs(kParamGrads);
+      auto &p_names = Inputs(kParameters);
+      PADDLE_ENFORCE_EQ(pg_names.size(), p_names.size());
+      for (size_t prog_id = 0; prog_id < pg_names.size(); ++prog_id) {
+        auto inside_grad_name = framework::GradVarName(p_names[prog_id]);
+
+        // TODO(tonyyang-savil): Not sure we need the following.
+        // If the gradient of that variable is not computed inside the rnn,
+        // just continue:
+        // if (local_var_names.find(inside_grad_name) ==
+        //     local_var_names.end()) {
+        //   continue;
+        // }
+
+        // zero gradient variable in step 0
+        if (cur_scope_iter == step_scopes->rbegin()) {
+          auto *var = (*cur_scope_iter)->FindVar(inside_grad_name);
+          PADDLE_ENFORCE_NOT_NULL(var);
+          if (var->IsType<LoDTensor>()) {
+            auto &inside_tensor = var->Get<LoDTensor>();
+            framework::AttributeMap attrs;
+            attrs["data_type"] = framework::ToDataType(inside_tensor.type());
+            attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
+            attrs["value"] = 0.0f;
+
+            auto zero_op = framework::OpRegistry::CreateOp(
+                "fill_constant", {}, {{"Out", {pg_names[prog_id]}}}, attrs);
+            zero_op->Run(scope, dev_ctx);
+          }
+        }
+
+        // sum gradient
+        auto *outside_var = scope.FindVar(pg_names[prog_id]);
+        PADDLE_ENFORCE_NOT_NULL(outside_var);
+        auto &outside_tensor = *outside_var->GetMutable<LoDTensor>();
+
+        std::string result_var_name;
+        auto *local_result_var = (*cur_scope_iter)->Var(&result_var_name);
+        auto &local_result_tensor =
+            *local_result_var->GetMutable<LoDTensor>();
+
+        local_result_tensor.ShareDataWith(outside_tensor);
+
+        auto sum_op = framework::OpRegistry::CreateOp(
+            "sum", {{"X", {result_var_name, inside_grad_name}}},
+            {{"Out", {result_var_name}}}, {});
+        sum_op->Run(**cur_scope_iter, dev_ctx);
+      }
+    }
+  }
+};
+
+class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+
+ protected:
+  virtual std::unique_ptr<framework::OpDescBind> Apply() const {
+    auto *grad = new framework::OpDescBind();
+    grad->SetType("while_grad");
+    for (auto &input_param : this->InputNames()) {
+      grad->SetInput(input_param, this->Input(input_param));
+      grad->SetOutput(framework::GradVarName(input_param),
+                      this->InputGrad(input_param));
+    }
+
+    for (auto &output_param : this->OutputNames()) {
+      grad->SetInput(output_param, this->Output(output_param));
+      if (output_param != kStepScopes) {
+        grad->SetInput(framework::GradVarName(output_param),
+                       this->OutputGrad(output_param));
+      }
+    }
+    grad->SetAttrMap(this->Attrs());
+    grad->SetBlockAttr(kStepBlock, *grad_block_[0]);
+
+    return std::unique_ptr<framework::OpDescBind>(grad);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+REGISTER_OPERATOR(while, paddle::operators::WhileOp,
+                  paddle::operators::WhileOpMaker,
+                  paddle::operators::WhileGradOpDescMaker);
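For intuition, the gradient accumulation in `WhileGradOp::Run` above works as follows: the step block's backward program runs once per recorded step scope, in reverse order; on the first backward step the outside parameter gradient is zero-initialized through a generated `fill_constant` op; each step then adds the step-local gradient into the outside tensor through a generated `sum` op writing into a scope-local tensor that shares the outside tensor's buffer. A rough NumPy sketch of these semantics (the function and variable names here are illustrative only, not part of the patch):

```python
import numpy as np


def accumulate_param_grad(inside_grads):
    """inside_grads: the per-step parameter gradients, in forward order."""
    outside_grad = None
    # Iterate in reverse, mirroring step_scopes->rbegin() .. rend().
    for step_grad in reversed(inside_grads):
        if outside_grad is None:
            # First backward step: mirrors the generated fill_constant op.
            outside_grad = np.zeros_like(step_grad)
        # Mirrors the generated sum op accumulating into the shared tensor.
        outside_grad = outside_grad + step_grad
    return outside_grad
```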
diff --git a/python/paddle/v2/framework/framework.py b/python/paddle/v2/framework/framework.py
index 8fb3cca91e5f8759b8a83b12428c78d222f382ac..b9db2707c0705659260c04ab3412f429058a1316 100644
--- a/python/paddle/v2/framework/framework.py
+++ b/python/paddle/v2/framework/framework.py
@@ -285,7 +285,7 @@ class Operator(object):
         self.desc.check_attrs()
         no_kernel_op_set = {
             'feed', 'fetch', 'save', 'load', 'recurrent',
-            'rnn_memory_helper_grad'
+            'rnn_memory_helper_grad', 'while'
         }
         if type not in no_kernel_op_set:
             self.desc.infer_var_type(self.block.desc)
diff --git a/python/paddle/v2/framework/layers.py b/python/paddle/v2/framework/layers.py
index f40c3cf43a6a400f67732ebd4f55afd35f98c01c..9a1999243750aca62a4ef898ae979d273902b45c 100644
--- a/python/paddle/v2/framework/layers.py
+++ b/python/paddle/v2/framework/layers.py
@@ -717,7 +717,7 @@ class StaticRNNGuard(BlockGuard):
     def __init__(self, rnn):
         if not isinstance(rnn, StaticRNN):
-            raise TypeError("StaticRNNGuard takes an StaticRNN")
+            raise TypeError("StaticRNNGuard takes a StaticRNN")
         super(StaticRNNGuard, self).__init__(rnn.helper.main_program)
         self.rnn = rnn
 
@@ -964,6 +964,82 @@ class StaticRNN(object):
         })
 
 
+class WhileGuard(BlockGuard):
+    def __init__(self, while_op):
+        if not isinstance(while_op, While):
+            raise TypeError("WhileGuard takes a while op")
+        super(WhileGuard, self).__init__(while_op.helper.main_program)
+        self.while_op = while_op
+
+    def __enter__(self):
+        self.while_op.status = While.IN_WHILE_BLOCK
+        return super(WhileGuard, self).__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            return False
+        self.while_op.status = While.AFTER_WHILE_BLOCK
+        self.while_op.complete()
+        return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
+
+
+class While(object):
+    BEFORE_WHILE_BLOCK = 0
+    IN_WHILE_BLOCK = 1
+    AFTER_WHILE_BLOCK = 2
+
+    def __init__(self, cond, name=None, main_program=None):
+        self.helper = LayerHelper("while", name=name, main_program=main_program)
+        self.status = While.BEFORE_WHILE_BLOCK
+        if not isinstance(cond, Variable):
+            raise TypeError("condition should be a variable")
+        if cond.data_type != core.DataType.BOOL:
+            raise TypeError("condition should be a bool variable")
+        if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
+            raise TypeError("condition should be a bool scalar")
+        self.cond_var = cond
+
+    def block(self):
+        return WhileGuard(self)
+
+    def complete(self):
+        main_program = self.helper.main_program
+        while_block = main_program.current_block()
+        parent_block = main_program.block(main_program.current_block()
+                                          .parent_idx)
+
+        inner_outputs = {self.cond_var.name}
+        x_name_list = set()
+        for op in while_block.ops:
+            for iname in op.input_names:
+                for in_var_name in op.input(iname):
+                    if in_var_name not in inner_outputs:
+                        x_name_list.add(in_var_name)
+
+            for oname in op.output_names:
+                for out_var_name in op.output(oname):
+                    inner_outputs.add(out_var_name)
+
+        out_vars = []
+        for inner_out_name in inner_outputs:
+            if inner_out_name in parent_block.vars:
+                out_vars.append(parent_block.var(inner_out_name))
+
+        step_scope = parent_block.create_var(
+            type=core.VarDesc.VarType.STEP_SCOPES)
+
+        parent_block.append_op(
+            type='while',
+            inputs={
+                'X': [parent_block.var(x_name) for x_name in x_name_list],
+                'Condition': [self.cond_var]
+            },
+            outputs={'Out': out_vars,
+                     'StepScopes': [step_scope]},
+            attrs={'step_block': while_block})
+
 
 def lstm(x,
          c_pre_init,
         hidden_dim,
@@ -1102,10 +1178,10 @@ def increment(x, value=1.0, in_place=True, main_program=None):
     operation is performed in-place by default.
""" helper = LayerHelper("increment", **locals()) - if in_place: - out = x - else: + if not in_place: out = helper.create_tmp_variable(dtype=x.data_type) + else: + out = x helper.append_op( type='increment', inputs={'X': [x]}, @@ -1133,6 +1209,26 @@ def array_write(x, i, array=None, main_program=None): return array +def create_array(dtype, main_program=None): + helper = LayerHelper("array", **locals()) + return helper.create_variable( + name="{0}.out".format(helper.name), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=dtype) + + +def less_than(x, y, cond=None, main_program=None): + helper = LayerHelper("less_than", **locals()) + if cond is None: + cond = helper.create_tmp_variable(dtype='bool') + cond.stop_gradient = True + + helper.append_op( + type='less_than', inputs={'X': [x], + 'Y': [y]}, outputs={'Out': [cond]}) + return cond + + def array_read(array, i, main_program=None): """ This function creates an operator to read the data in as a diff --git a/python/paddle/v2/framework/tests/test_while_op.py b/python/paddle/v2/framework/tests/test_while_op.py new file mode 100644 index 0000000000000000000000000000000000000000..1c344eae49705ecce586154c30c4d4f770022e7e --- /dev/null +++ b/python/paddle/v2/framework/tests/test_while_op.py @@ -0,0 +1,68 @@ +import unittest +import paddle.v2.framework.layers as layers +from paddle.v2.framework.executor import Executor +import paddle.v2.framework.core as core +import numpy + + +class TestWhileOp(unittest.TestCase): + def test_simple_forward(self): + d0 = layers.data( + "d0", shape=[10], append_batch_size=False, data_type='float32') + d1 = layers.data( + "d1", shape=[10], append_batch_size=False, data_type='float32') + d2 = layers.data( + "d2", shape=[10], append_batch_size=False, data_type='float32') + i = layers.zeros(shape=[1], dtype='int64') + i.stop_gradient = True + init = layers.zeros(shape=[10], dtype='float32') + mem_array = layers.array_write(init, i=i) + data_array = layers.array_write(x=d0, i=i) + + i = layers.increment(i) + layers.array_write(d1, i, array=data_array) + + i = layers.increment(i) + layers.array_write(d2, i, array=data_array) + + i = layers.zeros(shape=[1], dtype='int64') + i.stop_gradient = True + + array_len = layers.fill_constant(shape=[1], dtype='int64', value=3) + cond = layers.less_than(x=i, y=array_len) + + while_op = layers.While(cond=cond) + with while_op.block(): + d = layers.array_read(array=data_array, i=i) + prev = layers.array_read(array=mem_array, i=i) + i = layers.increment(x=i, in_place=True) + result = layers.sums(input=[d, prev]) + layers.array_write(result, i=i, array=mem_array) + layers.less_than(x=i, y=array_len, cond=cond) + sum_result = layers.array_read(mem_array, i=array_len) + + cpu = core.CPUPlace() + exe = Executor(cpu) + d = [] + + for i in xrange(3): + d.append(numpy.random.random(size=[10]).astype('float32')) + + d_tensor = [] + for item in d: + t = core.LoDTensor() + t.set(item, cpu) + d_tensor.append(t) + + outs = map(numpy.array, + exe.run(feed={ + 'd0': d_tensor[0], + 'd1': d_tensor[1], + 'd2': d_tensor[2] + }, + fetch_list=[sum_result])) + self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01) + + +if __name__ == '__main__': + unittest.main()